ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
c64344e6-ea88-492c-9734-1b829227dfc1 | cpp | tensorflow/tensorflow | autotune_serialize | tensorflow/core/util/autotune_maps/autotune_serialize.cc | tensorflow/core/util/autotune_maps/autotune_serialize_test.cc | #include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "xla/status_macros.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/util/activation_mode.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
namespace tensorflow {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmConfigProto;
using stream_executor::dnn::AlgorithmDesc;
using stream_executor::dnn::AlgorithmProto;
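// Serializes an in-memory autotune map into a ConvMapProto. Entries are
// keyed by the deterministic serialization of their ConvParametersProto and
// emitted in sorted order, so the output is stable across runs.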
template <typename Op>
StatusOr<ConvMapProto> ConvMapToProto(
const AutotuneMap<ConvParameters, AutotuneEntry<Op>> &autotune_map) {
ConvMapProto proto;
std::map<string, ConvMapProto::Entry> sorted_map;
for (auto const &p : autotune_map.GetMap()) {
const ConvParameters &params = p.first;
const ConvParametersProto &params_proto = params.proto();
VLOG(1) << "Reading: " << params.ToString();
ConvMapProto::Entry kv;
*kv.mutable_key() = params_proto;
if (p.second.is_algorithm_config()) {
*kv.mutable_value() = p.second.GetAlgorithmConfig().ToProto();
} else {
const auto &runners = p.second.GetOpRunners();
*kv.mutable_value()->mutable_algorithm() =
runners.primary->ToAlgorithmDesc().ToProto();
if (runners.no_scratch_fallback) {
*kv.mutable_value()->mutable_algorithm_no_scratch() =
runners.no_scratch_fallback->ToAlgorithmDesc().ToProto();
}
}
std::string serialized_params;
TF_RET_CHECK(
tsl::SerializeToStringDeterministic(params_proto, &serialized_params));
sorted_map.insert(std::make_pair(std::move(serialized_params), kv));
}
for (auto const &p : sorted_map) {
ConvMapProto::Entry *kv = proto.add_kv_pairs();
*kv = p.second;
}
return proto;
}
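// Restores entries from a ConvMapProto into the runtime autotune map. Each
// entry is inserted once per visible GPU whose model string matches the
// recorded device identifier; entries whose ConvParameters version differs
// from the runtime's abort the load.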
template <typename Op>
Status PopulateConvMap(
const ConvMapProto &m,
AutotuneMap<ConvParameters, AutotuneEntry<Op>> *autotune_map) {
if (m.kv_pairs().size() == 0) {
return OkStatus();
}
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(se::GpuPlatformName()));
std::vector<std::string> device_descs;
for (int i = 0; i < platform->VisibleDeviceCount(); i++) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::DeviceDescription> device_desc,
platform->DescriptionForDevice(i));
device_descs.push_back(device_desc->model_str());
}
std::set<std::string> unmatched_device_descs;
for (const ConvMapProto::Entry &kv : m.kv_pairs()) {
const ConvParametersProto &params_proto = kv.key();
if (params_proto.version() != ConvParameters::kVersion) {
VLOG(1) << "ConvParametersProto with the incompatible version:"
<< params_proto.DebugString();
return errors::Aborted(
"Aborted because the loaded autotune results for convolution "
"operations have a version different "
"from runtime's version. Expected version: ",
ConvParameters::kVersion,
". Actual version: ", params_proto.version());
}
const AlgorithmConfigProto &algorithm_config_proto = kv.value();
const AlgorithmDesc primary(algorithm_config_proto.algorithm());
const absl::optional<AlgorithmDesc> fallback =
algorithm_config_proto.has_algorithm_no_scratch()
? absl::optional<AlgorithmDesc>(
AlgorithmDesc(algorithm_config_proto.algorithm_no_scratch()))
: absl::nullopt;
bool devices_matched = false;
for (int ordinal = 0; ordinal < device_descs.size(); ordinal++) {
const std::string &desc_str = device_descs[ordinal];
if (desc_str != params_proto.device_identifier()) {
continue;
}
devices_matched = true;
AutotuneEntry<Op> entry;
#if TENSORFLOW_USE_ROCM
entry = AutotuneEntry<Op>(AlgorithmConfig(algorithm_config_proto));
#else
entry = AutotuneEntry<Op>(primary, fallback);
#endif
autotune_map->Insert(ConvParameters(ordinal, params_proto), entry);
}
if (!devices_matched) {
unmatched_device_descs.insert(params_proto.device_identifier());
}
}
if (!unmatched_device_descs.empty()) {
LOG(WARNING) << "Unmatched device id's from AoT autotuning data: "
<< str_util::Join(unmatched_device_descs, ", ")
<< "; existing devices: "
<< str_util::Join(device_descs, ", ");
}
return OkStatus();
}
}
#endif
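// Public entry points: serialize both the plain and fused convolution
// autotune maps into a single AutotuneMapsProto, or load one back. Without
// CUDA or ROCm the GPU-specific work is compiled out and the proto stays
// empty.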
Status SerializeAutotuneMaps(std::string *output) {
AutotuneMapsProto proto;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_ASSIGN_OR_RETURN(*proto.mutable_conv_map(),
ConvMapToProto(*ConvAutotuneMap::GetInstance()));
TF_ASSIGN_OR_RETURN(*proto.mutable_fused_conv_map(),
ConvMapToProto(*FusedConvAutotuneMap::GetInstance()));
#endif
TF_RET_CHECK(tsl::SerializeToStringDeterministic(proto, output));
return absl::OkStatus();
}
Status LoadSerializedAutotuneMaps(absl::string_view s) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
AutotuneMapsProto proto;
if (!proto.ParseFromString(string(s))) {
return errors::InvalidArgument(
"Failed to parse the autotune maps from string.");
}
TF_RETURN_IF_ERROR(
PopulateConvMap(proto.conv_map(), ConvAutotuneMap::GetInstance()));
TF_RETURN_IF_ERROR(PopulateConvMap(proto.fused_conv_map(),
FusedConvAutotuneMap::GetInstance()));
#endif
return absl::OkStatus();
}
void ResetAutotuneMaps() {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ConvAutotuneMap::GetInstance()->ClearMap();
FusedConvAutotuneMap::GetInstance()->ClearMap();
#endif
}
} | #include "xla/stream_executor/platform_manager.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/stream_executor/gpu/gpu_init.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmDesc;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
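// Returns an executor for the first visible GPU; the tests below require at
// least one device.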
se::StreamExecutor* GetStreamExec() {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
CHECK_GT(platform->VisibleDeviceCount(), 0);
return platform->ExecutorForDevice(0).value();
}
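// Round-trip tests: serialize the autotune maps, clear them, reload, and
// verify the restored contents.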
TEST(AutotuneSerializeTest, Empty) {
ResetAutotuneMaps();
std::string output;
TF_CHECK_OK(SerializeAutotuneMaps(&output));
TF_CHECK_OK(LoadSerializedAutotuneMaps(output));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
TEST(AutotuneSerializeTest, Consistency) {
ResetAutotuneMaps();
ConvParameters conv_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1};
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
};
ConvParameters contrib_fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kRelu,
true}};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AutotuneEntry<se::dnn::ConvOp> example_a(algorithm, algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(conv_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(fused_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(contrib_fused_params_example_a,
example_a);
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
TF_CHECK_OK(LoadSerializedAutotuneMaps(serialized_string));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 3);
AutotuneEntry<se::dnn::ConvOp> entry;
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(conv_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(ConvAutotuneMap::GetInstance()->Find(
contrib_fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
}
TEST(AutotuneSerializeTest, VersionControl) {
ResetAutotuneMaps();
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
ConvParameters::kVersion - 1};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AlgorithmConfig algorithm_config_example_a(algorithm, 1,
algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(
fused_params_example_a,
AutotuneEntry<se::dnn::ConvOp>(algorithm_config_example_a));
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
EXPECT_THAT(
LoadSerializedAutotuneMaps(serialized_string),
StatusIs(error::ABORTED,
HasSubstr("Aborted because the loaded autotune results")));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/autotune_serialize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/autotune_serialize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e3f7268-df80-4d87-9b14-8d5ae82ee539 | cpp | tensorflow/tensorflow | tensor_bundle | tensorflow/core/util/tensor_bundle/tensor_bundle.cc | tensorflow/core/util/tensor_bundle/tensor_bundle_test.cc | #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/lib/io/buffered_file.h"
#include "xla/tsl/util/byte_swap_array.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/crc32c.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/core/util/tensor_slice_util.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tensorflow {
const int kTensorBundleMinProducer = 0;
const int kTensorBundleMinConsumer = 0;
const int kTensorBundleVersion = 1;
static const int kBufferSize = 1024 * 1024;
const char* const kHeaderEntryKey = "";
const int64_t kLargeTensorThreshold = static_cast<int64_t>(1) << 32;
const int kMaxFileReadThreads = 8;
const int64_t kMinSectionSize = static_cast<int64_t>(1) << 31;
namespace {
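// Reads a string tensor written by WriteStringTensor below: varint-encoded
// element lengths, a masked crc32c checksum of those lengths, then the raw
// string bytes. The running crc32c is extended over everything read so the
// caller can verify the entry checksum.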
Status ReadStringTensor(io::InputBuffer* buffered_file, size_t num_elements,
size_t offset, size_t size, tstring* destination,
uint32* actual_crc32c, bool need_to_swap_bytes) {
if (size == 0) return absl::OkStatus();
CHECK_GT(size, 0);
TF_RETURN_IF_ERROR(buffered_file->Seek(offset));
TF_RETURN_IF_ERROR(buffered_file->Hint(size));
std::vector<uint64> string_lengths(num_elements);
for (size_t i = 0; i < num_elements; ++i) {
TF_RETURN_IF_ERROR(buffered_file->ReadVarint64(&string_lengths[i]));
if (string_lengths[i] <= UINT32_MAX) {
uint32 elem_size_uint32 = static_cast<uint32>(string_lengths[i]);
if (need_to_swap_bytes) {
elem_size_uint32 = BYTE_SWAP_32(elem_size_uint32);
}
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<const char*>(&elem_size_uint32),
sizeof(uint32));
} else {
uint64 length = string_lengths[i];
if (need_to_swap_bytes) {
length = BYTE_SWAP_64(length);
}
*actual_crc32c =
crc32c::Extend(*actual_crc32c, reinterpret_cast<const char*>(&length),
sizeof(uint64));
}
}
if (offset + size < buffered_file->Tell()) {
return errors::DataLoss("String lengths longer than expected offset ",
offset + size);
}
uint32 raw_length_checksum = 0;
uint32 length_checksum = 0;
size_t unused_bytes_read = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadNBytes(
sizeof(uint32), reinterpret_cast<char*>(&raw_length_checksum),
&unused_bytes_read));
length_checksum = need_to_swap_bytes ? BYTE_SWAP_32(raw_length_checksum)
: raw_length_checksum;
if (crc32c::Unmask(length_checksum) != *actual_crc32c) {
return errors::DataLoss(
"The length checksum does not match: expected ",
strings::Printf("%08u", crc32c::Unmask(length_checksum)),
" but actual is ", strings::Printf("%08u", *actual_crc32c));
}
*actual_crc32c = crc32c::Extend(*actual_crc32c,
reinterpret_cast<char*>(&raw_length_checksum),
sizeof(uint32));
for (size_t i = 0; i < num_elements; ++i) {
const uint64 string_length = string_lengths[i];
tstring* buffer = &destination[i];
buffer->resize(string_length);
size_t bytes_read = 0;
TF_RETURN_IF_ERROR(
buffered_file->ReadNBytes(string_length, &(*buffer)[0], &bytes_read));
*actual_crc32c = crc32c::Extend(*actual_crc32c, buffer->data(), bytes_read);
}
return absl::OkStatus();
}
Status ReadVariantTensor(io::InputBuffer* buffered_file, Tensor* ret,
size_t offset, size_t size, uint32* actual_crc32c) {
if (size == 0) return absl::OkStatus();
size_t num_elements = ret->NumElements();
TF_RETURN_IF_ERROR(buffered_file->Seek(offset));
TF_RETURN_IF_ERROR(buffered_file->Hint(size));
for (size_t i = 0; i < num_elements; ++i) {
uint64 string_length = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadVarint64(&string_length));
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<const char*>(&string_length),
sizeof(uint64));
string buffer;
buffer.resize(string_length);
size_t bytes_read = 0;
TF_RETURN_IF_ERROR(
buffered_file->ReadNBytes(string_length, &buffer[0], &bytes_read));
*actual_crc32c = crc32c::Extend(*actual_crc32c, buffer.data(), bytes_read);
VariantTensorDataProto proto;
if (!proto.ParseFromString(buffer)) {
return errors::DataLoss("Unable to parse VariantTensorDataProto from ",
"buffer of size ", string_length, ". ",
"Bundle entry offset: ", offset, " size: ", size);
}
Variant v = proto;
if (!DecodeUnaryVariant(&v)) {
return errors::Internal("Could not decode variant with type_name: \"",
v.TypeName(), "\". Perhaps you forgot to ",
"register a decoder via ",
"REGISTER_UNARY_VARIANT_DECODE_FUNCTION?");
}
uint32 checksum = 0;
size_t unused_bytes_read = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadNBytes(
sizeof(uint32), reinterpret_cast<char*>(&checksum),
&unused_bytes_read));
if (crc32c::Unmask(checksum) != *actual_crc32c) {
return errors::DataLoss(
"The checksum after Variant ", i, " does not match.",
" Expected: ", strings::Printf("%08u", crc32c::Unmask(checksum)),
" Actual: ", strings::Printf("%08u", *actual_crc32c));
}
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<char*>(&checksum), sizeof(uint32));
ret->flat<Variant>()(i) = std::move(v);
}
return absl::OkStatus();
}
char* GetBackingBuffer(const Tensor& val) {
CHECK(DataTypeCanUseMemcpy(val.dtype())) << val.dtype();
return const_cast<char*>(val.tensor_data().data());
}
tstring* GetStringBackingBuffer(const Tensor& val) {
CHECK_EQ(DT_STRING, val.dtype());
return const_cast<tstring*>(val.flat<tstring>().data());
}
Status ParseEntryProto(StringPiece key, StringPiece value,
protobuf::MessageLite* out) {
if (!out->ParseFromArray(value.data(), value.size())) {
return errors::DataLoss("Entry for key ", key, " not parseable.");
}
return absl::OkStatus();
}
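// Writers for the three on-disk representations: raw bytes for memcpy-able
// dtypes, varint length-prefixed data (plus a masked length checksum) for
// strings, and per-element length/proto/checksum records for variants.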
Status WriteTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written) {
DCHECK_NE(val.dtype(), DT_STRING);
DCHECK_NE(val.dtype(), DT_VARIANT);
*bytes_written = val.TotalBytes();
char* buf = GetBackingBuffer(val);
VLOG(1) << "Appending " << *bytes_written << " bytes to file";
return out->Append(StringPiece(buf, *bytes_written));
}
Status WriteStringTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written, uint32* crc32c) {
DCHECK_EQ(val.dtype(), DT_STRING);
const tstring* strings = GetStringBackingBuffer(val);
string lengths;
lengths.reserve(val.NumElements());
*crc32c = 0;
for (int64_t i = 0; i < val.NumElements(); ++i) {
const tstring* elem = &strings[i];
DCHECK_EQ(elem->size(), static_cast<uint64>(elem->size()));
const uint64 elem_size = static_cast<uint64>(elem->size());
core::PutVarint64(&lengths, elem_size);
if (elem_size <= UINT32_MAX) {
const uint32 elem_size_uint32 = static_cast<uint32>(elem_size);
*crc32c = crc32c::Extend(*crc32c,
reinterpret_cast<const char*>(&elem_size_uint32),
sizeof(uint32));
} else {
*crc32c = crc32c::Extend(
*crc32c, reinterpret_cast<const char*>(&elem_size), sizeof(uint64));
}
}
TF_RETURN_IF_ERROR(out->Append(lengths));
*bytes_written = lengths.size();
const uint32 length_checksum = crc32c::Mask(*crc32c);
TF_RETURN_IF_ERROR(out->Append(StringPiece(
reinterpret_cast<const char*>(&length_checksum), sizeof(uint32))));
*crc32c = crc32c::Extend(
*crc32c, reinterpret_cast<const char*>(&length_checksum), sizeof(uint32));
*bytes_written += sizeof(uint32);
for (int64_t i = 0; i < val.NumElements(); ++i) {
const tstring* string = &strings[i];
TF_RETURN_IF_ERROR(out->Append(*string));
*bytes_written += string->size();
*crc32c = crc32c::Extend(*crc32c, string->data(), string->size());
}
return absl::OkStatus();
}
Status WriteVariantTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written, uint32* crc32c) {
DCHECK_EQ(val.dtype(), DT_VARIANT);
*crc32c = 0;
*bytes_written = 0;
for (int64_t i = 0; i < val.NumElements(); ++i) {
VariantTensorData data;
val.flat<Variant>()(i).Encode(&data);
VariantTensorDataProto proto;
data.ToProto(&proto);
string elem;
if (!proto.SerializeToString(&elem)) {
return errors::Unknown(
"Failed to serialize tensor data of size ", proto.ByteSizeLong(),
". Tensor: ", val.flat<Variant>()(i).DebugString());
}
DCHECK_EQ(elem.size(), static_cast<uint64>(elem.size()));
const auto elem_size = static_cast<uint64>(elem.size());
string len;
core::PutVarint64(&len, elem_size);
TF_RETURN_IF_ERROR(out->Append(len));
*crc32c = crc32c::Extend(*crc32c, reinterpret_cast<const char*>(&elem_size),
sizeof(uint64));
*bytes_written += len.size();
TF_RETURN_IF_ERROR(out->Append(elem));
*crc32c = crc32c::Extend(*crc32c, elem.data(), elem.size());
*bytes_written += elem.size();
const uint32 length_checksum = crc32c::Mask(*crc32c);
TF_RETURN_IF_ERROR(out->Append(StringPiece(
reinterpret_cast<const char*>(&length_checksum), sizeof(uint32))));
*crc32c =
crc32c::Extend(*crc32c, reinterpret_cast<const char*>(&length_checksum),
sizeof(uint32));
*bytes_written += sizeof(uint32);
}
return absl::OkStatus();
}
bool IsFullSlice(const TensorSlice& slice_spec,
const TensorShape& full_tensor_shape) {
if (slice_spec.IsFull()) {
return true;
} else {
TensorShape sliced_shape;
slice_spec.SliceTensorShape(full_tensor_shape, &sliced_shape).IgnoreError();
return sliced_shape == full_tensor_shape;
}
}
Status CorruptFileError(const Status& in_status, const string& filename,
const string& detail) {
if (in_status.ok()) {
return errors::Internal("Unable to read file (", filename,
"). Perhaps the file is corrupt or was produced by "
"a newer version of TensorFlow with format changes "
"(",
detail, ")");
}
return Status(
in_status.code(),
strings::StrCat("Unable to read file (", filename,
"). Perhaps the file is corrupt or was produced by a "
"newer version of TensorFlow with format changes (",
detail, "): ", in_status.message()));
}
table::Options TableBuilderOptions() {
table::Options o;
o.compression = table::kNoCompression;
return o;
}
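// Pads the data file with zero bytes so the next tensor starts at a multiple
// of the configured alignment.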
Status PadAlignment(tsl::BufferedWritableFile* out, int alignment,
int64_t* size) {
int bytes_over = *size % alignment;
if (bytes_over == 0) {
return absl::OkStatus();
}
int bytes_to_write = alignment - bytes_over;
Status status = out->Append(string(bytes_to_write, '\0'));
if (status.ok()) {
*size += bytes_to_write;
}
return status;
}
}
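// BundleWriter streams tensor data into a single shard file (a temp file
// when atomic renames are available) and accumulates per-tensor metadata
// (dtype, shape, offset, size, crc32c) that Finish() writes into the
// metadata table.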
BundleWriter::BundleWriter(Env* env, StringPiece prefix, const Options& options)
: env_(env), options_(options), prefix_(prefix), out_(nullptr), size_(0) {
status_ = env_->HasAtomicMove(prefix_, &use_temp_file_);
if (!status_.ok()) return;
data_path_ = DataFilename(prefix_, 0, 1);
metadata_path_ = MetaFilename(prefix_);
if (use_temp_file_) {
data_path_ = strings::StrCat(data_path_, ".tempstate", random::New64());
metadata_path_ =
strings::StrCat(metadata_path_, ".tempstate", random::New64());
}
status_ = env_->CreateDir(string(io::Dirname(prefix_)));
if (!status_.ok() && !errors::IsAlreadyExists(status_)) {
return;
}
std::unique_ptr<WritableFile> wrapper;
status_ = env_->NewWritableFile(data_path_, &wrapper);
if (!status_.ok()) return;
out_ = std::make_unique<tsl::BufferedWritableFile>(
std::move(wrapper), 8 << 20 /* 8 MiB write buffer */);
VLOG(1) << "Writing to file " << data_path_;
}
Status BundleWriter::Add(StringPiece key, const Tensor& val) {
if (!status_.ok()) return status_;
CHECK_NE(key, kHeaderEntryKey);
const string key_string(key);
if (entries_.find(key_string) != entries_.end()) {
status_ = errors::InvalidArgument("Adding duplicate key: ", key);
return status_;
}
BundleEntryProto* entry = &entries_[key_string];
entry->set_dtype(val.dtype());
val.shape().AsProto(entry->mutable_shape());
entry->set_shard_id(0);
entry->set_offset(size_);
size_t data_bytes_written = 0;
uint32 crc32c = 0;
out_->reset_crc32();
if (val.dtype() == DT_STRING) {
status_ = WriteStringTensor(val, out_.get(), &data_bytes_written, &crc32c);
} else if (val.dtype() == DT_VARIANT) {
status_ = WriteVariantTensor(val, out_.get(), &data_bytes_written, &crc32c);
} else {
status_ = WriteTensor(val, out_.get(), &data_bytes_written);
crc32c = out_->crc32();
}
if (status_.ok()) {
entry->set_size(data_bytes_written);
entry->set_crc32c(crc32c::Mask(crc32c));
size_ += data_bytes_written;
status_ = PadAlignment(out_.get(), options_.data_alignment, &size_);
}
return status_;
}
Status BundleWriter::AddSlice(StringPiece full_tensor_key,
const TensorShape& full_tensor_shape,
const TensorSlice& slice_spec,
const Tensor& slice_tensor) {
if (!status_.ok()) return status_;
CHECK_NE(full_tensor_key, kHeaderEntryKey);
if (IsFullSlice(slice_spec, full_tensor_shape)) {
return Add(full_tensor_key, slice_tensor);
}
const string full_tensor_key_string(full_tensor_key);
BundleEntryProto* full_entry = &entries_[full_tensor_key_string];
if (full_entry->dtype() != DT_INVALID) {
CHECK_EQ(full_entry->dtype(), slice_tensor.dtype());
}
if (full_entry->has_shape()) {
CHECK(TensorShape(full_entry->shape()) == full_tensor_shape);
}
full_entry->set_dtype(slice_tensor.dtype());
full_tensor_shape.AsProto(full_entry->mutable_shape());
TensorSliceProto* slice_proto = full_entry->add_slices();
slice_spec.AsProto(slice_proto);
const string slice_name =
checkpoint::EncodeTensorNameSlice(full_tensor_key_string, slice_spec);
status_ = Add(slice_name, slice_tensor);
return status_;
}
Status BundleWriter::Finish() {
if (out_) {
status_.Update(out_->Close());
out_ = nullptr;
if (status_.ok()) {
if (use_temp_file_) {
status_ =
Env::Default()->RenameFile(data_path_, DataFilename(prefix_, 0, 1));
}
} else {
Env::Default()->DeleteFile(data_path_).IgnoreError();
}
}
if (!status_.ok()) return status_;
std::unique_ptr<WritableFile> file;
status_ = env_->NewWritableFile(metadata_path_, &file);
if (!status_.ok()) return status_;
{
table::Options options;
options.compression = table::kNoCompression;
table::TableBuilder builder(options, file.get());
BundleHeaderProto header;
header.set_num_shards(1);
header.set_endianness(BundleHeaderProto::LITTLE);
if (!port::kLittleEndian) header.set_endianness(BundleHeaderProto::BIG);
VersionDef* version = header.mutable_version();
version->set_producer(kTensorBundleVersion);
version->set_min_consumer(kTensorBundleMinConsumer);
builder.Add(kHeaderEntryKey, header.SerializeAsString());
for (const auto& p : entries_) {
builder.Add(p.first, p.second.SerializeAsString());
}
status_ = builder.Finish();
}
status_.Update(file->Close());
if (!status_.ok()) {
Env::Default()->DeleteFile(metadata_path_).IgnoreError();
return status_;
} else if (use_temp_file_) {
status_ = Env::Default()->RenameFile(metadata_path_, MetaFilename(prefix_));
if (!status_.ok()) return status_;
}
status_ = errors::Internal("BundleWriter is closed");
return absl::OkStatus();
}
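// State accumulated while merging several bundles into one: the total shard
// count, header consistency checks (endianness, format version), the union
// of all entries, and the shard-file renaming map.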
struct MergeState {
int num_shards = 0;
bool seen_first_bundle = false;
BundleHeaderProto_Endianness endianness;
VersionDef version;
std::map<string, BundleEntryProto> entries;
std::unordered_map<string, int32> shard_ids;
};
static Status MergeOneBundle(Env* env, StringPiece prefix,
MergeState* merge_state) {
VLOG(1) << "Merging bundle:" << prefix;
const string filename = MetaFilename(prefix);
uint64 file_size;
TF_RETURN_IF_ERROR(env->GetFileSize(filename, &file_size));
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename, &file));
table::Table* table = nullptr;
TF_RETURN_IF_ERROR(
table::Table::Open(TableBuilderOptions(), file.get(), file_size, &table));
std::unique_ptr<table::Table> table_deleter(table);
std::unique_ptr<table::Iterator> iter(table->NewIterator());
int num_shards;
{
iter->Seek(kHeaderEntryKey);
if (!iter->Valid()) {
return CorruptFileError(iter->status(), filename,
"failed to seek to header entry");
}
BundleHeaderProto header;
Status s = ParseEntryProto(iter->key(), iter->value(), &header);
if (!s.ok()) return CorruptFileError(s, filename, "unable to parse header");
merge_state->num_shards += header.num_shards();
if (!merge_state->seen_first_bundle) {
merge_state->seen_first_bundle = true;
merge_state->endianness = header.endianness();
merge_state->version = header.version();
} else {
if (merge_state->endianness != header.endianness()) {
return errors::InvalidArgument(
"Merging bundles with conflicting endianness; inputs corrupted?");
}
string curr_version, merge_version;
header.version().SerializeToString(&curr_version);
merge_state->version.SerializeToString(&merge_version);
if (curr_version != merge_version) {
return errors::InvalidArgument(
"Merging bundles with different format versions: merged ",
merge_version, " vs. curr ", curr_version);
}
}
num_shards = header.num_shards();
iter->Next();
}
BundleEntryProto to_merge_entry;
for (; iter->Valid(); iter->Next()) {
const string key(iter->key());
const auto entry_iter = merge_state->entries.find(key);
if (entry_iter != merge_state->entries.end() &&
entry_iter->second.slices().empty()) {
return errors::InvalidArgument(
"Duplicate tensor keyed by ", key,
" encountered, when merging prefix: ", prefix);
}
TF_RETURN_IF_ERROR(
ParseEntryProto(iter->key(), iter->value(), &to_merge_entry));
if (entry_iter != merge_state->entries.end()) {
BundleEntryProto& existing_entry = entry_iter->second;
if (to_merge_entry.slices().empty()) {
return errors::Internal(
"Duplicate tensor keyed by ", key,
"; attempting to merge in a non-slice bundle entry");
}
for (int i = 0; i < to_merge_entry.slices_size(); ++i) {
TensorSliceProto* slot = existing_entry.add_slices();
*slot = to_merge_entry.slices(i);
}
CHECK_EQ(existing_entry.dtype(), to_merge_entry.dtype());
CHECK(TensorShape(existing_entry.shape()) ==
TensorShape(to_merge_entry.shape()));
continue;
}
auto result = merge_state->shard_ids.insert(
{DataFilename(prefix, to_merge_entry.shard_id(), num_shards),
merge_state->shard_ids.size()});
to_merge_entry.set_shard_id(result.first->second);
merge_state->entries[key] = to_merge_entry;
}
return absl::OkStatus();
}
Status MergeBundles(Env* env, absl::Span<const tstring> prefixes,
StringPiece merged_prefix, bool allow_missing_files) {
MergeState merge;
Status status = env->CreateDir(string(io::Dirname(merged_prefix)));
if (!status.ok() && !errors::IsAlreadyExists(status)) return status;
bool atleast_one_file_exists = false;
for (auto& prefix : prefixes) {
if (!env->FileExists(MetaFilename(prefix)).ok()) {
if (allow_missing_files) continue;
return errors::InvalidArgument(
"allow_missing_files was set to false and ", prefix,
" did not exist.", env->FileExists(prefix).ToString());
}
atleast_one_file_exists = true;
TF_RETURN_IF_ERROR(MergeOneBundle(env, prefix, &merge));
}
if (!atleast_one_file_exists) {
return errors::InvalidArgument(
"At least one prefix checkpoint file must exist, but none existed.");
}
for (const auto& p : merge.shard_ids) {
VLOG(1) << "Renaming " << p.first << " to "
<< DataFilename(merged_prefix, p.second, merge.shard_ids.size());
TF_RETURN_IF_ERROR(env->RenameFile(
p.first,
DataFilename(merged_prefix, p.second, merge.shard_ids.size())));
}
std::unique_ptr<WritableFile> merged_metadata;
TF_RETURN_IF_ERROR(
env->NewWritableFile(MetaFilename(merged_prefix), &merged_metadata));
{
table::TableBuilder builder(TableBuilderOptions(), merged_metadata.get());
BundleHeaderProto header;
header.set_num_shards(merge.num_shards);
header.set_endianness(merge.endianness);
*header.mutable_version() = merge.version;
builder.Add(kHeaderEntryKey, header.SerializeAsString());
for (const auto& p : merge.entries) {
builder.Add(p.first, p.second.SerializeAsString());
}
status = builder.Finish();
}
status.Update(merged_metadata->Close());
if (!status.ok()) return status;
VLOG(1) << "Merged bundles to:" << merged_prefix;
for (const tstring& prefix : prefixes) {
env->DeleteFile(MetaFilename(prefix)).IgnoreError();
}
return status;
}
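// BundleReader opens the metadata table, validates the header (endianness,
// shard count, format version), and lazily opens data shards on first read.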
BundleReader::BundleReader(
Env* env, StringPiece prefix,
bool enable_multi_threading_for_testing)
: BundleReader(env, prefix, {nullptr, enable_multi_threading_for_testing}) {
}
BundleReader::BundleReader(Env* env, StringPiece prefix, Options options)
: env_(env),
prefix_(prefix),
cache_(options.cache),
metadata_(nullptr),
table_(nullptr),
index_cache_(nullptr),
iter_(nullptr),
need_to_swap_bytes_(false),
enable_multi_threading_for_testing_(
options.enable_multi_threading_for_testing) {
if (cache_ == nullptr) {
owned_cache_ = std::make_unique<BundleCache>(env);
cache_ = owned_cache_.get();
}
const string filename = MetaFilename(prefix_);
uint64 file_size;
status_ = env_->GetFileSize(filename, &file_size);
if (!status_.ok()) return;
std::unique_ptr<RandomAccessFile> wrapper;
status_ = env_->NewRandomAccessFile(filename, &wrapper);
if (!status_.ok()) return;
metadata_ = wrapper.release();
table::Options o;
int64_t cache_size;
Status s =
ReadInt64FromEnvVar("TF_TABLE_INDEX_CACHE_SIZE_IN_MB", 0, &cache_size);
if (s.ok() && cache_size > 0) {
index_cache_ = table::NewLRUCache(cache_size << 20);
o.block_cache = index_cache_;
}
status_ = table::Table::Open(o, metadata_, file_size, &table_);
if (!status_.ok()) return;
iter_ = table_->NewIterator();
iter_->Seek(kHeaderEntryKey);
if (!iter_->Valid()) {
status_ = CorruptFileError(iter_->status(), filename,
"failed to seek to header entry");
return;
}
BundleHeaderProto header;
status_ = ParseEntryProto(iter_->key(), iter_->value(), &header);
if (!status_.ok()) {
status_ = CorruptFileError(status_, filename, "unable to parse header");
return;
}
num_shards_ = header.num_shards();
if ((header.endianness() == BundleHeaderProto::BIG && port::kLittleEndian) ||
(header.endianness() == BundleHeaderProto::LITTLE &&
!port::kLittleEndian)) {
need_to_swap_bytes_ = true;
}
status_ = CheckVersions(header.version(), kTensorBundleVersion,
kTensorBundleMinProducer, "Checkpoint", "checkpoint");
}
BundleReader::~BundleReader() {
delete metadata_;
delete iter_;
delete table_;
if (index_cache_) {
delete index_cache_;
}
for (auto& temp : data_) {
delete temp.second;
}
for (auto& temp : tensor_slices_) {
delete temp.second;
}
data_.clear();
tensor_slices_.clear();
}
Status BundleReader::GetBundleEntryProto(StringPiece key,
BundleEntryProto* entry) {
entry->Clear();
TF_CHECK_OK(status_);
Seek(key);
if (!iter_->Valid() || iter_->key() != key) {
return errors::NotFound("Key ", key, " not found in checkpoint");
}
BundleEntryProto entry_copy;
TF_RETURN_IF_ERROR(
ParseEntryProto(iter_->key(), iter_->value(), &entry_copy));
if (!TensorShape::IsValid(entry_copy.shape())) {
return errors::DataLoss("Invalid tensor shape: ", key, " ",
entry_copy.shape().ShortDebugString());
}
entry->Swap(&entry_copy);
return absl::OkStatus();
}
Status BundleReader::GetValue(const BundleEntryProto& entry, Tensor* val) {
Tensor* ret = val;
const TensorShape stored_shape(TensorShape(entry.shape()));
if (val->NumElements() == 0) {
ret = new Tensor(entry.dtype(), stored_shape);
}
if (entry.dtype() != DT_STRING && entry.dtype() != DT_VARIANT) {
if (entry.size() != ret->TotalBytes()) {
return errors::DataLoss("Invalid size in bundle entry: key ", key(),
"; stored size ", entry.size(),
"; expected size ", ret->TotalBytes());
}
} else if (entry.dtype() == DT_STRING) {
const size_t lower_bound = ret->NumElements() + ret->TotalBytes() -
sizeof(tstring) * ret->NumElements();
if (entry.size() < lower_bound) {
return errors::DataLoss("Invalid size in bundle entry: key ", key(),
"; stored size ", entry.size(),
"; expected size is at least ", lower_bound);
}
}
io::InputBuffer* buffered_file = data_[entry.shard_id()];
if (buffered_file == nullptr) {
RandomAccessFile* file = nullptr;
TF_RETURN_IF_ERROR(cache_->GetFile(
DataFilename(prefix_, entry.shard_id(), num_shards_), &file));
buffered_file = new io::InputBuffer(file, kBufferSize);
data_[entry.shard_id()] = buffered_file;
}
CHECK(buffered_file != nullptr);
TF_RETURN_IF_ERROR(buffered_file->Seek(entry.offset()));
uint32 actual_crc32c = 0;
if (DataTypeCanUseMemcpy(entry.dtype())) {
char* backing_buffer = const_cast<char*>((ret->tensor_data().data()));
size_t unused_bytes_read;
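// Reads larger than the buffer size bypass the buffered file. Very large
// tensors (>= kLargeTensorThreshold) are split into sections and read in
// parallel, each section on its own RandomAccessFile, using up to
// kMaxFileReadThreads threads.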
if (entry.size() > kBufferSize || enable_multi_threading_for_testing_) {
StringPiece sp;
if (!enable_multi_threading_for_testing_ &&
entry.size() < kLargeTensorThreshold) {
TF_RETURN_IF_ERROR(buffered_file->file()->Read(
entry.offset(), entry.size(), &sp, backing_buffer));
if (sp.data() != backing_buffer) {
memmove(backing_buffer, sp.data(), entry.size());
}
} else {
int64_t section_size = kMinSectionSize;
int64_t thread_pool_size =
(entry.size() + kMinSectionSize - 1) / kMinSectionSize;
if (thread_pool_size > kMaxFileReadThreads ||
enable_multi_threading_for_testing_) {
thread_pool_size = kMaxFileReadThreads;
section_size =
(entry.size() + kMaxFileReadThreads - 1) / kMaxFileReadThreads;
}
std::vector<Status> statuses(thread_pool_size);
auto reader_pool = std::make_unique<thread::ThreadPool>(
Env::Default(), "restore_large_tensor", thread_pool_size);
for (int i = 0; i < thread_pool_size; ++i) {
reader_pool->Schedule([&, i]() {
int64_t offset = i * section_size;
int64_t size = i == thread_pool_size - 1 ? entry.size() - offset
: section_size;
std::unique_ptr<RandomAccessFile> section_reader = nullptr;
StringPiece sp;
if (auto file_status = env_->NewRandomAccessFile(
DataFilename(prefix_, entry.shard_id(), num_shards_),
&section_reader);
!file_status.ok()) {
statuses[i] = file_status;
return;
}
auto backing_buffer_current_pos = backing_buffer + offset;
auto status = section_reader->Read(entry.offset() + offset, size,
&sp, backing_buffer_current_pos);
if (sp.data() != backing_buffer_current_pos) {
memmove(backing_buffer_current_pos, sp.data(), size);
}
statuses[i] = std::move(status);
});
}
reader_pool = nullptr;
for (const auto& status : statuses) {
TF_RETURN_IF_ERROR(status);
}
}
} else {
TF_RETURN_IF_ERROR(buffered_file->ReadNBytes(entry.size(), backing_buffer,
&unused_bytes_read));
}
actual_crc32c = crc32c::Value(backing_buffer, entry.size());
if (need_to_swap_bytes_) {
TF_RETURN_IF_ERROR(ByteSwapTensor(ret));
}
} else if (entry.dtype() == DT_VARIANT) {
if (need_to_swap_bytes_) {
return errors::Unimplemented(
"TensorBundle at ", prefix_,
"is of a different endianness than this machine's hardware, and "
"the bundle contains a variant (arbitrary C++ type) tensor. "
"Byte-swapping of variant tensors is not currently implemented.");
}
TF_RETURN_IF_ERROR(ReadVariantTensor(buffered_file, ret, entry.offset(),
entry.size(), &actual_crc32c));
} else {
TF_RETURN_IF_ERROR(ReadStringTensor(
buffered_file, ret->NumElements(), entry.offset(), entry.size(),
GetStringBackingBuffer(*ret), &actual_crc32c, need_to_swap_bytes_));
}
if (crc32c::Unmask(entry.crc32c()) != actual_crc32c) {
return errors::DataLoss(
"TensorBundle at ", prefix_, " shard ", entry.shard_id(), " (",
entry.size(), " bytes): Checksum does not match: stored ",
strings::Printf("%08u", crc32c::Unmask(entry.crc32c())),
" vs. calculated on the restored bytes ", actual_crc32c);
}
*val = *ret;
if (ret != val) delete ret;
return absl::OkStatus();
}
Status BundleReader::Lookup(StringPiece key, Tensor* val) {
CHECK(val != nullptr);
BundleEntryProto entry;
TF_RETURN_IF_ERROR(GetBundleEntryProto(key, &entry));
if (entry.slices().empty()) {
return GetValue(entry, val);
} else {
return GetSliceValue(
key, entry,
TensorSlice(TensorShape(entry.shape()).dims()), val);
}
}
Status BundleReader::ReadCurrent(Tensor* val) {
CHECK(val != nullptr);
BundleEntryProto entry;
TF_RETURN_IF_ERROR(ParseEntryProto(iter_->key(), iter_->value(), &entry));
if (!TensorShape::IsValid(entry.shape())) {
return errors::DataLoss("Invalid tensor shape: ", iter_->key(), " ",
entry.shape().ShortDebugString());
}
if (entry.slices().empty()) {
return GetValue(entry, val);
} else {
return GetSliceValue(
iter_->key(), entry,
TensorSlice(TensorShape(entry.shape()).dims()), val);
}
}
Status BundleReader::LookupTensorSlices(StringPiece key,
std::vector<TensorSlice>* slices) {
slices->clear();
BundleEntryProto entry;
TF_RETURN_IF_ERROR(GetBundleEntryProto(key, &entry));
slices->reserve(entry.slices_size());
for (const auto& slice : entry.slices()) {
slices->emplace_back(slice);
}
return absl::OkStatus();
}
Status BundleReader::LookupSlice(StringPiece full_tensor_key,
const TensorSlice& slice_spec, Tensor* val) {
CHECK(val != nullptr);
BundleEntryProto entry;
TF_RETURN_IF_ERROR(GetBundleEntryProto(full_tensor_key, &entry));
return GetSliceValue(full_tensor_key, entry, slice_spec, val);
}
Status BundleReader::GetSliceValue(StringPiece full_tensor_key,
const BundleEntryProto& full_tensor_entry,
const TensorSlice& slice_spec, Tensor* val) {
using checkpoint::RegisterTensorSlice;
using checkpoint::TensorSliceSet;
DCHECK_GE(full_tensor_entry.slices_size(), 0);
const TensorShape full_shape(TensorShape(full_tensor_entry.shape()));
std::vector<std::pair<TensorSlice, string>> details;
const string full_tensor_key_string(full_tensor_key);
const TensorSliceSet* tss =
gtl::FindPtrOrNull(tensor_slices_, full_tensor_key_string);
if (tss == nullptr) {
if (full_tensor_entry.slices().empty()) {
TF_RETURN_IF_ERROR(RegisterTensorSlice(
full_tensor_key_string, full_shape, full_tensor_entry.dtype(),
"",
TensorSlice(full_shape.dims()), &tensor_slices_));
}
for (const TensorSliceProto& slice : full_tensor_entry.slices()) {
TF_RETURN_IF_ERROR(RegisterTensorSlice(
full_tensor_key_string, full_shape, full_tensor_entry.dtype(),
"", TensorSlice(slice), &tensor_slices_));
}
tss = gtl::FindPtrOrNull(tensor_slices_, full_tensor_key_string);
CHECK_NE(tss, nullptr);
}
if (!tss->QueryMeta(slice_spec, &details)) {
return errors::InvalidArgument(
"Does not have sufficient slices for partitioned tensor ",
full_tensor_key,
" to restore in slice_spec: ", slice_spec.DebugString());
}
BundleEntryProto stored_slice_entry = full_tensor_entry;
for (const auto& slice_tag_pair : details) {
const TensorSlice& stored_slice = slice_tag_pair.first;
if (!stored_slice.IsFull()) {
const string encoded_stored_slice_name =
checkpoint::EncodeTensorNameSlice(full_tensor_key_string,
stored_slice);
status_ =
GetBundleEntryProto(encoded_stored_slice_name, &stored_slice_entry);
if (!status_.ok()) return status_;
}
TensorShape stored_slice_shape(stored_slice_entry.shape());
if (stored_slice == slice_spec ||
(stored_slice_shape == val->shape() &&
IsFullSlice(stored_slice, stored_slice_shape) &&
IsFullSlice(slice_spec, stored_slice_shape))) {
VLOG(1) << "Optimized for common case: directly copying into "
"pre-allocated buffer; spec: "
<< slice_spec.DebugString();
status_ = GetValue(stored_slice_entry, val);
return status_;
}
Tensor stored_slice_tensor(stored_slice_entry.dtype(), stored_slice_shape);
status_ = GetValue(stored_slice_entry, &stored_slice_tensor);
if (!status_.ok()) return status_;
const DataType common_dtype = full_tensor_entry.dtype();
switch (common_dtype) {
#define HANDLE_COPY(T) \
case DataTypeToEnum<T>::value: \
CHECK(CopyDataFromTensorSliceToTensorSlice( \
full_shape, stored_slice, slice_spec, \
stored_slice_tensor.flat<T>().data(), val->flat<T>().data())); \
break;
HANDLE_COPY(float)
HANDLE_COPY(double)
HANDLE_COPY(int32)
HANDLE_COPY(uint8)
HANDLE_COPY(int16)
HANDLE_COPY(int8)
HANDLE_COPY(complex64)
HANDLE_COPY(complex128)
HANDLE_COPY(int64_t)
HANDLE_COPY(bool)
HANDLE_COPY(qint32)
HANDLE_COPY(quint8)
HANDLE_COPY(qint8)
HANDLE_COPY(bfloat16)
HANDLE_COPY(int4)
HANDLE_COPY(uint4)
default:
return errors::InvalidArgument("Dtype ", DataTypeString(common_dtype),
" not supported.");
}
#undef HANDLE_COPY
}
return absl::OkStatus();
}
bool BundleReader::Contains(StringPiece key) {
Seek(key);
return Valid() && (this->key() == key);
}
Status BundleReader::LookupDtypeAndShape(StringPiece key, DataType* dtype,
TensorShape* shape) {
BundleEntryProto entry;
TF_RETURN_IF_ERROR(GetBundleEntryProto(key, &entry));
*dtype = entry.dtype();
*shape = TensorShape(entry.shape());
return absl::OkStatus();
}
Status BundleReader::LookupTensorShape(StringPiece key, TensorShape* shape) {
DataType ignored;
return LookupDtypeAndShape(key, &ignored, shape);
}
string BundleReader::DebugString() {
string shape_str;
BundleEntryProto entry;
Seek(kHeaderEntryKey);
for (Next(); Valid(); Next()) {
CHECK(entry.ParseFromArray(value().data(), value().size()));
if (entry.slices_size() > 0) continue;
strings::StrAppend(&shape_str, key(), " (", DataType_Name(entry.dtype()),
") ", TensorShape(entry.shape()).DebugString());
strings::StrAppend(&shape_str, "\n");
}
return shape_str;
}
BundleCache::BundleCache(Env* env) : env_(env) {}
BundleCache::FileState* BundleCache::EnsureOpened(std::string name) {
FileState* f;
{
absl::MutexLock l(&mu_);
auto& slot = opened_files_[name];
if (slot == nullptr) {
slot = std::make_unique<FileState>();
}
f = slot.get();
}
absl::call_once(f->once, [this, name = std::move(name), f] {
f->open_status = env_->NewRandomAccessFile(name, &f->file);
});
return f;
}
Status BundleCache::GetFile(const std::string& fname, RandomAccessFile** file) {
FileState* f = EnsureOpened(fname);
*file = f->file.get();
return f->open_status;
}
namespace {
inline char* AlignedMalloc(size_t size) {
char* buffer = static_cast<char*>(port::AlignedMalloc(size, 64));
DCHECK(buffer);
return buffer;
}
}
} | #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include <random>
#include <string>
#include <vector>
#if defined(_WIN32)
#include <windows.h>
#endif
#include "absl/status/status.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/tensor_bundle.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using ::testing::ElementsAre;
namespace {
string Prefix(const string& prefix) {
return strings::StrCat(testing::TmpDir(), "/", prefix);
}
string TestdataPrefix(const string& prefix) {
return strings::StrCat(testing::TensorFlowSrcRoot(),
"/core/util/tensor_bundle/testdata/", prefix);
}
template <typename T>
Tensor Constant(T v, TensorShape shape) {
Tensor ret(DataTypeToEnum<T>::value, shape);
ret.flat<T>().setConstant(v);
return ret;
}
template <typename T>
Tensor Constant_2x3(T v) {
return Constant(v, TensorShape({2, 3}));
}
template <typename T>
Tensor Constant_100x100(T v) {
return Constant(v, TensorShape({100, 100}));
}
Tensor ByteSwap(Tensor t) {
Tensor ret = tensor::DeepCopy(t);
TF_EXPECT_OK(ByteSwapTensor(&ret));
return ret;
}
template <typename T>
void Expect(BundleReader* reader, const string& key,
const Tensor& expected_val) {
EXPECT_TRUE(reader->Contains(key));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader->LookupDtypeAndShape(key, &dtype, &shape));
EXPECT_EQ(expected_val.dtype(), dtype);
EXPECT_EQ(expected_val.shape(), shape);
Tensor val(expected_val.dtype(), shape);
TF_ASSERT_OK(reader->Lookup(key, &val));
test::ExpectTensorEqual<T>(val, expected_val);
}
template <class T>
void ExpectVariant(BundleReader* reader, const string& key,
const Tensor& expected_t) {
EXPECT_TRUE(reader->Contains(key));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader->LookupDtypeAndShape(key, &dtype, &shape));
EXPECT_EQ(expected_t.dtype(), dtype);
EXPECT_EQ(expected_t.shape(), shape);
Tensor actual_t(dtype, shape);
TF_ASSERT_OK(reader->Lookup(key, &actual_t));
for (int i = 0; i < expected_t.NumElements(); i++) {
Variant actual_var = actual_t.flat<Variant>()(i);
Variant expected_var = expected_t.flat<Variant>()(i);
EXPECT_EQ(actual_var.TypeName(), expected_var.TypeName());
auto* actual_val = actual_var.get<T>();
auto* expected_val = expected_var.get<T>();
EXPECT_EQ(*expected_val, *actual_val);
}
}
template <typename T>
void ExpectNext(BundleReader* reader, const Tensor& expected_val) {
EXPECT_TRUE(reader->Valid());
reader->Next();
TF_ASSERT_OK(reader->status());
Tensor val;
TF_ASSERT_OK(reader->ReadCurrent(&val));
test::ExpectTensorEqual<T>(val, expected_val);
}
std::vector<string> AllTensorKeys(BundleReader* reader) {
std::vector<string> ret;
reader->Seek(kHeaderEntryKey);
reader->Next();
for (; reader->Valid(); reader->Next()) {
ret.emplace_back(reader->key());
}
return ret;
}
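// Rewrites the metadata table with the opposite endianness bit in the header
// so tests can exercise the byte-swapping read path.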
Status FlipEndiannessBit(const string& prefix) {
Env* env = Env::Default();
const string metadata_tmp_path = Prefix("some_tmp_path");
std::unique_ptr<WritableFile> metadata_file;
TF_RETURN_IF_ERROR(env->NewWritableFile(metadata_tmp_path, &metadata_file));
std::unique_ptr<table::TableBuilder> builder;
{
const string filename = MetaFilename(prefix);
uint64 file_size;
TF_RETURN_IF_ERROR(env->GetFileSize(filename, &file_size));
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename, &file));
table::Table* table = nullptr;
TF_RETURN_IF_ERROR(
table::Table::Open(table::Options(), file.get(), file_size, &table));
std::unique_ptr<table::Table> table_deleter(table);
std::unique_ptr<table::Iterator> iter(table->NewIterator());
iter->Seek(kHeaderEntryKey);
CHECK(iter->Valid());
BundleHeaderProto header;
CHECK(header.ParseFromArray(iter->value().data(), iter->value().size()));
if (header.endianness() == BundleHeaderProto::LITTLE) {
header.set_endianness(BundleHeaderProto::BIG);
} else {
header.set_endianness(BundleHeaderProto::LITTLE);
}
builder.reset(
new table::TableBuilder(table::Options(), metadata_file.get()));
builder->Add(iter->key(), header.SerializeAsString());
iter->Next();
for (; iter->Valid(); iter->Next())
builder->Add(iter->key(), iter->value());
}
TF_RETURN_IF_ERROR(builder->Finish());
TF_RETURN_IF_ERROR(env->RenameFile(metadata_tmp_path, MetaFilename(prefix)));
return metadata_file->Close();
}
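// Writes keys out of order, then verifies sorted iteration, lookup by key,
// sequential ReadCurrent, and merging of two bundles.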
template <typename T>
void TestBasic() {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("foo_003", Constant_2x3(T(3))));
TF_EXPECT_OK(writer.Add("foo_000", Constant_2x3(T(0))));
TF_EXPECT_OK(writer.Add("foo_002", Constant_2x3(T(2))));
TF_EXPECT_OK(writer.Add("foo_001", Constant_2x3(T(1))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "foo_000", Constant_2x3(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
{
BundleWriter writer(Env::Default(), Prefix("bar"));
TF_EXPECT_OK(writer.Add("bar_003", Constant_2x3(T(3))));
TF_EXPECT_OK(writer.Add("bar_000", Constant_2x3(T(0))));
TF_EXPECT_OK(writer.Add("bar_002", Constant_2x3(T(2))));
TF_EXPECT_OK(writer.Add("bar_001", Constant_2x3(T(1))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003"}));
Expect<T>(&reader, "bar_003", Constant_2x3(T(3)));
Expect<T>(&reader, "bar_002", Constant_2x3(T(2)));
Expect<T>(&reader, "bar_001", Constant_2x3(T(1)));
Expect<T>(&reader, "bar_000", Constant_2x3(T(0)));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
TF_ASSERT_OK(MergeBundles(Env::Default(), {Prefix("foo"), Prefix("bar")},
Prefix("merged")));
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003",
"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "bar_000", Constant_2x3(T(0)));
Expect<T>(&reader, "bar_001", Constant_2x3(T(1)));
Expect<T>(&reader, "bar_002", Constant_2x3(T(2)));
Expect<T>(&reader, "bar_003", Constant_2x3(T(3)));
Expect<T>(&reader, "foo_000", Constant_2x3(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
}
template <typename T>
void TestByteSwap(const T* forward, const T* swapped, int array_len) {
auto bytes_per_elem = sizeof(T);
std::unique_ptr<T[]> forward_copy(new T[array_len]);
std::memcpy(forward_copy.get(), forward, array_len * bytes_per_elem);
TF_EXPECT_OK(ByteSwapArray(reinterpret_cast<char*>(forward_copy.get()),
bytes_per_elem, array_len));
for (int i = 0; i < array_len; i++) {
EXPECT_EQ(forward_copy.get()[i], swapped[i]);
}
auto shape = TensorShape({array_len});
auto dtype = DataTypeToEnum<T>::value;
Tensor forward_tensor(dtype, shape);
Tensor swapped_tensor(dtype, shape);
std::memcpy(const_cast<char*>(forward_tensor.tensor_data().data()), forward,
array_len * bytes_per_elem);
std::memcpy(const_cast<char*>(swapped_tensor.tensor_data().data()), swapped,
array_len * bytes_per_elem);
TF_EXPECT_OK(ByteSwapTensor(&forward_tensor));
test::ExpectTensorEqual<T>(forward_tensor, swapped_tensor);
}
TEST(TensorBundleTest, SwapBytes) {
ByteSwap(Constant_2x3<int>(42));
EXPECT_NE(absl::OkStatus(), FlipEndiannessBit(Prefix("not_a_valid_prefix")));
const int arr_len_16 = 4;
const uint16_t forward_16[] = {0x1de5, 0xd017, 0xf1ea, 0xc0a1};
const uint16_t swapped_16[] = {0xe51d, 0x17d0, 0xeaf1, 0xa1c0};
const int arr_len_32 = 2;
const uint32_t forward_32[] = {0x0ddba115, 0xf01dab1e};
const uint32_t swapped_32[] = {0x15a1db0d, 0x1eab1df0};
const int arr_len_64 = 2;
const uint64_t forward_64[] = {0xf005ba11caba1000, 0x5ca1ab1ecab005e5};
const uint64_t swapped_64[] = {0x0010baca11ba05f0, 0xe505b0ca1eaba15c};
TestByteSwap(forward_16, swapped_16, arr_len_16);
TestByteSwap(reinterpret_cast<const int16_t*>(forward_16),
reinterpret_cast<const int16_t*>(swapped_16), arr_len_16);
TestByteSwap(reinterpret_cast<const bfloat16*>(forward_16),
reinterpret_cast<const bfloat16*>(swapped_16), arr_len_16);
TestByteSwap(forward_32, swapped_32, arr_len_32);
TestByteSwap(reinterpret_cast<const int32_t*>(forward_32),
reinterpret_cast<const int32_t*>(swapped_32), arr_len_32);
TestByteSwap(reinterpret_cast<const float*>(forward_32),
reinterpret_cast<const float*>(swapped_32), arr_len_32);
TestByteSwap(reinterpret_cast<const uint64*>(forward_64),
reinterpret_cast<const uint64*>(swapped_64), arr_len_64);
TestByteSwap(reinterpret_cast<const int64_t*>(forward_64),
reinterpret_cast<const int64_t*>(swapped_64), arr_len_64);
TestByteSwap(reinterpret_cast<const double*>(forward_64),
reinterpret_cast<const double*>(swapped_64), arr_len_64);
const float* forward_float = reinterpret_cast<const float*>(forward_32);
const float* swapped_float = reinterpret_cast<const float*>(swapped_32);
const double* forward_double = reinterpret_cast<const double*>(forward_64);
const double* swapped_double = reinterpret_cast<const double*>(swapped_64);
Tensor forward_complex64 = Constant_2x3<complex64>(
std::complex<float>(forward_float[0], forward_float[1]));
Tensor swapped_complex64 = Constant_2x3<complex64>(
std::complex<float>(swapped_float[0], swapped_float[1]));
Tensor forward_complex128 = Constant_2x3<complex128>(
std::complex<double>(forward_double[0], forward_double[1]));
Tensor swapped_complex128 = Constant_2x3<complex128>(
std::complex<double>(swapped_double[0], swapped_double[1]));
TF_EXPECT_OK(ByteSwapTensor(&forward_complex64));
test::ExpectTensorEqual<complex64>(forward_complex64, swapped_complex64);
TF_EXPECT_OK(ByteSwapTensor(&forward_complex128));
test::ExpectTensorEqual<complex128>(forward_complex128, swapped_complex128);
}
template <typename T>
void TestEndianness() {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("foo_003", ByteSwap(Constant_2x3<T>(T(3)))));
TF_EXPECT_OK(writer.Add("foo_000", ByteSwap(Constant_2x3<T>(T(0)))));
TF_EXPECT_OK(writer.Add("foo_002", ByteSwap(Constant_2x3<T>(T(2)))));
TF_EXPECT_OK(writer.Add("foo_001", ByteSwap(Constant_2x3<T>(T(1)))));
TF_ASSERT_OK(writer.Finish());
TF_ASSERT_OK(FlipEndiannessBit(Prefix("foo")));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "foo_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3<T>(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
{
BundleWriter writer(Env::Default(), Prefix("bar"));
TF_EXPECT_OK(writer.Add("bar_003", ByteSwap(Constant_2x3<T>(T(3)))));
TF_EXPECT_OK(writer.Add("bar_000", ByteSwap(Constant_2x3<T>(T(0)))));
TF_EXPECT_OK(writer.Add("bar_002", ByteSwap(Constant_2x3<T>(T(2)))));
TF_EXPECT_OK(writer.Add("bar_001", ByteSwap(Constant_2x3<T>(T(1)))));
TF_ASSERT_OK(writer.Finish());
TF_ASSERT_OK(FlipEndiannessBit(Prefix("bar")));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003"}));
Expect<T>(&reader, "bar_003", Constant_2x3<T>(T(3)));
Expect<T>(&reader, "bar_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "bar_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "bar_000", Constant_2x3<T>(T(0)));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
TF_ASSERT_OK(MergeBundles(Env::Default(), {Prefix("foo"), Prefix("bar")},
Prefix("merged")));
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003",
"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "bar_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "bar_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "bar_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "bar_003", Constant_2x3<T>(T(3)));
Expect<T>(&reader, "foo_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3<T>(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
}
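// Covers degenerate shapes: a scalar and tensors with zero-element
// dimensions, which still need round-trippable metadata.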
template <typename T>
void TestNonStandardShapes() {
{
BundleWriter writer(Env::Default(), Prefix("nonstandard"));
TF_EXPECT_OK(writer.Add("scalar", Constant(T(0), TensorShape())));
TF_EXPECT_OK(
writer.Add("non_standard0", Constant(T(0), TensorShape({0, 1618}))));
TF_EXPECT_OK(
writer.Add("non_standard1", Constant(T(0), TensorShape({16, 0, 18}))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("nonstandard"));
TF_ASSERT_OK(reader.status());
Expect<T>(&reader, "scalar", Constant(T(0), TensorShape()));
Expect<T>(&reader, "non_standard0", Constant(T(0), TensorShape({0, 1618})));
Expect<T>(&reader, "non_standard1",
Constant(T(0), TensorShape({16, 0, 18})));
}
}
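// Writes a bundle header carrying the given VersionDef and expects the
// reader to reject it with a status message starting with expected_error.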
void VersionTest(const VersionDef& version, StringPiece expected_error) {
const string path = Prefix("version_test");
{
BundleHeaderProto header;
*header.mutable_version() = version;
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewWritableFile(MetaFilename(path), &file));
table::TableBuilder builder(table::Options(), file.get());
builder.Add(kHeaderEntryKey, header.SerializeAsString());
TF_ASSERT_OK(builder.Finish());
}
BundleReader reader(Env::Default(), path);
EXPECT_TRUE(errors::IsInvalidArgument(reader.status()));
EXPECT_TRUE(absl::StartsWith(reader.status().message(), expected_error));
}
}
TEST(TensorBundleTest, Basic) {
TestBasic<float>();
TestBasic<double>();
TestBasic<int32>();
TestBasic<uint8>();
TestBasic<int16>();
TestBasic<int8>();
TestBasic<complex64>();
TestBasic<complex128>();
TestBasic<int64_t>();
TestBasic<bool>();
TestBasic<qint32>();
TestBasic<quint8>();
TestBasic<qint8>();
TestBasic<bfloat16>();
}
TEST(TensorBundleTest, Endianness) {
TestEndianness<float>();
TestEndianness<double>();
TestEndianness<int32>();
TestEndianness<uint8>();
TestEndianness<int16>();
TestEndianness<int8>();
TestEndianness<complex64>();
TestEndianness<complex128>();
TestEndianness<int64_t>();
TestEndianness<bool>();
TestEndianness<qint32>();
TestEndianness<quint8>();
TestEndianness<qint8>();
TestEndianness<bfloat16>();
}
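// Writes one variable as two column slices and verifies full-tensor reads,
// slice enumeration, and reads of slices that straddle the saved ones.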
TEST(TensorBundleTest, PartitionedVariables) {
const TensorShape kFullShape({5, 10});
TensorSlice slice1 = TensorSlice::ParseOrDie("-:0,1");
TensorSlice slice2 = TensorSlice::ParseOrDie("-:1,9");
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(writer.AddSlice("foo", kFullShape, slice1,
Constant<float>(0., TensorShape({5, 1}))));
TF_ASSERT_OK(writer.AddSlice("foo", kFullShape, slice2,
Constant<float>(1., TensorShape({5, 9}))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
Tensor expected_val(DT_FLOAT, kFullShape);
test::FillFn<float>(&expected_val, [](int offset) -> float {
if (offset % 10 == 0) {
return 0;
}
return 1;
});
Tensor val(DT_FLOAT, kFullShape);
TF_ASSERT_OK(reader.Lookup("foo", &val));
test::ExpectTensorEqual<float>(val, expected_val);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
std::vector<TensorSlice> slices;
TF_ASSERT_OK(reader.LookupTensorSlices("foo", &slices));
EXPECT_EQ(2, slices.size());
EXPECT_EQ(slice1.DebugString(), slices[0].DebugString());
EXPECT_EQ(slice2.DebugString(), slices[1].DebugString());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice distinct_slice = TensorSlice::ParseOrDie("-:0,2");
Tensor expected_val(DT_FLOAT, TensorShape({5, 2}));
test::FillFn<float>(&expected_val, [](int offset) -> float {
if (offset % 2 == 0) {
return 0;
}
return 1;
});
Tensor val(DT_FLOAT, TensorShape({5, 2}));
TF_ASSERT_OK(reader.LookupSlice("foo", distinct_slice, &val));
test::ExpectTensorEqual<float>(val, expected_val);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice distinct_slice = TensorSlice::ParseOrDie("-:2,2");
Tensor val(DT_FLOAT, TensorShape({5, 2}));
TF_ASSERT_OK(reader.LookupSlice("foo", distinct_slice, &val));
test::ExpectTensorEqual<float>(val,
Constant<float>(1., TensorShape({5, 2})));
}
}
TEST(TensorBundleTest, EquivalentSliceTest) {
const TensorShape kFullShape({5, 10});
const Tensor kExpected(Constant<float>(1., kFullShape));
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(writer.AddSlice("no_extents", kFullShape,
TensorSlice::ParseOrDie("-:-"), kExpected));
TF_ASSERT_OK(writer.AddSlice("both_extents", kFullShape,
TensorSlice::ParseOrDie("0,5:0,10"),
kExpected));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("-:-");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("no_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("0,5:0,10");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("both_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("0,5:0,10");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("no_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("-:-");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("both_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
}
TEST(TensorBundleTest, NonStandardShapes) {
TestNonStandardShapes<float>();
TestNonStandardShapes<double>();
TestNonStandardShapes<int32>();
TestNonStandardShapes<uint8>();
TestNonStandardShapes<int16>();
TestNonStandardShapes<int8>();
TestNonStandardShapes<complex64>();
TestNonStandardShapes<complex128>();
TestNonStandardShapes<int64_t>();
TestNonStandardShapes<bool>();
TestNonStandardShapes<qint32>();
TestNonStandardShapes<quint8>();
TestNonStandardShapes<qint8>();
TestNonStandardShapes<bfloat16>();
}
TEST(TensorBundleTest, StringTensorsOldFormat) {
BundleReader reader(Env::Default(), TestdataPrefix("old_string_tensors/foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(AllTensorKeys(&reader),
std::vector<string>({"floats", "scalar", "string_tensor", "strs"}));
Expect<tstring>(&reader, "string_tensor",
Tensor(DT_STRING, TensorShape({1})));
Expect<tstring>(&reader, "scalar", test::AsTensor<tstring>({"hello"}));
Expect<tstring>(
&reader, "strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 10, 'c')}));
Expect<float>(&reader, "floats", Constant_2x3<float>(16.18));
}
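// Returns the VM page size so the long-string check in StringTensors can
// compare the backing buffer one page-sized block at a time.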
size_t GetPageSize() {
#ifdef _WIN32
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
return getpagesize();
#else
return sysconf(_SC_PAGESIZE);
#endif
}
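// Includes a string longer than UINT32_MAX bytes to exercise 64-bit
// string-length handling in the bundle format.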
TEST(TensorBundleTest, StringTensors) {
constexpr size_t kLongLength = static_cast<size_t>(UINT32_MAX) + 1;
Tensor long_string_tensor(DT_STRING, TensorShape({1}));
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("string_tensor",
Tensor(DT_STRING, TensorShape({1}))));
TF_EXPECT_OK(writer.Add("scalar", test::AsTensor<tstring>({"hello"})));
TF_EXPECT_OK(writer.Add(
"strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 25, 'c')})));
tstring* backing_string = long_string_tensor.flat<tstring>().data();
backing_string->resize_uninitialized(kLongLength);
std::char_traits<char>::assign(backing_string->data(), kLongLength, 'd');
TF_EXPECT_OK(writer.Add("long_scalar", long_string_tensor));
TF_EXPECT_OK(writer.Add("floats", Constant_2x3<float>(16.18)));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(AllTensorKeys(&reader),
std::vector<string>({"floats", "long_scalar", "scalar",
"string_tensor", "strs"}));
Expect<tstring>(&reader, "string_tensor",
Tensor(DT_STRING, TensorShape({1})));
Expect<tstring>(&reader, "scalar", test::AsTensor<tstring>({"hello"}));
Expect<tstring>(
&reader, "strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 25, 'c')}));
Expect<float>(&reader, "floats", Constant_2x3<float>(16.18));
EXPECT_TRUE(reader.Contains("long_scalar"));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader.LookupDtypeAndShape("long_scalar", &dtype, &shape));
EXPECT_EQ(DT_STRING, dtype);
EXPECT_EQ(TensorShape({1}), shape);
tstring* backing_string = long_string_tensor.flat<tstring>().data();
std::char_traits<char>::assign(backing_string->data(), kLongLength, 'e');
TF_ASSERT_OK(reader.Lookup("long_scalar", &long_string_tensor));
ASSERT_EQ(backing_string, long_string_tensor.flat<tstring>().data());
EXPECT_EQ(kLongLength, backing_string->length());
const size_t kPageSize = GetPageSize();
char* testblock = new char[kPageSize];
memset(testblock, 'd', sizeof(char) * kPageSize);
for (size_t i = 0; i < kLongLength; i += kPageSize) {
if (memcmp(testblock, backing_string->data() + i, kPageSize) != 0) {
FAIL() << "long_scalar is not full of 'd's as expected.";
break;
}
}
delete[] testblock;
}
}
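// Minimal Variant payload with Encode/Decode so variant tensors can be
// round-tripped through a bundle.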
class VariantObject {
public:
VariantObject() {}
VariantObject(const string& metadata, int64_t value)
: metadata_(metadata), value_(value) {}
string TypeName() const { return "TEST VariantObject"; }
void Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
data->set_metadata(metadata_);
Tensor val_t = Tensor(DT_INT64, TensorShape({}));
val_t.scalar<int64_t>()() = value_;
*(data->add_tensors()) = val_t;
}
bool Decode(const VariantTensorData& data) {
EXPECT_EQ(data.type_name(), TypeName());
data.get_metadata(&metadata_);
EXPECT_EQ(data.tensors_size(), 1);
value_ = data.tensors(0).scalar<int64_t>()();
return true;
}
bool operator==(const VariantObject other) const {
return metadata_ == other.metadata_ && value_ == other.value_;
}
string metadata_;
int64_t value_;
};
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(VariantObject, "TEST VariantObject");
TEST(TensorBundleTest, VariantTensors) {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(
writer.Add("variant_tensor",
test::AsTensor<Variant>({VariantObject("test", 10),
VariantObject("test1", 20)})));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectVariant<VariantObject>(
&reader, "variant_tensor",
test::AsTensor<Variant>(
{VariantObject("test", 10), VariantObject("test1", 20)}));
}
}
TEST(TensorBundleTest, DirectoryStructure) {
Env* env = Env::Default();
const std::vector<string> kBundlePrefixes = {Prefix("worker0"),
Prefix("worker1")};
for (int i = 0; i < 2; ++i) {
BundleWriter writer(env, kBundlePrefixes[i]);
TF_EXPECT_OK(
writer.Add(strings::StrCat("tensor", i), Constant_2x3<float>(0.)));
TF_ASSERT_OK(writer.Finish());
}
auto CheckDirFiles = [env](const string& bundle_prefix,
absl::Span<const string> expected_files) {
StringPiece dir = io::Dirname(bundle_prefix);
for (const string& expected_file : expected_files) {
TF_EXPECT_OK(env->FileExists(io::JoinPath(dir, expected_file)));
}
};
CheckDirFiles(kBundlePrefixes[0],
{"worker0.index", "worker0.data-00000-of-00001"});
CheckDirFiles(kBundlePrefixes[1],
{"worker1.index", "worker1.data-00000-of-00001"});
const string kAnotherPrefix = Prefix("another");
TF_ASSERT_OK(MergeBundles(env, {kBundlePrefixes[0]}, kAnotherPrefix));
CheckDirFiles(kAnotherPrefix,
{"another.index", "another.data-00000-of-00001"});
const string kMerged = Prefix("merged");
TF_ASSERT_OK(
MergeBundles(env, {kAnotherPrefix, kBundlePrefixes[1]}, kMerged));
CheckDirFiles(kMerged, {"merged.index", "merged.data-00000-of-00002",
"merged.data-00001-of-00002"});
}
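// SortForSequentialAccess reorders the keys to match their on-disk layout
// (as the expectations below show, writer1's keys come back in the reverse
// order they were added), so a sequential scan reads each data file front
// to back.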
TEST(TensorBundleTest, SortForSequentialAccess) {
Env* env = Env::Default();
const std::vector<string> kBundlePrefixes = {Prefix("worker0"),
Prefix("worker1")};
BundleWriter writer0(env, kBundlePrefixes[0]);
for (int i = 0; i < 3; ++i) {
TF_EXPECT_OK(
writer0.Add(strings::StrCat("tensor-0-", i), Constant_2x3<float>(0.)));
}
TF_ASSERT_OK(writer0.Finish());
BundleWriter writer1(env, kBundlePrefixes[1]);
for (int i = 2; i >= 0; --i) {
TF_EXPECT_OK(
writer1.Add(strings::StrCat("tensor-1-", i), Constant_2x3<float>(0.)));
}
TF_ASSERT_OK(writer1.Finish());
const string kMerged = Prefix("merged");
TF_ASSERT_OK(
MergeBundles(env, {kBundlePrefixes[0], kBundlePrefixes[1]}, kMerged));
BundleReader reader(env, kMerged);
TF_ASSERT_OK(reader.status());
std::vector<string> tensor_names = {"tensor-1-0", "tensor-0-1", "tensor-1-2",
"tensor-0-0", "tensor-1-1", "tensor-0-2"};
TF_ASSERT_OK(reader.SortForSequentialAccess<string>(
tensor_names, [](const string& element) { return element; }));
EXPECT_THAT(tensor_names,
ElementsAre("tensor-0-0", "tensor-0-1", "tensor-0-2",
"tensor-1-2", "tensor-1-1", "tensor-1-0"));
}
TEST(TensorBundleTest, Error) {
{
BundleWriter writer(Env::Default(), Prefix("dup"));
TF_EXPECT_OK(writer.Add("foo", Constant_2x3(1.f)));
EXPECT_FALSE(writer.Add("foo", Constant_2x3(2.f)).ok());
EXPECT_TRUE(absl::StrContains(writer.status().ToString(), "duplicate key"));
EXPECT_FALSE(writer.Finish().ok());
}
{
BundleWriter writer(Env::Default(), Prefix("bad"));
EXPECT_TRUE(writer.Finish().ok());
EXPECT_FALSE(writer.Finish().ok());
}
{
BundleReader reader(Env::Default(), Prefix("nonexist"));
EXPECT_EQ(reader.status().code(), error::NOT_FOUND);
}
}
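// FlipByte corrupts one byte of the data file; subsequent lookups must
// fail with DATA_LOSS and a checksum-mismatch message.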
TEST(TensorBundleTest, Checksum) {
auto FlipByte = [](const string& prefix, int pos_lhs,
bool exact_pos = false) {
DCHECK_GE(pos_lhs, 0);
const string& datafile = DataFilename(Prefix(prefix), 0, 1);
string data;
TF_ASSERT_OK(ReadFileToString(Env::Default(), datafile, &data));
int byte_pos = 0;
if (!exact_pos) {
std::mt19937 rng;
std::uniform_int_distribution<int> dist(pos_lhs, data.size() - 1);
byte_pos = dist(rng);
} else {
byte_pos = pos_lhs;
}
data[byte_pos] = ~data[byte_pos];
TF_ASSERT_OK(WriteStringToFile(Env::Default(), datafile, data));
};
auto ExpectLookupFails = [](const string& prefix, const string& key,
const string& expected_msg, Tensor& val) {
BundleReader reader(Env::Default(), Prefix(prefix));
Status status = reader.Lookup(key, &val);
EXPECT_TRUE(errors::IsDataLoss(status));
EXPECT_TRUE(absl::StrContains(status.ToString(), expected_msg));
};
{
BundleWriter writer(Env::Default(), Prefix("singleton"));
TF_EXPECT_OK(writer.Add("foo", Constant_2x3(1.f)));
TF_ASSERT_OK(writer.Finish());
FlipByte("singleton", 0 );
Tensor val(DT_FLOAT, TensorShape({2, 3}));
ExpectLookupFails("singleton", "foo",
"Checksum does not match" , val);
}
{
auto WriteStrings = []() {
BundleWriter writer(Env::Default(), Prefix("strings"));
TF_EXPECT_OK(
writer.Add("foo", test::AsTensor<tstring>({"hello", "world"})));
TF_ASSERT_OK(writer.Finish());
};
for (int i = 0; i < 2; ++i) {
WriteStrings();
FlipByte("strings", i, true );
Tensor val(DT_STRING, TensorShape({2}));
ExpectLookupFails("strings", "foo", "length checksum does not match", val);
}
WriteStrings();
FlipByte("strings", 2 );
Tensor val(DT_STRING, TensorShape({2}));
ExpectLookupFails("strings", "foo",
"Checksum does not match" , val);
}
}
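// Truncating the data file by one byte should surface as OUT_OF_RANGE
// when the tensor contents are read back.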
TEST(TensorBundleTest, TruncatedTensorContents) {
Env* env = Env::Default();
BundleWriter writer(env, Prefix("end"));
TF_EXPECT_OK(writer.Add("key", Constant_2x3<float>(1.0)));
TF_ASSERT_OK(writer.Finish());
const string datafile = DataFilename(Prefix("end"), 0, 1);
string data;
TF_ASSERT_OK(ReadFileToString(env, datafile, &data));
ASSERT_TRUE(!data.empty());
TF_ASSERT_OK(WriteStringToFile(env, datafile,
StringPiece(data.data(), data.size() - 1)));
BundleReader reader(env, Prefix("end"));
TF_ASSERT_OK(reader.status());
Tensor val(DT_FLOAT, TensorShape({2, 3}));
EXPECT_TRUE(errors::IsOutOfRange(reader.Lookup("key", &val)));
}
TEST(TensorBundleTest, HeaderEntry) {
{
BundleWriter writer(Env::Default(), Prefix("b"));
TF_EXPECT_OK(writer.Add("key", Constant_2x3<float>(1.0)));
TF_ASSERT_OK(writer.Finish());
}
BundleHeaderProto header;
{
BundleReader reader(Env::Default(), Prefix("b"));
TF_ASSERT_OK(reader.status());
reader.Seek(kHeaderEntryKey);
ASSERT_TRUE(reader.Valid());
ASSERT_TRUE(ParseProtoUnlimited(&header, reader.value().data(),
reader.value().size()));
}
EXPECT_EQ(1, header.num_shards());
if (port::kLittleEndian) {
EXPECT_EQ(BundleHeaderProto::LITTLE, header.endianness());
} else {
EXPECT_EQ(BundleHeaderProto::BIG, header.endianness());
}
EXPECT_GT(kTensorBundleVersion, 0);
EXPECT_EQ(kTensorBundleVersion, header.version().producer());
EXPECT_EQ(kTensorBundleMinConsumer, header.version().min_consumer());
}
TEST(TensorBundleTest, VersionTest) {
{
VersionDef versions;
versions.set_producer(kTensorBundleVersion + 1);
versions.set_min_consumer(kTensorBundleVersion + 1);
VersionTest(
versions,
strings::StrCat("Checkpoint min consumer version ",
kTensorBundleVersion + 1, " above current version ",
kTensorBundleVersion, " for TensorFlow"));
}
{
VersionDef versions;
versions.set_producer(kTensorBundleMinProducer - 1);
VersionTest(
versions,
strings::StrCat("Checkpoint producer version ",
kTensorBundleMinProducer - 1, " below min producer ",
kTensorBundleMinProducer, " supported by TensorFlow"));
}
{
VersionDef versions;
versions.set_producer(kTensorBundleVersion + 1);
versions.add_bad_consumers(kTensorBundleVersion);
VersionTest(
versions,
strings::StrCat(
"Checkpoint disallows consumer version ", kTensorBundleVersion,
". Please upgrade TensorFlow: this version is likely buggy."));
}
}
TEST(TensorBundleTest, LargeVariableLoadingTest) {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("foo_003", Constant_100x100<float>(3)));
TF_EXPECT_OK(writer.Add("foo_000", Constant_100x100<float>(0)));
TF_EXPECT_OK(writer.Add("foo_002", Constant_100x100<float>(2)));
TF_EXPECT_OK(writer.Add("foo_001", Constant_100x100<float>(1)));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"),
true);
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<float>(&reader, "foo_000", Constant_100x100<float>(0));
Expect<float>(&reader, "foo_001", Constant_100x100<float>(1));
Expect<float>(&reader, "foo_002", Constant_100x100<float>(2));
Expect<float>(&reader, "foo_003", Constant_100x100<float>(3));
}
}
absl::Status CreateFile(Env* env, const std::string& fname) {
std::unique_ptr<WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(fname, &file));
return file->Close();
}
TEST(BundleCacheTest, SameFile) {
Env* env = Env::Default();
BundleCache cache(env);
const std::string fname = Prefix("foo");
TF_EXPECT_OK(CreateFile(env, fname));
RandomAccessFile* f1;
RandomAccessFile* f2;
TF_EXPECT_OK(cache.GetFile(fname, &f1));
TF_EXPECT_OK(cache.GetFile(fname, &f2));
EXPECT_EQ(f1, f2);
}
TEST(BundleCacheTest, DifferentFiles) {
Env* env = Env::Default();
BundleCache cache(env);
const std::string fname1 = Prefix("foo");
const std::string fname2 = Prefix("bar");
TF_EXPECT_OK(CreateFile(env, fname1));
TF_EXPECT_OK(CreateFile(env, fname2));
RandomAccessFile* f1;
RandomAccessFile* f2;
TF_EXPECT_OK(cache.GetFile(fname1, &f1));
TF_EXPECT_OK(cache.GetFile(fname2, &f2));
EXPECT_NE(f1, f2);
}
TEST(BundleCacheTest, OpenError) {
Env* env = Env::Default();
BundleCache cache(env);
const std::string fname = Prefix("no_such_file");
RandomAccessFile* f;
absl::Status s = cache.GetFile(fname, &f);
EXPECT_TRUE(absl::IsNotFound(s)) << s;
}
TEST(BundleCacheTest, ConcurrentGetFile) {
Env* env = Env::Default();
BundleCache cache(env);
const std::string fname = Prefix("foo");
TF_EXPECT_OK(CreateFile(env, fname));
constexpr int n = 10;
RandomAccessFile* files[n];
{
thread::ThreadPool threads(Env::Default(), "concurrent_reads", n);
for (int i = 0; i < n; i++) {
threads.Schedule(
[&, i] { TF_EXPECT_OK(cache.GetFile(fname, &files[i])); });
}
}
for (int i = 0; i < n; i++) {
EXPECT_EQ(files[i], files[0]);
}
}
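// Verifies that every tensor entry's file offset is a multiple of the
// writer's data_alignment option.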
class TensorBundleAlignmentTest : public ::testing::Test {
protected:
template <typename T>
void ExpectAlignment(BundleReader* reader, const string& key, int alignment) {
BundleEntryProto full_tensor_entry;
TF_ASSERT_OK(reader->GetBundleEntryProto(key, &full_tensor_entry));
EXPECT_EQ(0, full_tensor_entry.offset() % alignment);
}
};
TEST_F(TensorBundleAlignmentTest, AlignmentTest) {
{
BundleWriter::Options opts;
opts.data_alignment = 42;
BundleWriter writer(Env::Default(), Prefix("foo"), opts);
TF_EXPECT_OK(writer.Add("foo_003", Constant_2x3<float>(3)));
TF_EXPECT_OK(writer.Add("foo_000", Constant_2x3<float>(0)));
TF_EXPECT_OK(writer.Add("foo_002", Constant_2x3<float>(2)));
TF_EXPECT_OK(writer.Add("foo_001", Constant_2x3<float>(1)));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<float>(&reader, "foo_000", Constant_2x3<float>(0));
Expect<float>(&reader, "foo_001", Constant_2x3<float>(1));
Expect<float>(&reader, "foo_002", Constant_2x3<float>(2));
Expect<float>(&reader, "foo_003", Constant_2x3<float>(3));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectNext<float>(&reader, Constant_2x3<float>(0));
ExpectNext<float>(&reader, Constant_2x3<float>(1));
ExpectNext<float>(&reader, Constant_2x3<float>(2));
ExpectNext<float>(&reader, Constant_2x3<float>(3));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectAlignment<float>(&reader, "foo_000", 42);
ExpectAlignment<float>(&reader, "foo_001", 42);
ExpectAlignment<float>(&reader, "foo_002", 42);
ExpectAlignment<float>(&reader, "foo_003", 42);
}
}
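// Benchmark args are (data alignment in bytes, big-tensor element count).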
static void BM_BundleAlignment(::testing::benchmark::State& state) {
{
const int alignment = state.range(0);
const int tensor_size = state.range(1);
BundleWriter::Options opts;
opts.data_alignment = alignment;
BundleWriter writer(Env::Default(), Prefix("foo"), opts);
TF_CHECK_OK(writer.Add("small", Constant(true, TensorShape({1}))));
TF_CHECK_OK(writer.Add("big", Constant(32.1, TensorShape({tensor_size}))));
TF_CHECK_OK(writer.Finish());
}
BundleReader reader(Env::Default(), Prefix("foo"));
TF_CHECK_OK(reader.status());
for (auto s : state) {
Tensor t;
TF_CHECK_OK(reader.Lookup("big", &t));
}
}
BENCHMARK(BM_BundleAlignment)->ArgPair(1, 512);
BENCHMARK(BM_BundleAlignment)->ArgPair(1, 4096);
BENCHMARK(BM_BundleAlignment)->ArgPair(1, 1048576);
BENCHMARK(BM_BundleAlignment)->ArgPair(4096, 512);
BENCHMARK(BM_BundleAlignment)->ArgPair(4096, 4096);
BENCHMARK(BM_BundleAlignment)->ArgPair(4096, 1048576);
static void BM_BundleWriterSmallTensor(::testing::benchmark::State& state) {
const int64_t bytes = state.range(0);
Tensor t = Constant(static_cast<int8>('a'), TensorShape{bytes});
BundleWriter writer(Env::Default(), Prefix("foo"));
int suffix = 0;
for (auto s : state) {
TF_CHECK_OK(writer.Add(strings::StrCat("small", suffix++), t));
}
}
BENCHMARK(BM_BundleWriterSmallTensor)->Range(1, 1 << 20);
static void BM_BundleWriterLargeTensor(::testing::benchmark::State& state) {
const int mb = state.range(0);
const int64_t bytes = static_cast<int64_t>(mb) * (1 << 20);
Tensor t = Constant(static_cast<int8>('a'), TensorShape{bytes});
for (auto s : state) {
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_CHECK_OK(writer.Add("big", t));
}
}
BENCHMARK(BM_BundleWriterLargeTensor)->Arg(1 << 10);
BENCHMARK(BM_BundleWriterLargeTensor)->Arg(4 << 10);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_bundle/tensor_bundle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_bundle/tensor_bundle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c69dc71-9b09-41e8-9d9b-4d75be731448 | cpp | tensorflow/tensorflow | sparse_tensor | tensorflow/core/util/sparse/sparse_tensor.cc | tensorflow/core/util/sparse/sparse_tensor_test.cc | #include "tensorflow/core/util/sparse/sparse_tensor.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace sparse {
namespace {
int UnsafeGetDimsFromIx(const Tensor& ix) {
DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
return ix.dim_size(1);
}
Status GetDimsFromIx(const Tensor& ix, int* result) {
if (!TensorShapeUtils::IsMatrix(ix.shape())) {
return errors::InvalidArgument("indices must be a matrix, but got: ",
ix.shape().DebugString());
}
*result = UnsafeGetDimsFromIx(ix);
return absl::OkStatus();
}
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
const VarDimArray order,
SparseTensor* result) {
if (ix.dtype() != DT_INT64) {
return errors::InvalidArgument("indices must be type int64 but got: ",
ix.dtype());
}
if (!TensorShapeUtils::IsVector(vals.shape())) {
return errors::InvalidArgument("vals must be a vec, but got: ",
vals.shape().DebugString());
}
if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
return errors::InvalidArgument(
"indices and values rows (indexing "
"dimension) must match. (indices = ",
ix.shape().dim_size(0), ", values = ", vals.shape().dim_size(0), ")");
}
int dims = 0;
TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
if (order.size() != dims) {
return errors::InvalidArgument("Order length must be SparseTensor rank.");
}
if (shape.size() != dims) {
return errors::InvalidArgument("Shape rank must be SparseTensor rank.");
}
result->ix_ = std::move(ix);
result->vals_ = std::move(vals);
result->shape_.assign(shape.begin(), shape.end());
result->order_.assign(order.begin(), order.end());
result->dims_ = dims;
return absl::OkStatus();
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape)), result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), shape, UndefinedOrder(shape),
result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
const VarDimArray order,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
order, result);
}
SparseTensor::SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order)
: ix_(std::move(ix)),
vals_(std::move(vals)),
shape_(shape.begin(), shape.end()),
order_(order.begin(), order.end()),
dims_(UnsafeGetDimsFromIx(ix_)) {
DCHECK_EQ(ix_.dtype(), DT_INT64)
<< "indices must be type int64 but got: " << ix_.dtype();
DCHECK(TensorShapeUtils::IsVector(vals_.shape()))
<< "vals must be a vec, but got: " << vals_.shape().DebugString();
DCHECK_EQ(ix_.shape().dim_size(0), vals_.shape().dim_size(0))
<< "indices and values rows (indexing dimension) must match.";
DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
}
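// Branch-free validation for rank-1 tensors in standard order: a single
// pass checks both range and strict ordering (which also rules out
// duplicates and negative indices, since prev_index starts at -1).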
bool SparseTensor::IndicesValidVectorFastPath() const {
DCHECK_EQ(shape_.size(), 1);
DCHECK_EQ(order_[0], 0);
const int64_t max_index = shape_[0];
bool index_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const index_base_ptr = ix_t.data();
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int64_t index = index_base_ptr[n];
index_in_range_valid = index_in_range_valid & (index < max_index);
order_valid = order_valid & (index > prev_index);
prev_index = index;
}
return index_in_range_valid & order_valid;
}
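// Rank-2 fast path: reinterprets each row of two int64 indices as four
// int32 words so range, ordering, and uniqueness checks stay branch-free;
// the *_zeros words must be zero for indices that fit in 32 bits.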
bool SparseTensor::IndicesValidMatrix32BitFastPath() const {
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const shape_ptr = shape_.data();
DCHECK_EQ(shape_.size(), 2);
DCHECK_EQ(order_[0], 0);
DCHECK_EQ(order_[1], 1);
DCHECK_LE(shape_ptr[0], std::numeric_limits<int32>::max());
DCHECK_LE(shape_ptr[1], std::numeric_limits<int32>::max());
const int32_t max_rows = static_cast<int32>(shape_ptr[0]);
const int32_t max_cols = static_cast<int32>(shape_ptr[1]);
bool row_zeros_valid = true;
bool row_in_range_valid = true;
bool col_zeros_valid = true;
bool col_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const int32* const index_base_ptr =
reinterpret_cast<const int32*>(ix_t.data());
const size_t kInt32ElementsPerRow = 4;
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int32* const index_ptr = index_base_ptr + n * kInt32ElementsPerRow;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int32 row_zeros = index_ptr[0];
const int32 row_32 = index_ptr[1];
const int32 col_zeros = index_ptr[2];
const int32 col_32 = index_ptr[3];
#else
const int32_t row_32 = index_ptr[0];
const int32_t row_zeros = index_ptr[1];
const int32_t col_32 = index_ptr[2];
const int32_t col_zeros = index_ptr[3];
#endif
row_zeros_valid = row_zeros_valid & (row_zeros == 0);
col_zeros_valid = col_zeros_valid & (col_zeros == 0);
row_in_range_valid =
row_in_range_valid & (row_32 >= 0) & (row_32 < max_rows);
col_in_range_valid =
col_in_range_valid & (col_32 >= 0) & (col_32 < max_cols);
const int64_t concatenated_index =
(static_cast<int64_t>(row_32) << 32) + col_32;
order_valid = order_valid & (concatenated_index > prev_index);
prev_index = concatenated_index;
}
return row_zeros_valid & row_in_range_valid & col_zeros_valid &
col_in_range_valid & order_valid;
}
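// Slow path: validates range, strict lexicographic ordering (by order_
// unless standard_order is set), and uniqueness, producing a detailed
// error message for the first offending index.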
template <bool standard_order>
Status SparseTensor::IndicesValidHelper() const {
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const shape_ptr = shape_.data();
for (std::size_t n = 0; n < num_entries(); ++n) {
bool valid = true;
bool different = false;
bool increasing = true;
if (n == 0) {
for (int di = 0; di < dims_; ++di) {
if (ix_t(n, di) < 0 || ix_t(n, di) >= shape_ptr[di]) valid = false;
}
different = true;
} else {
for (int di = 0; di < dims_; ++di) {
if (ix_t(n, di) < 0 || ix_t(n, di) >= shape_ptr[di]) valid = false;
int ordered_dim;
if (standard_order) {
ordered_dim = di;
} else {
ordered_dim = order_[di];
}
int64_t diff = ix_t(n, ordered_dim) - ix_t(n - 1, ordered_dim);
if (diff > 0) different = true;
if (!different && diff < 0) increasing = false;
}
}
if (TF_PREDICT_FALSE(!valid || !increasing || !different)) {
string index = strings::StrCat("indices[", n, "] = [");
for (int di = 0; di < dims_; ++di) {
strings::StrAppend(&index, ix_t(n, di), di < dims_ - 1 ? "," : "]");
}
if (!valid) {
return errors::InvalidArgument(index,
" is out of bounds: need 0 <= index < [",
absl::StrJoin(shape_, ","), "]");
}
if (!increasing) {
return errors::InvalidArgument(
index,
" is out of order. Many sparse ops require sorted indices.\n"
" Use `tf.sparse.reorder` to create a correctly ordered copy."
"\n\n");
}
if (!different) {
return errors::InvalidArgument(index, " is repeated");
}
}
}
return absl::OkStatus();
}
Status SparseTensor::IndicesValid() const {
if (shape_.size() == 1 && IndicesValidVectorFastPath()) {
return absl::OkStatus();
}
bool standard_order = true;
for (size_t i = 0; i < order_.size(); ++i) {
if (order_[i] < 0) {
return errors::FailedPrecondition(
"Order was not provided. Provide an order at "
"construction time or run ReorderInPlace");
}
standard_order = standard_order && order_[i] == i;
}
if (standard_order) {
if (shape_.size() == 1) {
if (IndicesValidVectorFastPath()) {
return absl::OkStatus();
}
} else if (shape_.size() == 2 &&
shape_[0] <= std::numeric_limits<int32>::max() &&
shape_[1] <= std::numeric_limits<int32>::max()) {
if (IndicesValidMatrix32BitFastPath()) {
return absl::OkStatus();
}
}
return IndicesValidHelper<true>();
} else {
return IndicesValidHelper<false>();
}
}
}
} | #include "tensorflow/core/util/sparse/sparse_tensor.h"
#include <string>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace sparse {
namespace {
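// Builds a fixed 5 x NDIM index matrix whose rows are deliberately out of
// lexicographic order, so reorder/validation behavior is observable.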
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex>
GetSimpleIndexTensor(int N, const int NDIM) {
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex> ix(N, NDIM);
ix(0, 0) = 0;
ix(0, 1) = 0;
ix(0, 2) = 0;
ix(1, 0) = 3;
ix(1, 1) = 0;
ix(1, 2) = 0;
ix(2, 0) = 2;
ix(2, 1) = 0;
ix(2, 2) = 0;
ix(3, 0) = 0;
ix(3, 1) = 1;
ix(3, 2) = 0;
ix(4, 0) = 0;
ix(4, 1) = 0;
ix(4, 2) = 2;
return ix;
}
TEST(SparseTensorTest, DimComparatorSorts) {
int64_t N = 5;
const int NDIM = 3;
auto ix = GetSimpleIndexTensor(N, NDIM);
TTypes<int64_t>::Matrix map(ix.data(), N, NDIM);
std::vector<int64_t> sorting(N);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::vector<int64_t> order{0, 1, 2};
std::vector<int64_t> shape{N, N, N};
DimComparator sorter(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
FixedDimComparator<3> sorter_fixed(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
std::vector<int64_t> order1{2, 0, 1};
DimComparator sorter1(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
FixedDimComparator<3> sorter1_fixed(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesType) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT32, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidValues) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N, 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidN) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N - 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidOrder) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorConstruction) {
int N = 5;
const int NDIM = 3;
auto ix_c = GetSimpleIndexTensor(N, NDIM);
Eigen::Tensor<tstring, 1, Eigen::RowMajor> vals_c(N);
vals_c(0) = "hi0";
vals_c(1) = "hi1";
vals_c(2) = "hi2";
vals_c(3) = "hi3";
vals_c(4) = "hi4";
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
vals_t = vals_c;
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ(
"indices[2] = [2,0,0] is out of order. "
"Many sparse ops require sorted indices.\n"
" Use `tf.sparse.reorder` to create a correctly ordered copy."
"\n\n",
st_indices_valid.message());
st.Reorder<tstring>({2, 0, 1});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi3");
EXPECT_EQ(vals_t(2), "hi2");
EXPECT_EQ(vals_t(3), "hi1");
EXPECT_EQ(vals_t(4), "hi4");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({0, 1, 2});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi4");
EXPECT_EQ(vals_t(2), "hi3");
EXPECT_EQ(vals_t(3), "hi2");
EXPECT_EQ(vals_t(4), "hi1");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
}
TEST(SparseTensorTest, EmptySparseTensorAllowed) {
int N = 0;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
std::vector<int64_t> shape{10, 10, 10};
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), order);
std::vector<int64_t> new_order{1, 0, 2};
st.Reorder<tstring>(new_order);
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), new_order);
}
TEST(SparseTensorTest, SortingWorksCorrectly) {
int N = 30;
const int NDIM = 4;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({1000, 1000, 1000, 1000});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
auto ix_t = ix.matrix<int64_t>();
for (int n = 0; n < 100; ++n) {
ix_t = ix_t.random(Eigen::internal::UniformRandomGenerator<int64_t>(n + 1));
ix_t = ix_t.abs() % 1000;
st.Reorder<tstring>({0, 1, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({1, 0, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 0, 2, 1});
TF_EXPECT_OK(st.IndicesValid());
}
}
TEST(SparseTensorTest, ValidateIndicesFindsInvalid) {
int N = 2;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
Eigen::Tensor<int64_t, 2, Eigen::RowMajor> ix_orig(N, NDIM);
ix_orig(0, 0) = 0;
ix_orig(0, 1) = 0;
ix_orig(0, 2) = 0;
ix_orig(1, 0) = 0;
ix_orig(1, 1) = 0;
ix_orig(1, 2) = 0;
auto ix_t = ix.matrix<int64_t>();
ix_t = ix_orig;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,0] is repeated", st_indices_valid.message());
ix_orig(1, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_orig(0, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,1] is repeated", st_indices_valid.message());
}
TEST(SparseTensorTest, SparseTensorCheckBoundaries) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
ix.matrix<int64_t>() = ix_t;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_t(0, 0) = 11;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[11,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = -1;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[-1,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = 0;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
}
TEST(SparseTensorTest, SparseTensorToDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
}
TEST(SparseTensorTest, SparseTensorToLargerDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
EXPECT_EQ(dense_t(9, 0, 0), "");
EXPECT_EQ(dense_t(9, 0, 9), "");
EXPECT_EQ(dense_t(9, 9, 9), "");
}
TEST(SparseTensorTest, SparseTensorGroup) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_INT32, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<int32>();
ix_t = GetSimpleIndexTensor(N, NDIM);
vals_t(0) = 1;
vals_t(1) = 2;
vals_t(2) = 3;
vals_t(3) = 4;
vals_t(4) = 5;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<int32>(order);
std::vector<std::vector<int64_t> > groups;
std::vector<TTypes<int64_t>::UnalignedConstMatrix> grouped_indices;
std::vector<TTypes<int32>::UnalignedVec> grouped_values;
auto gi = st.group({0});
for (const auto& g : gi) {
groups.push_back(g.group());
VLOG(1) << "Group: " << absl::StrJoin(g.group(), ",");
VLOG(1) << "Indices: " << g.indices();
VLOG(1) << "Values: " << g.values<int32>();
grouped_indices.push_back(g.indices());
grouped_values.push_back(g.values<int32>());
}
EXPECT_EQ(groups.size(), 3);
EXPECT_EQ(groups[0], std::vector<int64_t>({0}));
EXPECT_EQ(groups[1], std::vector<int64_t>({2}));
EXPECT_EQ(groups[2], std::vector<int64_t>({3}));
std::vector<Eigen::Tensor<int64_t, 2, Eigen::RowMajor> > expected_indices;
std::vector<Eigen::Tensor<int32, 1, Eigen::RowMajor> > expected_vals;
expected_indices.emplace_back(3, NDIM);
expected_vals.emplace_back(3);
expected_indices[0].setZero();
expected_indices[0](1, 2) = 2;
expected_indices[0](2, 1) = 1;
expected_vals[0].setConstant(-1);
expected_vals[0](0) = 1;
expected_vals[0](1) = 5;
expected_vals[0](2) = 4;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[1].setZero();
expected_indices[1](0, 0) = 2;
expected_vals[1](0) = 3;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[2].setZero();
expected_indices[2](0, 0) = 3;
expected_vals[2](0) = 2;
for (std::size_t gix = 0; gix < groups.size(); ++gix) {
auto gi_t = grouped_indices[gix];
Eigen::Tensor<bool, 0, Eigen::RowMajor> eval =
(gi_t == expected_indices[gix]).all();
EXPECT_TRUE(eval()) << gix << " indices: " << gi_t << " vs. "
<< expected_indices[gix];
auto gv_t = grouped_values[gix];
eval = (gv_t == expected_vals[gix]).all();
EXPECT_TRUE(eval()) << gix << " values: " << gv_t << " vs. "
<< expected_vals[gix];
}
}
TEST(SparseTensorTest, Concat) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_c = GetSimpleIndexTensor(N, NDIM);
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st, st});
EXPECT_EQ(concatted.order(), st.order());
absl::InlinedVector<int64_t, 8UL> expected_shape{40, 10, 10};
EXPECT_EQ(concatted.shape(), expected_shape);
EXPECT_EQ(concatted.num_entries(), 4 * N);
TF_EXPECT_OK(concatted.IndicesValid());
auto conc_ix_t = concatted.indices().matrix<int64_t>();
auto conc_vals_t = concatted.values().vec<tstring>();
for (int n = 0; n < 4; ++n) {
for (int i = 0; i < N; ++i) {
EXPECT_EQ(conc_ix_t(n * N + i, 0), 10 * n + ix_t(i, 0));
EXPECT_EQ(conc_ix_t(n * N + i, 1), ix_t(i, 1));
EXPECT_EQ(conc_ix_t(n * N + i, 2), ix_t(i, 2));
EXPECT_EQ(conc_vals_t(n * N + i), vals_t(i));
}
}
SparseTensor st_ooo;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
&st_ooo));
SparseTensor conc_ooo = SparseTensor::Concat<tstring>({st, st, st, st_ooo});
std::vector<int64_t> expected_ooo{-1, -1, -1};
EXPECT_EQ(conc_ooo.order(), expected_ooo);
EXPECT_EQ(conc_ooo.shape(), expected_shape);
EXPECT_EQ(conc_ooo.num_entries(), 4 * N);
}
TEST(SparseTensorTest, ConcatEmptyN) {
constexpr int N = 0;
constexpr int NDIM = 2;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({10, 10});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 1}, &st));
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st});
EXPECT_EQ(concatted.num_entries(), 0);
}
TEST(SparseTensorTest, Split) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<SparseTensor> st_list;
TF_ASSERT_OK(SparseTensor::Split<int64_t>(st, 0, 2, &st_list));
EXPECT_EQ(st_list.size(), 2);
auto expected_shape = absl::InlinedVector<int64_t, 8UL>{2, 3};
EXPECT_EQ(st_list[0].shape(), expected_shape);
EXPECT_EQ(st_list[0].values().NumElements(), 3);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(0), 1);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(1), 2);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(2), 3);
EXPECT_EQ(st_list[0].indices().NumElements(), 6);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 1), 2);
EXPECT_EQ(st_list[1].shape(), expected_shape);
EXPECT_EQ(st_list[1].values().NumElements(), 1);
EXPECT_EQ(st_list[1].values().vec<int64_t>()(0), 4);
EXPECT_EQ(st_list[1].indices().NumElements(), 2);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 0), 1);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 1), 0);
}
TEST(SparseTensorTest, Slice) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<int64_t> start(2, 0);
std::vector<int64_t> size(2);
size[0] = 2;
size[1] = 3;
TF_ASSERT_OK_AND_ASSIGN(SparseTensor slice,
SparseTensor::Slice<int64_t>(st, start, size));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({2, 3}));
EXPECT_EQ(slice.values().NumElements(), 3);
EXPECT_EQ(slice.values().vec<int64_t>()(0), 1);
EXPECT_EQ(slice.values().vec<int64_t>()(1), 2);
EXPECT_EQ(slice.values().vec<int64_t>()(2), 3);
EXPECT_EQ(slice.indices().NumElements(), 6);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 1), 2);
}
TEST(SparseTensorTest, SliceReducesOutputDimension) {
const int num_rows = 2;
const int num_columns = 2;
Tensor ids(DT_INT64, TensorShape({num_rows, num_columns}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
Tensor vals(DT_INT64, TensorShape({2}));
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals,
TensorShape({num_rows, num_columns}), &st));
TF_ASSERT_OK_AND_ASSIGN(
SparseTensor slice,
SparseTensor::Slice<int64_t>(st, {num_rows + 1, 1}, {1, num_columns}));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({0, 1}));
}
TEST(SparseTensorTest, Dim0SparseTensorToDenseTensor) {
Tensor ix(DT_INT64, TensorShape({1, 0}));
Tensor vals(DT_INT32, TensorShape({1}));
vals.scalar<int32>()() = 5;
TensorShape shape({});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
Tensor dense(DT_INT32, TensorShape({}));
st.ToDense<int32>(&dense);
EXPECT_EQ(dense.scalar<int32>()(), 5);
}
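// Benchmark args are (number of entries N, rank NDIM); indices are
// re-randomized each iteration with timing paused so only Reorder is
// measured.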
static void BM_SparseReorderFloat(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_FLOAT, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
auto ix_t = ix.matrix<int64_t>();
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<float>(reorder);
}
}
static void BM_SparseReorderString(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
for (int i = 0; i < N32; ++i) {
int len = rnd.Rand32() % 1000;
vals_t(i).resize(len);
}
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<tstring>(reorder);
}
}
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/sparse/sparse_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/sparse/sparse_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cb8cdf4-cddc-43a3-bd8c-aee1ea0653d4 | cpp | tensorflow/tensorflow | op_gen_lib | tensorflow/core/framework/op_gen_lib.cc | tensorflow/core/framework/op_gen_lib_test.cc | #include "tensorflow/core/framework/op_gen_lib.h"
#include <algorithm>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
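// Word-wraps `str` to `width` columns. The first line is preceded by `prefix`
// and continuation lines are indented to the prefix's width; a single word
// longer than the remaining width is emitted unbroken.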
string WordWrap(StringPiece prefix, StringPiece str, int width) {
const string indent_next_line = "\n" + Spaces(prefix.size());
width -= prefix.size();
string result;
strings::StrAppend(&result, prefix);
while (!str.empty()) {
if (static_cast<int>(str.size()) <= width) {
strings::StrAppend(&result, str);
break;
}
auto space = str.rfind(' ', width);
if (space == StringPiece::npos) {
space = str.find(' ');
if (space == StringPiece::npos) {
strings::StrAppend(&result, str);
break;
}
}
StringPiece to_append = str.substr(0, space);
str.remove_prefix(space + 1);
while (absl::EndsWith(to_append, " ")) {
to_append.remove_suffix(1);
}
while (absl::ConsumePrefix(&str, " ")) {
}
strings::StrAppend(&result, to_append);
if (!str.empty()) strings::StrAppend(&result, indent_next_line);
}
return result;
}
bool ConsumeEquals(StringPiece* description) {
if (absl::ConsumePrefix(description, "=")) {
    while (absl::ConsumePrefix(description, " ")) {
    }
return true;
}
return false;
}
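// Splits `*orig` at the first occurrence of `split_ch`: the text before the
// separator goes to `*before_split` and `*orig` advances past it. Returns
// false (moving all of `*orig` into `*before_split`) if the separator is
// absent.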
static bool SplitAt(char split_ch, StringPiece* orig,
StringPiece* before_split) {
auto pos = orig->find(split_ch);
if (pos == StringPiece::npos) {
*before_split = *orig;
*orig = StringPiece();
return false;
} else {
*before_split = orig->substr(0, pos);
orig->remove_prefix(pos + 1);
return true;
}
}
static bool StartsWithFieldName(StringPiece line,
const std::vector<string>& multi_line_fields) {
StringPiece up_to_colon;
if (!SplitAt(':', &line, &up_to_colon)) return false;
while (absl::ConsumePrefix(&up_to_colon, " "))
;
for (const auto& field : multi_line_fields) {
if (up_to_colon == field) {
return true;
}
}
return false;
}
static bool ConvertLine(StringPiece line,
const std::vector<string>& multi_line_fields,
string* ml) {
if (!StartsWithFieldName(line, multi_line_fields)) {
return false;
}
StringPiece up_to_colon;
StringPiece after_colon = line;
SplitAt(':', &after_colon, &up_to_colon);
while (absl::ConsumePrefix(&after_colon, " "))
;
if (!absl::ConsumePrefix(&after_colon, "\"")) {
return false;
}
auto last_quote = after_colon.rfind('\"');
if (last_quote == StringPiece::npos) {
return false;
}
StringPiece escaped = after_colon.substr(0, last_quote);
StringPiece suffix = after_colon.substr(last_quote + 1);
string unescaped;
if (!absl::CUnescape(escaped, &unescaped, nullptr)) {
return false;
}
string end = "END";
for (int s = 0; unescaped.find(end) != string::npos; ++s) {
end = strings::StrCat("END", s);
}
strings::StrAppend(ml, up_to_colon, ": <<", end, "\n", unescaped, "\n", end);
if (!suffix.empty()) {
strings::StrAppend(ml, suffix);
}
strings::StrAppend(ml, "\n");
return true;
}
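// Rewrites the string values of fields named in `multi_line_fields` into
// "<<END ... END" heredoc form, picking END0, END1, ... when the unescaped
// text itself contains the current terminator. Other lines pass through
// unchanged.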
string PBTxtToMultiline(StringPiece pbtxt,
const std::vector<string>& multi_line_fields) {
string ml;
  // Input and output are about the same size; reserve ~6% extra for the
  // inserted heredoc markers.
  ml.reserve(pbtxt.size() * (17. / 16));
StringPiece line;
while (!pbtxt.empty()) {
SplitAt('\n', &pbtxt, &line);
if (!ConvertLine(line, multi_line_fields, &ml)) {
strings::StrAppend(&ml, line, "\n");
}
}
return ml;
}
static bool FindMultiline(StringPiece line, size_t colon, string* end) {
if (colon == StringPiece::npos) return false;
line.remove_prefix(colon + 1);
while (absl::ConsumePrefix(&line, " ")) {
}
if (absl::ConsumePrefix(&line, "<<")) {
*end = string(line);
return true;
}
return false;
}
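// Inverse of PBTxtToMultiline: collapses each "<<END" heredoc block back into
// a single-line, C-escaped string literal, preserving any trailing text
// (e.g. a comment) after the terminator.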
string PBTxtFromMultiline(StringPiece multiline_pbtxt) {
string pbtxt;
  // Output is about the same size as the input; reserve ~3% extra.
  pbtxt.reserve(multiline_pbtxt.size() * (33. / 32));
StringPiece line;
while (!multiline_pbtxt.empty()) {
if (!SplitAt('\n', &multiline_pbtxt, &line)) {
strings::StrAppend(&pbtxt, line);
break;
}
string end;
auto colon = line.find(':');
if (!FindMultiline(line, colon, &end)) {
strings::StrAppend(&pbtxt, line, "\n");
continue;
}
strings::StrAppend(&pbtxt, line.substr(0, colon + 1));
string unescaped;
bool first = true;
while (!multiline_pbtxt.empty()) {
SplitAt('\n', &multiline_pbtxt, &line);
if (absl::ConsumePrefix(&line, end)) break;
if (first) {
first = false;
} else {
unescaped.push_back('\n');
}
strings::StrAppend(&unescaped, line);
line = StringPiece();
}
strings::StrAppend(&pbtxt, " \"", absl::CEscape(unescaped), "\"", line,
"\n");
}
return pbtxt;
}
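// Replaces every occurrence of `from` in `*s` with `to`.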
static void StringReplace(const string& from, const string& to, string* s) {
std::vector<string> split;
string::size_type pos = 0;
while (pos < s->size()) {
auto found = s->find(from, pos);
if (found == string::npos) {
split.push_back(s->substr(pos));
break;
} else {
split.push_back(s->substr(pos, found - pos));
pos = found + from.size();
if (pos == s->size()) {
split.push_back("");
}
}
}
*s = absl::StrJoin(split, to);
}
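// Rewrites backtick-quoted mentions of `from` as `to` throughout the ApiDef's
// summary, description, and per-argument/attr descriptions.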
static void RenameInDocs(const string& from, const string& to,
ApiDef* api_def) {
const string from_quoted = strings::StrCat("`", from, "`");
const string to_quoted = strings::StrCat("`", to, "`");
for (int i = 0; i < api_def->in_arg_size(); ++i) {
if (!api_def->in_arg(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_in_arg(i)->mutable_description());
}
}
for (int i = 0; i < api_def->out_arg_size(); ++i) {
if (!api_def->out_arg(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_out_arg(i)->mutable_description());
}
}
for (int i = 0; i < api_def->attr_size(); ++i) {
if (!api_def->attr(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_attr(i)->mutable_description());
}
}
if (!api_def->summary().empty()) {
StringReplace(from_quoted, to_quoted, api_def->mutable_summary());
}
if (!api_def->description().empty()) {
StringReplace(from_quoted, to_quoted, api_def->mutable_description());
}
}
namespace {
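// Seeds an ApiDef from its OpDef: visible by default, a single endpoint named
// after the op, identity rename_to fields, and arg_order matching the
// declared input order.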
void InitApiDefFromOpDef(const OpDef& op_def, ApiDef* api_def) {
api_def->set_graph_op_name(op_def.name());
api_def->set_visibility(ApiDef::VISIBLE);
auto* endpoint = api_def->add_endpoint();
endpoint->set_name(op_def.name());
for (const auto& op_in_arg : op_def.input_arg()) {
auto* api_in_arg = api_def->add_in_arg();
api_in_arg->set_name(op_in_arg.name());
api_in_arg->set_rename_to(op_in_arg.name());
api_in_arg->set_description(op_in_arg.description());
*api_def->add_arg_order() = op_in_arg.name();
}
for (const auto& op_out_arg : op_def.output_arg()) {
auto* api_out_arg = api_def->add_out_arg();
api_out_arg->set_name(op_out_arg.name());
api_out_arg->set_rename_to(op_out_arg.name());
api_out_arg->set_description(op_out_arg.description());
}
for (const auto& op_attr : op_def.attr()) {
auto* api_attr = api_def->add_attr();
api_attr->set_name(op_attr.name());
api_attr->set_rename_to(op_attr.name());
if (op_attr.has_default_value()) {
*api_attr->mutable_default_value() = op_attr.default_value();
}
api_attr->set_description(op_attr.description());
}
api_def->set_summary(op_def.summary());
api_def->set_description(op_def.description());
}
void MergeArg(ApiDef::Arg* base_arg, const ApiDef::Arg& new_arg) {
if (!new_arg.rename_to().empty()) {
base_arg->set_rename_to(new_arg.rename_to());
}
if (!new_arg.description().empty()) {
base_arg->set_description(new_arg.description());
}
}
void MergeAttr(ApiDef::Attr* base_attr, const ApiDef::Attr& new_attr) {
if (!new_attr.rename_to().empty()) {
base_attr->set_rename_to(new_attr.rename_to());
}
if (new_attr.has_default_value()) {
*base_attr->mutable_default_value() = new_attr.default_value();
}
if (!new_attr.description().empty()) {
base_attr->set_description(new_attr.description());
}
}
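// Applies an ApiDef override onto `base_api_def`. Endpoints and arg_order are
// replaced wholesale; args and attrs are merged by name; the description may
// be replaced and/or wrapped with a prefix and suffix. Referencing an unknown
// arg or attr, or giving an arg_order that is not a permutation of the base
// order, yields FailedPrecondition.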
Status MergeApiDefs(ApiDef* base_api_def, const ApiDef& new_api_def) {
if (new_api_def.visibility() != ApiDef::DEFAULT_VISIBILITY) {
base_api_def->set_visibility(new_api_def.visibility());
}
if (new_api_def.endpoint_size() > 0) {
base_api_def->clear_endpoint();
std::copy(
new_api_def.endpoint().begin(), new_api_def.endpoint().end(),
protobuf::RepeatedFieldBackInserter(base_api_def->mutable_endpoint()));
}
for (const auto& new_arg : new_api_def.in_arg()) {
bool found_base_arg = false;
for (int i = 0; i < base_api_def->in_arg_size(); ++i) {
auto* base_arg = base_api_def->mutable_in_arg(i);
if (base_arg->name() == new_arg.name()) {
MergeArg(base_arg, new_arg);
found_base_arg = true;
break;
}
}
if (!found_base_arg) {
return errors::FailedPrecondition("Argument ", new_arg.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
for (const auto& new_arg : new_api_def.out_arg()) {
bool found_base_arg = false;
for (int i = 0; i < base_api_def->out_arg_size(); ++i) {
auto* base_arg = base_api_def->mutable_out_arg(i);
if (base_arg->name() == new_arg.name()) {
MergeArg(base_arg, new_arg);
found_base_arg = true;
break;
}
}
if (!found_base_arg) {
return errors::FailedPrecondition("Argument ", new_arg.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
if (new_api_def.arg_order_size() > 0) {
if (new_api_def.arg_order_size() != base_api_def->arg_order_size()) {
return errors::FailedPrecondition(
"Invalid number of arguments ", new_api_def.arg_order_size(), " for ",
base_api_def->graph_op_name(),
". Expected: ", base_api_def->arg_order_size());
}
if (!std::is_permutation(new_api_def.arg_order().begin(),
new_api_def.arg_order().end(),
base_api_def->arg_order().begin())) {
return errors::FailedPrecondition(
"Invalid arg_order: ", absl::StrJoin(new_api_def.arg_order(), ", "),
" for ", base_api_def->graph_op_name(),
". All elements in arg_order override must match base arg_order: ",
absl::StrJoin(base_api_def->arg_order(), ", "));
}
base_api_def->clear_arg_order();
std::copy(
new_api_def.arg_order().begin(), new_api_def.arg_order().end(),
protobuf::RepeatedFieldBackInserter(base_api_def->mutable_arg_order()));
}
for (const auto& new_attr : new_api_def.attr()) {
bool found_base_attr = false;
for (int i = 0; i < base_api_def->attr_size(); ++i) {
auto* base_attr = base_api_def->mutable_attr(i);
if (base_attr->name() == new_attr.name()) {
MergeAttr(base_attr, new_attr);
found_base_attr = true;
break;
}
}
if (!found_base_attr) {
return errors::FailedPrecondition("Attribute ", new_attr.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
if (!new_api_def.summary().empty()) {
base_api_def->set_summary(new_api_def.summary());
}
auto description = new_api_def.description().empty()
? base_api_def->description()
: new_api_def.description();
if (!new_api_def.description_prefix().empty()) {
description =
strings::StrCat(new_api_def.description_prefix(), "\n", description);
}
if (!new_api_def.description_suffix().empty()) {
description =
strings::StrCat(description, "\n", new_api_def.description_suffix());
}
base_api_def->set_description(description);
return absl::OkStatus();
}
}
ApiDefMap::ApiDefMap(const OpList& op_list) {
for (const auto& op : op_list.op()) {
ApiDef api_def;
InitApiDefFromOpDef(op, &api_def);
map_[op.name()] = api_def;
}
}
ApiDefMap::~ApiDefMap() {}
Status ApiDefMap::LoadFileList(Env* env, const std::vector<string>& filenames) {
for (const auto& filename : filenames) {
TF_RETURN_IF_ERROR(LoadFile(env, filename));
}
return absl::OkStatus();
}
Status ApiDefMap::LoadFile(Env* env, const string& filename) {
if (filename.empty()) return absl::OkStatus();
string contents;
TF_RETURN_IF_ERROR(ReadFileToString(env, filename, &contents));
Status status = LoadApiDef(contents);
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status, strings::StrCat("Error parsing ApiDef file ", filename, ": ",
status.message()));
}
return absl::OkStatus();
}
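// Parses one ApiDefs text proto (multiline heredocs allowed) and merges each
// op's override into the map. Overrides for ops not already in the map are
// silently ignored.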
Status ApiDefMap::LoadApiDef(const string& api_def_file_contents) {
const string contents = PBTxtFromMultiline(api_def_file_contents);
ApiDefs api_defs;
TF_RETURN_IF_ERROR(
proto_utils::ParseTextFormatFromString(contents, &api_defs));
for (const auto& api_def : api_defs.op()) {
if (map_.find(api_def.graph_op_name()) != map_.end()) {
TF_RETURN_IF_ERROR(MergeApiDefs(&map_[api_def.graph_op_name()], api_def));
}
}
return absl::OkStatus();
}
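// Rewrites documentation so backtick-quoted references use the renamed forms:
// the first endpoint's name for the op itself and rename_to for arguments and
// attrs.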
void ApiDefMap::UpdateDocs() {
for (auto& name_and_api_def : map_) {
auto& api_def = name_and_api_def.second;
CHECK_GT(api_def.endpoint_size(), 0);
const string canonical_name = api_def.endpoint(0).name();
if (api_def.graph_op_name() != canonical_name) {
RenameInDocs(api_def.graph_op_name(), canonical_name, &api_def);
}
for (const auto& in_arg : api_def.in_arg()) {
if (in_arg.name() != in_arg.rename_to()) {
RenameInDocs(in_arg.name(), in_arg.rename_to(), &api_def);
}
}
for (const auto& out_arg : api_def.out_arg()) {
if (out_arg.name() != out_arg.rename_to()) {
RenameInDocs(out_arg.name(), out_arg.rename_to(), &api_def);
}
}
for (const auto& attr : api_def.attr()) {
if (attr.name() != attr.rename_to()) {
RenameInDocs(attr.name(), attr.rename_to(), &api_def);
}
}
}
}
const tensorflow::ApiDef* ApiDefMap::GetApiDef(const string& name) const {
return gtl::FindOrNull(map_, name);
}
} | #include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
constexpr char kTestOpList[] = R"(op {
name: "testop"
input_arg {
name: "arg_a"
}
input_arg {
name: "arg_b"
}
output_arg {
name: "arg_c"
}
attr {
name: "attr_a"
}
deprecation {
version: 123
explanation: "foo"
}
})";
constexpr char kTestApiDef[] = R"(op {
graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop1"
}
in_arg {
name: "arg_a"
}
in_arg {
name: "arg_b"
}
out_arg {
name: "arg_c"
}
attr {
name: "attr_a"
}
summary: "Mock op for testing."
description: <<END
Description for the
testop.
END
arg_order: "arg_a"
arg_order: "arg_b"
}
)";
TEST(OpGenLibTest, MultilinePBTxt) {
const string pbtxt = R"(foo: "abc"
foo: ""
foo: "\n\n"
foo: "abc\nEND"
foo: "ghi\njkl\n"
bar: "quotes:\""
)";
const string ml_foo = R"(foo: <<END
abc
END
foo: <<END
END
foo: <<END
END
foo: <<END0
abc
END
END0
foo: <<END
ghi
jkl
END
bar: "quotes:\""
)";
const string ml_foo_bar = R"(foo: <<END
abc
END
foo: <<END
END
foo: <<END
END
foo: <<END0
abc
END
END0
foo: <<END
ghi
jkl
END
bar: <<END
quotes:"
END
)";
EXPECT_EQ(ml_foo, PBTxtToMultiline(pbtxt, {"foo"}));
EXPECT_EQ(pbtxt, PBTxtToMultiline(pbtxt, {"baz"}));
EXPECT_EQ(ml_foo_bar, PBTxtToMultiline(pbtxt, {"foo", "bar"}));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(pbtxt));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml_foo));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml_foo_bar));
}
TEST(OpGenLibTest, PBTxtToMultilineErrorCases) {
EXPECT_EQ("f: <<END\n7\nEND\n", PBTxtToMultiline("f: \"7\"\n", {"f"}));
EXPECT_EQ("f \"7\"\n", PBTxtToMultiline("f \"7\"\n", {"f"}));
EXPECT_EQ("f: 7\n", PBTxtToMultiline("f: 7\n", {"f"}));
EXPECT_EQ("f: 7\"\n", PBTxtToMultiline("f: 7\"\n", {"f"}));
EXPECT_EQ("f: \"7\n", PBTxtToMultiline("f: \"7\n", {"f"}));
EXPECT_EQ("f: \"7\\\"\n", PBTxtToMultiline("f: \"7\\\"\n", {"f"}));
}
TEST(OpGenLibTest, PBTxtToMultilineComments) {
const string pbtxt = R"(f: "bar" # Comment 1
f: "\n" # Comment 2
)";
const string ml = R"(f: <<END
bar
END # Comment 1
f: <<END
END # Comment 2
)";
EXPECT_EQ(ml, PBTxtToMultiline(pbtxt, {"f"}));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml));
}
TEST(OpGenLibTest, ApiDefAccessInvalidName) {
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
ASSERT_EQ(nullptr, api_map.GetApiDef("testop5"));
}
TEST(OpGenLibTest, ApiDefInitializedFromOpDef) {
tensorflow::ApiDef expected_api_def;
protobuf::TextFormat::ParseFromString(
R"(graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop"
}
in_arg {
name: "arg_a"
rename_to: "arg_a"
}
in_arg {
name: "arg_b"
rename_to: "arg_b"
}
out_arg {
name: "arg_c"
rename_to: "arg_c"
}
attr {
name: "attr_a"
rename_to: "attr_a"
}
arg_order: "arg_a"
arg_order: "arg_b"
)",
&expected_api_def);
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
const auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(api_def->DebugString(), expected_api_def.DebugString());
}
TEST(OpGenLibTest, ApiDefLoadSingleApiDef) {
tensorflow::ApiDefs expected_api_defs;
protobuf::TextFormat::ParseFromString(R"(op {
graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop1"
}
in_arg {
name: "arg_a"
rename_to: "arg_a"
}
in_arg {
name: "arg_b"
rename_to: "arg_b"
}
out_arg {
name: "arg_c"
rename_to: "arg_c"
}
attr {
name: "attr_a"
rename_to: "attr_a"
}
summary: "Mock op for testing."
description: "Description for the\ntestop."
arg_order: "arg_a"
arg_order: "arg_b"
}
)",
&expected_api_defs);
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
const auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop1", api_def->endpoint(0).name());
ApiDefs api_defs;
*api_defs.add_op() = *api_def;
EXPECT_EQ(api_defs.DebugString(), expected_api_defs.DebugString());
}
TEST(OpGenLibTest, ApiDefOverrideVisibility) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
visibility: HIDDEN
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ(ApiDef::VISIBLE, api_def->visibility());
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
EXPECT_EQ(ApiDef::VISIBLE, api_def->visibility());
TF_CHECK_OK(api_map.LoadApiDef(api_def2));
EXPECT_EQ(ApiDef::HIDDEN, api_def->visibility());
}
TEST(OpGenLibTest, ApiDefOverrideEndpoints) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop1", api_def->endpoint(0).name());
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
ASSERT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop2", api_def->endpoint(0).name());
}
TEST(OpGenLibTest, ApiDefOverrideArgs) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
in_arg {
name: "arg_a"
rename_to: "arg_aa"
}
out_arg {
name: "arg_c"
rename_to: "arg_cc"
}
arg_order: "arg_b"
arg_order: "arg_a"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
const auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(2, api_def->in_arg_size());
EXPECT_EQ("arg_aa", api_def->in_arg(0).rename_to());
EXPECT_EQ("arg_b", api_def->in_arg(1).rename_to());
ASSERT_EQ(1, api_def->out_arg_size());
EXPECT_EQ("arg_cc", api_def->out_arg(0).rename_to());
ASSERT_EQ(2, api_def->arg_order_size());
EXPECT_EQ("arg_b", api_def->arg_order(0));
EXPECT_EQ("arg_a", api_def->arg_order(1));
}
TEST(OpGenLibTest, ApiDefOverrideDescriptions) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
summary: "New summary"
description: <<END
New description
END
description_prefix: "A"
description_suffix: "Z"
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
description_prefix: "B"
description_suffix: "Y"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
const auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ("New summary", api_def->summary());
EXPECT_EQ("A\nNew description\nZ", api_def->description());
EXPECT_EQ("", api_def->description_prefix());
EXPECT_EQ("", api_def->description_suffix());
TF_CHECK_OK(api_map.LoadApiDef(api_def2));
EXPECT_EQ("B\nA\nNew description\nZ\nY", api_def->description());
EXPECT_EQ("", api_def->description_prefix());
EXPECT_EQ("", api_def->description_suffix());
}
TEST(OpGenLibTest, ApiDefInvalidOpInOverride) {
const string api_def1 = R"(
op {
graph_op_name: "different_testop"
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
ASSERT_EQ(nullptr, api_map.GetApiDef("different_testop"));
}
TEST(OpGenLibTest, ApiDefInvalidArgOrder) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
arg_order: "unexpected_arg"
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
}
)";
const string api_def3 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
arg_order: "arg_a"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto status = api_map.LoadApiDef(api_def1);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
status = api_map.LoadApiDef(api_def2);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
status = api_map.LoadApiDef(api_def3);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
}
TEST(OpGenLibTest, ApiDefInvalidSyntax) {
const string api_def = R"pb(
op { bad_op_name: "testop" }
)pb";
OpList op_list;
ApiDefMap api_map(op_list);
auto status = api_map.LoadApiDef(api_def);
ASSERT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
TEST(OpGenLibTest, ApiDefUpdateDocs) {
const string op_list1 = R"(op {
name: "testop"
input_arg {
name: "arg_a"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
output_arg {
name: "arg_c"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
attr {
name: "attr_a"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
)";
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
in_arg {
name: "arg_a"
rename_to: "arg_aa"
}
out_arg {
name: "arg_c"
rename_to: "arg_cc"
description: "New description: `arg_a`, `arg_c`, `attr_a`, `testop`"
}
attr {
name: "attr_a"
rename_to: "attr_aa"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(op_list1, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
api_map.UpdateDocs();
const string expected_description =
"`arg_aa`, `arg_cc`, `attr_aa`, `testop2`";
EXPECT_EQ(expected_description, api_map.GetApiDef("testop")->description());
EXPECT_EQ(expected_description,
api_map.GetApiDef("testop")->in_arg(0).description());
EXPECT_EQ("New description: " + expected_description,
api_map.GetApiDef("testop")->out_arg(0).description());
EXPECT_EQ(expected_description,
api_map.GetApiDef("testop")->attr(0).description());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_gen_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_gen_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb279d63-e77e-4a87-b5de-785907834cb5 | cpp | tensorflow/tensorflow | resource_var | tensorflow/core/framework/resource_var.cc | tensorflow/core/framework/resource_var_test.cc | #include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/graph/graph_def_builder.h"
namespace tensorflow {
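// Emits the variable as a small subgraph: an anonymous VarHandleOp, a Const
// holding the current value, an AssignVariableOp wiring them together, and an
// Identity of the handle that control-depends on the assignment.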
Status Var::AsGraphDef(GraphDefBuilder* builder, Node** out) const {
Node* var = ops::SourceOp(
"VarHandleOp",
builder->opts()
.WithAttr("dtype", tensor_.dtype())
.WithAttr("shape", tensor_.shape())
.WithAttr("shared_name", ResourceHandle::ANONYMOUS_NAME));
Node* value = ops::SourceOp("Const", builder->opts()
.WithAttr("dtype", tensor_.dtype())
.WithAttr("value", tensor_));
Node* assign =
ops::BinaryOp("AssignVariableOp", var, value,
builder->opts().WithAttr("dtype", tensor_.dtype()));
*out =
ops::UnaryOp("Identity", var, builder->opts().WithControlInput(assign));
return absl::OkStatus();
}
std::string Var::MakeRefCountingHandleName(int64_t resource_id) const {
std::string handle_name = absl::StrFormat("%s%d", debug_name_, resource_id);
return handle_name;
}
} | #include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
TEST(ResourceVarTest, Uninitialize) {
RefCountPtr<Var> var{new Var(DT_INT32)};
EXPECT_FALSE(var->is_initialized);
EXPECT_TRUE(var->tensor()->data() == nullptr);
*(var->tensor()) = Tensor(DT_INT32, TensorShape({1}));
var->is_initialized = true;
EXPECT_TRUE(var->tensor()->data() != nullptr);
var->Uninitialize();
EXPECT_FALSE(var->is_initialized);
EXPECT_TRUE(var->tensor()->data() == nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_var.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_var_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d8c9145a-2a75-4896-86ec-d23b9a0316cf | cpp | tensorflow/tensorflow | node_properties | tensorflow/core/framework/node_properties.cc | tensorflow/core/framework/node_properties_test.cc | #include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
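// Looks up the node's OpDef in `op_registry`, derives input/output types from
// the NodeDef's attrs, and wraps everything in a shared NodeProperties.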
Status NodeProperties::CreateFromNodeDef(
NodeDef node_def, const OpRegistryInterface* op_registry,
std::shared_ptr<const NodeProperties>* props) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry->LookUpOpDef(node_def.op(), &op_def));
DataTypeVector input_types;
DataTypeVector output_types;
TF_RETURN_IF_ERROR(
InOutTypesForNode(node_def, *op_def, &input_types, &output_types));
props->reset(new NodeProperties(op_def, std::move(node_def),
std::move(input_types),
std::move(output_types)));
return absl::OkStatus();
}
} | #include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
OpDef ToOpDef(const OpDefBuilder& builder) {
OpRegistrationData op_reg_data;
EXPECT_TRUE(builder.Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
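// Minimal OpRegistryInterface that resolves only a single "Foo" op.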
class MockOpRegistry : public OpRegistryInterface {
public:
MockOpRegistry()
: op_reg_(ToOpDef(OpDefBuilder("Foo")
.Input("f: float")
.Input("i: int32")
.Output("of: double"))) {}
~MockOpRegistry() override {}
Status LookUp(const string& op_type_name,
const OpRegistrationData** op_reg_data) const override {
if (op_type_name == "Foo") {
*op_reg_data = &op_reg_;
return absl::OkStatus();
} else {
*op_reg_data = nullptr;
return errors::InvalidArgument("Op type named ", op_type_name,
" not found");
}
}
const OpDef* get_op_def_addr() { return &op_reg_.op_def; }
private:
const OpRegistrationData op_reg_;
};
void ValidateNodeProperties(const NodeProperties& props, const OpDef* op_def,
const NodeDef& node_def,
const DataTypeVector& input_types,
const DataTypeVector& output_types) {
EXPECT_EQ(props.op_def, op_def);
EXPECT_EQ(props.node_def.name(), node_def.name());
ASSERT_EQ(props.input_types.size(), input_types.size());
for (int i = 0; i < input_types.size(); ++i) {
EXPECT_EQ(props.input_types[i], input_types[i]);
EXPECT_EQ(props.input_types_slice[i], input_types[i]);
}
ASSERT_EQ(props.output_types.size(), output_types.size());
for (int i = 0; i < output_types.size(); ++i) {
EXPECT_EQ(props.output_types[i], output_types[i]);
EXPECT_EQ(props.output_types_slice[i], output_types[i]);
}
}
}
TEST(NodeProperties, Contructors) {
OpDef op_def;
NodeDef node_def;
node_def.set_name("foo");
DataTypeVector input_types{DT_FLOAT, DT_INT32};
DataTypeVector output_types{DT_DOUBLE};
DataTypeSlice input_types_slice(input_types);
DataTypeSlice output_types_slice(output_types);
NodeProperties props_from_slices(&op_def, node_def, input_types_slice,
output_types_slice);
ValidateNodeProperties(props_from_slices, &op_def, node_def, input_types,
output_types);
NodeProperties props_from_vectors(&op_def, node_def, input_types,
output_types);
ValidateNodeProperties(props_from_vectors, &op_def, node_def, input_types,
output_types);
}
TEST(NodeProperties, CreateFromNodeDef) {
MockOpRegistry op_registry;
NodeDef node_def;
node_def.set_name("bar");
node_def.set_op("Foo");
node_def.add_input("f_in");
node_def.add_input("i_in");
std::shared_ptr<const NodeProperties> props;
EXPECT_TRUE(
NodeProperties::CreateFromNodeDef(node_def, &op_registry, &props).ok());
DataTypeVector input_types{DT_FLOAT, DT_INT32};
DataTypeVector output_types{DT_DOUBLE};
ValidateNodeProperties(*props, op_registry.get_op_def_addr(), node_def,
input_types, output_types);
node_def.set_op("Baz");
std::shared_ptr<const NodeProperties> props_bad;
EXPECT_FALSE(
NodeProperties::CreateFromNodeDef(node_def, &op_registry, &props_bad)
.ok());
EXPECT_EQ(props_bad, nullptr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_properties.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_properties_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
473bd387-adef-43eb-b015-07209f125840 | cpp | tensorflow/tensorflow | op_def_builder | tensorflow/core/framework/op_def_builder.cc | tensorflow/core/framework/op_def_builder_test.cc | #include "tensorflow/core/framework/op_def_builder.h"
#include <limits>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
using ::tensorflow::strings::Scanner;
namespace tensorflow {
namespace {
string AttrError(StringPiece orig, const string& op_name) {
return strings::StrCat(" from Attr(\"", orig, "\") for Op ", op_name);
}
bool ConsumeAttrName(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.StopCapture()
.AnySpace()
.OneLiteral(":")
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeListPrefix(StringPiece* sp) {
return Scanner(*sp)
.OneLiteral("list")
.AnySpace()
.OneLiteral("(")
.AnySpace()
.GetResult(sp);
}
bool ConsumeQuotedString(char quote_ch, StringPiece* sp, StringPiece* out) {
const string quote_str(1, quote_ch);
return Scanner(*sp)
.OneLiteral(quote_str.c_str())
.RestartCapture()
.ScanEscapedUntil(quote_ch)
.StopCapture()
.OneLiteral(quote_str.c_str())
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeAttrType(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.Many(Scanner::LOWERLETTER_DIGIT)
.StopCapture()
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeAttrNumber(StringPiece* sp, int64_t* out) {
Scanner scan(*sp);
StringPiece match;
StringPiece remaining;
scan.AnySpace().RestartCapture();
if (scan.Peek() == '-') {
scan.OneLiteral("-");
}
if (!scan.Many(Scanner::DIGIT)
.StopCapture()
.AnySpace()
.GetResult(&remaining, &match)) {
return false;
}
int64_t value = 0;
if (!strings::safe_strto64(match, &value)) {
return false;
}
*out = value;
*sp = remaining;
return true;
}
#define VERIFY(expr, ...) \
do { \
if (!(expr)) { \
errors->push_back( \
strings::StrCat(__VA_ARGS__, AttrError(orig, op_def->name()))); \
return; \
} \
} while (false)
bool ConsumeCompoundAttrType(StringPiece* sp, StringPiece* out) {
auto capture_data = sp->data();
auto capture_begin = sp->begin();
if (absl::ConsumePrefix(sp, "numbertype") ||
absl::ConsumePrefix(sp, "numerictype") ||
absl::ConsumePrefix(sp, "quantizedtype") ||
absl::ConsumePrefix(sp, "realnumbertype") ||
absl::ConsumePrefix(sp, "realnumberictype")) {
*out = StringPiece(capture_data, sp->begin() - capture_begin);
return true;
}
return false;
}
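// Consumes a compound type name ("numbertype", "quantizedtype", ...) from
// `*sp`, capturing it in `*out`; returns false if none matches.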
bool ProcessCompoundType(const StringPiece type_string, AttrValue* allowed) {
if (type_string == "numbertype" || type_string == "numerictype") {
for (DataType dt : NumberTypes()) {
allowed->mutable_list()->add_type(dt);
}
} else if (type_string == "quantizedtype") {
for (DataType dt : QuantizedTypes()) {
allowed->mutable_list()->add_type(dt);
}
} else if (type_string == "realnumbertype" ||
type_string == "realnumerictype") {
for (DataType dt : RealNumberTypes()) {
allowed->mutable_list()->add_type(dt);
}
} else {
return false;
}
return true;
}
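// Expands a compound type name into the explicit DataType list in `allowed`;
// returns false for unrecognized names.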
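// Parses one Attr("name: type [>= min] [= default]") spec into an
// OpDef::AttrDef, handling list(...) wrappers, compound types, and restricted
// {...} sets. Failures are appended to `errors` rather than aborting the
// build.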
void FinalizeAttr(StringPiece spec, bool allow_attr_type_any, OpDef* op_def,
std::vector<string>* errors) {
OpDef::AttrDef* attr = op_def->add_attr();
StringPiece orig(spec);
StringPiece tmp_name;
VERIFY(ConsumeAttrName(&spec, &tmp_name), "Trouble parsing '<name>:'");
attr->set_name(tmp_name.data(), tmp_name.size());
bool is_list = ConsumeListPrefix(&spec);
string type;
StringPiece type_string;
if (absl::ConsumePrefix(&spec, "string")) {
type = "string";
} else if (absl::ConsumePrefix(&spec, "int")) {
type = "int";
} else if (absl::ConsumePrefix(&spec, "float")) {
type = "float";
} else if (absl::ConsumePrefix(&spec, "bool")) {
type = "bool";
} else if (absl::ConsumePrefix(&spec, "type")) {
type = "type";
} else if (absl::ConsumePrefix(&spec, "shape")) {
type = "shape";
} else if (absl::ConsumePrefix(&spec, "tensor")) {
type = "tensor";
} else if (absl::ConsumePrefix(&spec, "func")) {
type = "func";
} else if (absl::ConsumePrefix(&spec, "any") && allow_attr_type_any) {
type = "any";
} else if (ConsumeCompoundAttrType(&spec, &type_string)) {
type = "type";
AttrValue* allowed = attr->mutable_allowed_values();
VERIFY(ProcessCompoundType(type_string, allowed),
"Expected to see a compound type, saw: ", type_string);
} else if (absl::ConsumePrefix(&spec, "{")) {
AttrValue* allowed = attr->mutable_allowed_values();
str_util::RemoveLeadingWhitespace(&spec);
if (absl::StartsWith(spec, "\"") || absl::StartsWith(spec, "'")) {
type = "string";
while (true) {
StringPiece escaped_string;
VERIFY(ConsumeQuotedString('"', &spec, &escaped_string) ||
ConsumeQuotedString('\'', &spec, &escaped_string),
"Trouble parsing allowed string at '", spec, "'");
string unescaped;
string error;
VERIFY(absl::CUnescape(escaped_string, &unescaped, &error),
"Trouble unescaping \"", escaped_string,
"\", got error: ", error);
allowed->mutable_list()->add_s(unescaped);
if (absl::ConsumePrefix(&spec, ",")) {
str_util::RemoveLeadingWhitespace(&spec);
if (absl::ConsumePrefix(&spec, "}"))
break;
} else {
VERIFY(absl::ConsumePrefix(&spec, "}"),
"Expected , or } after strings in list, not: '", spec, "'");
break;
}
}
} else {
type = "type";
while (true) {
VERIFY(ConsumeAttrType(&spec, &type_string),
"Trouble parsing type string at '", spec, "'");
if (ProcessCompoundType(type_string, allowed)) {
} else {
DataType dt;
VERIFY(DataTypeFromString(type_string, &dt),
"Unrecognized type string '", type_string, "'");
allowed->mutable_list()->add_type(dt);
}
if (absl::ConsumePrefix(&spec, ",")) {
str_util::RemoveLeadingWhitespace(&spec);
if (absl::ConsumePrefix(&spec, "}"))
break;
} else {
VERIFY(absl::ConsumePrefix(&spec, "}"),
"Expected , or } after types in list, not: '", spec, "'");
break;
}
}
}
} else {
VERIFY(false, "Trouble parsing type string at '", spec, "'");
}
str_util::RemoveLeadingWhitespace(&spec);
if (is_list) {
VERIFY(absl::ConsumePrefix(&spec, ")"),
"Expected ) to close 'list(', not: '", spec, "'");
str_util::RemoveLeadingWhitespace(&spec);
attr->set_type(strings::StrCat("list(", type, ")"));
} else {
attr->set_type(type);
}
if ((is_list || type == "int") && absl::ConsumePrefix(&spec, ">=")) {
    int64_t min_limit = -999;  // Placeholder; overwritten on success below.
VERIFY(ConsumeAttrNumber(&spec, &min_limit),
"Could not parse integer lower limit after '>=', found '", spec,
"' instead");
attr->set_has_minimum(true);
attr->set_minimum(min_limit);
}
if (absl::ConsumePrefix(&spec, "=")) {
str_util::RemoveLeadingWhitespace(&spec);
VERIFY(ParseAttrValue(attr->type(), spec, attr->mutable_default_value()),
"Could not parse default value '", spec, "'");
} else {
VERIFY(spec.empty(), "Extra '", spec, "' unparsed at the end");
}
}
#undef VERIFY
string InOutError(bool is_output, StringPiece orig, const string& op_name) {
return strings::StrCat(" from ", is_output ? "Output" : "Input", "(\"", orig,
"\") for Op ", op_name);
}
bool ConsumeInOutName(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.One(Scanner::LOWERLETTER)
.Any(Scanner::LOWERLETTER_DIGIT_UNDERSCORE)
.StopCapture()
.AnySpace()
.OneLiteral(":")
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeInOutRefOpen(StringPiece* sp) {
return Scanner(*sp)
.OneLiteral("Ref")
.AnySpace()
.OneLiteral("(")
.AnySpace()
.GetResult(sp);
}
bool ConsumeInOutRefClose(StringPiece* sp) {
return Scanner(*sp).OneLiteral(")").AnySpace().GetResult(sp);
}
bool ConsumeInOutNameOrType(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.StopCapture()
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeInOutTimesType(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.OneLiteral("*")
.AnySpace()
.RestartCapture()
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.StopCapture()
.AnySpace()
.GetResult(sp, out);
}
bool ConsumeControlOutName(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.StopCapture()
.GetResult(sp, out);
}
#define VERIFY(expr, ...) \
do { \
if (!(expr)) { \
errors->push_back(strings::StrCat( \
__VA_ARGS__, InOutError(is_output, orig, op_def->name()))); \
return; \
} \
} while (false)
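// Parses one Input/Output("name: [Ref(] type-or-attr [)]") spec into an
// OpDef::ArgDef, resolving attr references ("T", "N * T", "list(type)") and
// giving referenced number/list attrs a default minimum of 1. A DT_RESOURCE
// argument marks the op stateful.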
void FinalizeInputOrOutput(StringPiece spec, bool is_output, OpDef* op_def,
std::vector<string>* errors) {
OpDef::ArgDef* arg =
is_output ? op_def->add_output_arg() : op_def->add_input_arg();
StringPiece orig(spec);
StringPiece tmp_name;
VERIFY(ConsumeInOutName(&spec, &tmp_name), "Trouble parsing 'name:'");
arg->set_name(tmp_name.data(), tmp_name.size());
if (ConsumeInOutRefOpen(&spec)) {
arg->set_is_ref(true);
}
{
StringPiece first, second, type_or_attr;
VERIFY(ConsumeInOutNameOrType(&spec, &first),
"Trouble parsing either a type or an attr name at '", spec, "'");
if (ConsumeInOutTimesType(&spec, &second)) {
arg->set_number_attr(first.data(), first.size());
type_or_attr = second;
} else {
type_or_attr = first;
}
DataType dt;
if (DataTypeFromString(type_or_attr, &dt)) {
arg->set_type(dt);
} else {
const OpDef::AttrDef* attr = FindAttr(type_or_attr, *op_def);
VERIFY(attr != nullptr, "Reference to unknown attr '", type_or_attr, "'");
if (attr->type() == "type") {
arg->set_type_attr(type_or_attr.data(), type_or_attr.size());
} else {
VERIFY(attr->type() == "list(type)", "Reference to attr '",
type_or_attr, "' with type ", attr->type(),
" that isn't type or list(type)");
arg->set_type_list_attr(type_or_attr.data(), type_or_attr.size());
}
}
}
if (arg->is_ref()) {
VERIFY(ConsumeInOutRefClose(&spec),
"Did not find closing ')' for 'Ref(', instead found: '", spec, "'");
}
VERIFY(spec.empty(), "Extra '", spec, "' unparsed at the end");
if (!arg->number_attr().empty()) {
OpDef::AttrDef* attr = FindAttrMutable(arg->number_attr(), op_def);
if (attr != nullptr && !attr->has_minimum()) {
attr->set_has_minimum(true);
attr->set_minimum(1);
}
} else if (!arg->type_list_attr().empty()) {
OpDef::AttrDef* attr = FindAttrMutable(arg->type_list_attr(), op_def);
if (attr != nullptr && attr->type() == "list(type)" &&
!attr->has_minimum()) {
attr->set_has_minimum(true);
attr->set_minimum(1);
}
}
if (arg->type() == DT_RESOURCE) {
op_def->set_is_stateful(true);
}
}
#undef VERIFY
string ControlOutError(StringPiece orig, const string& op_name) {
return strings::StrCat(" from ControlOutput(\"", orig, "\") for Op ",
op_name);
}
void FinalizeControlOutput(StringPiece name, OpDef* op_def,
std::vector<string>* errors) {
StringPiece orig(name);
StringPiece tmp_name;
if (!ConsumeControlOutName(&orig, &tmp_name)) {
errors->push_back(strings::StrCat("Trouble parsing 'name:'",
ControlOutError(orig, op_def->name())));
}
*op_def->add_control_output() = string(tmp_name.data(), tmp_name.size());
}
int num_leading_spaces(StringPiece s) {
size_t i = 0;
while (i < s.size() && s[i] == ' ') {
++i;
}
return i;
}
bool ConsumeDocNameColon(StringPiece* sp, StringPiece* out) {
return Scanner(*sp)
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.StopCapture()
.AnySpace()
.OneLiteral(":")
.AnySpace()
.GetResult(sp, out);
}
bool IsDocNameColon(StringPiece s) {
  return ConsumeDocNameColon(&s, /*out=*/nullptr);
}
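// Splits the Doc() text into a summary line, a free-form description, and
// trailing "name: ..." sections whose bodies (with common indentation
// stripped) are attached to the matching input, output, or attr.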
void FinalizeDoc(const string& text, OpDef* op_def,
std::vector<string>* errors) {
std::vector<string> lines = str_util::Split(text, '\n');
for (string& line : lines) {
absl::StripTrailingAsciiWhitespace(&line);
}
int l = 0;
while (static_cast<size_t>(l) < lines.size() && lines[l].empty()) ++l;
if (static_cast<size_t>(l) < lines.size()) {
op_def->set_summary(lines[l]);
++l;
}
while (static_cast<size_t>(l) < lines.size() && lines[l].empty()) ++l;
int start_l = l;
while (static_cast<size_t>(l) < lines.size() && !IsDocNameColon(lines[l])) {
++l;
}
int end_l = l;
while (start_l < end_l && lines[end_l - 1].empty()) --end_l;
string desc = absl::StrJoin(
absl::Span<const string>(lines.data() + start_l, end_l - start_l), "\n");
if (!desc.empty()) op_def->set_description(desc);
StringPiece name;
std::vector<StringPiece> description;
while (static_cast<size_t>(l) < lines.size()) {
description.clear();
description.push_back(lines[l]);
ConsumeDocNameColon(&description.back(), &name);
++l;
while (static_cast<size_t>(l) < lines.size() && !IsDocNameColon(lines[l])) {
description.push_back(lines[l]);
++l;
}
while (!description.empty() && description.back().empty()) {
description.pop_back();
}
int min_indent = -1;
for (size_t i = 1; i < description.size(); ++i) {
if (!description[i].empty()) {
int indent = num_leading_spaces(description[i]);
if (min_indent < 0 || indent < min_indent) min_indent = indent;
}
}
for (size_t i = 1; i < description.size(); ++i) {
if (!description[i].empty()) description[i].remove_prefix(min_indent);
}
const string complete(absl::StrJoin(description, "\n"));
bool found = false;
for (int i = 0; !found && i < op_def->input_arg_size(); ++i) {
if (op_def->input_arg(i).name() == name) {
op_def->mutable_input_arg(i)->set_description(complete);
found = true;
}
}
for (int i = 0; !found && i < op_def->output_arg_size(); ++i) {
if (op_def->output_arg(i).name() == name) {
op_def->mutable_output_arg(i)->set_description(complete);
found = true;
}
}
for (int i = 0; !found && i < op_def->attr_size(); ++i) {
if (op_def->attr(i).name() == name) {
op_def->mutable_attr(i)->set_description(complete);
found = true;
}
}
if (!found) {
errors->push_back(
strings::StrCat("No matching input/output/attr for name '", name,
"' from Doc() for Op ", op_def->name()));
return;
}
}
}
}
OpDefBuilder::OpDefBuilder(string op_name) {
op_def()->set_name(std::move(op_name));
}
OpDefBuilder& OpDefBuilder::Attr(string spec) {
attrs_.push_back(std::move(spec));
return *this;
}
OpDefBuilder& OpDefBuilder::Input(string spec) {
inputs_.push_back(std::move(spec));
return *this;
}
OpDefBuilder& OpDefBuilder::Output(string spec) {
outputs_.push_back(std::move(spec));
return *this;
}
OpDefBuilder& OpDefBuilder::ControlOutput(string name) {
control_outputs_.push_back(std::move(name));
return *this;
}
OpDefBuilder& OpDefBuilder::Doc(string text) {
#ifndef TF_LEAN_BINARY
if (!doc_.empty()) {
errors_.push_back(
strings::StrCat("Extra call to Doc() for Op ", op_def()->name()));
} else {
doc_ = std::move(text);
}
#endif
return *this;
}
OpDefBuilder& OpDefBuilder::SetIsCommutative() {
op_def()->set_is_commutative(true);
return *this;
}
OpDefBuilder& OpDefBuilder::SetIsAggregate() {
op_def()->set_is_aggregate(true);
return *this;
}
OpDefBuilder& OpDefBuilder::SetIsStateful() {
op_def()->set_is_stateful(true);
return *this;
}
OpDefBuilder& OpDefBuilder::SetAllowsUninitializedInput() {
op_def()->set_allows_uninitialized_input(true);
return *this;
}
OpDefBuilder& OpDefBuilder::SetIsDistributedCommunication() {
op_def()->set_is_distributed_communication(true);
return *this;
}
OpDefBuilder& OpDefBuilder::Deprecated(int version, string explanation) {
if (op_def()->has_deprecation()) {
errors_.push_back(
strings::StrCat("Deprecated called twice for Op ", op_def()->name()));
} else {
OpDeprecation* deprecation = op_def()->mutable_deprecation();
deprecation->set_version(version);
deprecation->set_explanation(std::move(explanation));
}
return *this;
}
OpDefBuilder& OpDefBuilder::SetTypeConstructor(OpTypeConstructor c) {
op_reg_data_.type_ctor = c;
return *this;
}
OpDefBuilder& OpDefBuilder::SetForwardTypeFn(TypeInferenceFn f) {
op_reg_data_.fwd_type_fn = f;
return *this;
}
OpDefBuilder& OpDefBuilder::SetReverseTypeFn(int input_number,
TypeInferenceFn f) {
op_reg_data_.rev_type_fn = f;
op_reg_data_.rev_type_input = input_number;
return *this;
}
OpDefBuilder& OpDefBuilder::SetShapeFn(OpShapeInferenceFn fn) {
if (op_reg_data_.shape_inference_fn != nullptr) {
errors_.push_back(
strings::StrCat("SetShapeFn called twice for Op ", op_def()->name()));
} else {
op_reg_data_.shape_inference_fn = OpShapeInferenceFn(fn);
}
return *this;
}
OpDefBuilder& OpDefBuilder::AllowAttrTypeAny() {
allow_attr_type_any_ = true;
return *this;
}
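// Builds the final OpDef: attrs are finalized first so that input/output
// specs can reference them, then control outputs and the doc string. All
// accumulated parse errors are reported together as one InvalidArgument.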
Status OpDefBuilder::Finalize(OpRegistrationData* op_reg_data) const {
std::vector<string> errors = errors_;
*op_reg_data = op_reg_data_;
OpDef* op_def = &op_reg_data->op_def;
for (StringPiece attr : attrs_) {
FinalizeAttr(attr, allow_attr_type_any_, op_def, &errors);
}
for (StringPiece input : inputs_) {
FinalizeInputOrOutput(input, false, op_def, &errors);
}
for (StringPiece output : outputs_) {
FinalizeInputOrOutput(output, true, op_def, &errors);
}
for (StringPiece control_output : control_outputs_) {
FinalizeControlOutput(control_output, op_def, &errors);
}
FinalizeDoc(doc_, op_def, &errors);
if (op_reg_data->type_ctor != nullptr) {
TF_RETURN_IF_ERROR(op_reg_data->type_ctor(op_def));
}
if (errors.empty()) return absl::OkStatus();
return errors::InvalidArgument(absl::StrJoin(errors, "\n"));
}
} | #include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
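// Sorts each attr's allowed-value type list so that OpDefs whose compound
// types expanded in different orders still compare equal.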
static void CanonicalizeAttrTypeListOrder(OpDef* def) {
for (int i = 0; i < def->attr_size(); i++) {
AttrValue* a = def->mutable_attr(i)->mutable_allowed_values();
std::sort(a->mutable_list()->mutable_type()->begin(),
a->mutable_list()->mutable_type()->end());
}
}
class OpDefBuilderTest : public ::testing::Test {
protected:
OpDefBuilder b() { return OpDefBuilder("Test"); }
void ExpectSuccess(const OpDefBuilder& builder, StringPiece proto,
OpShapeInferenceFn* shape_fn_out = nullptr) {
OpRegistrationData op_reg_data;
Status status = builder.Finalize(&op_reg_data);
TF_EXPECT_OK(status);
OpDef& op_def = op_reg_data.op_def;
if (status.ok()) {
OpDef expected;
protobuf::TextFormat::ParseFromString(
strings::StrCat("name: 'Test' ", proto), &expected);
CanonicalizeAttrTypeListOrder(&op_def);
CanonicalizeAttrTypeListOrder(&expected);
EXPECT_EQ(op_def.ShortDebugString(), expected.ShortDebugString());
if (shape_fn_out) {
*shape_fn_out = op_reg_data.shape_inference_fn;
}
}
}
void ExpectOrdered(const OpDefBuilder& builder, StringPiece proto) {
OpRegistrationData op_reg_data;
Status status = builder.Finalize(&op_reg_data);
TF_EXPECT_OK(status);
OpDef& op_def = op_reg_data.op_def;
if (status.ok()) {
OpDef expected;
protobuf::TextFormat::ParseFromString(
strings::StrCat("name: 'Test' ", proto), &expected);
EXPECT_EQ(op_def.ShortDebugString(), expected.ShortDebugString());
}
}
void ExpectFailure(const OpDefBuilder& builder, const string& error) {
OpRegistrationData op_reg_data;
Status status = builder.Finalize(&op_reg_data);
EXPECT_FALSE(status.ok());
if (!status.ok()) {
EXPECT_EQ(status.message(), error);
}
}
};
TEST_F(OpDefBuilderTest, Attr) {
ExpectSuccess(b().Attr("a:string"), "attr: { name: 'a' type: 'string' }");
ExpectSuccess(b().Attr("A: int"), "attr: { name: 'A' type: 'int' }");
ExpectSuccess(b().Attr("a1 :float"), "attr: { name: 'a1' type: 'float' }");
ExpectSuccess(b().Attr("a_a : bool"), "attr: { name: 'a_a' type: 'bool' }");
ExpectSuccess(b().Attr("aB : type"), "attr: { name: 'aB' type: 'type' }");
ExpectSuccess(b().Attr("aB_3\t: shape"),
"attr: { name: 'aB_3' type: 'shape' }");
ExpectSuccess(b().Attr("t: tensor"), "attr: { name: 't' type: 'tensor' }");
ExpectSuccess(b().Attr("XYZ\t:\tlist(type)"),
"attr: { name: 'XYZ' type: 'list(type)' }");
ExpectSuccess(b().Attr("f: func"), "attr { name: 'f' type: 'func'}");
}
TEST_F(OpDefBuilderTest, AttrFailure) {
ExpectFailure(
b().Attr("_:string"),
"Trouble parsing '<name>:' from Attr(\"_:string\") for Op Test");
ExpectFailure(
b().Attr("9:string"),
"Trouble parsing '<name>:' from Attr(\"9:string\") for Op Test");
ExpectFailure(b().Attr(":string"),
"Trouble parsing '<name>:' from Attr(\":string\") for Op Test");
ExpectFailure(b().Attr("string"),
"Trouble parsing '<name>:' from Attr(\"string\") for Op Test");
ExpectFailure(b().Attr("a:invalid"),
"Trouble parsing type string at 'invalid' from "
"Attr(\"a:invalid\") for Op Test");
ExpectFailure(
b().Attr("b:"),
"Trouble parsing type string at '' from Attr(\"b:\") for Op Test");
}
TEST_F(OpDefBuilderTest, AttrWithRestrictions) {
ExpectSuccess(
b().Attr("a:numbertype"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
"DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, "
"DT_QINT32, DT_UINT32, DT_UINT64, DT_BFLOAT16, DT_QINT16, DT_QUINT16] } "
"} }");
ExpectSuccess(
b().Attr("a:{numbertype, variant}"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
"DT_UINT16, DT_INT8, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, "
"DT_QINT32, DT_UINT32, DT_UINT64, DT_BFLOAT16, DT_VARIANT, DT_QINT16, "
"DT_QUINT16] } } }");
ExpectSuccess(b().Attr("a:realnumbertype"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, "
"DT_INT16, DT_UINT16, DT_INT8, DT_UINT32, DT_UINT64, "
"DT_BFLOAT16] } } }");
ExpectSuccess(b().Attr("a:{realnumbertype, variant , string, }"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_HALF, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, "
"DT_INT16, DT_UINT16, DT_INT8, DT_UINT32, DT_UINT64, "
"DT_BFLOAT16, DT_VARIANT, DT_STRING] } } }");
ExpectSuccess(b().Attr("a:quantizedtype"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_QINT8, DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16]} } }");
ExpectSuccess(b().Attr("a:{quantizedtype ,string}"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_QINT8, DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, "
"DT_STRING]} } }");
ExpectSuccess(b().Attr("a:{string,int32}"),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_STRING, DT_INT32] } } }");
ExpectSuccess(b().Attr("a: { float , complex64 } "),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_FLOAT, DT_COMPLEX64] } } }");
ExpectSuccess(b().Attr("a: {float, complex64,} "),
"attr: { name: 'a' type: 'type' allowed_values { list { type: "
"[DT_FLOAT, DT_COMPLEX64] } }");
ExpectSuccess(b().Attr(R"(a: { "X", "yz" })"),
"attr: { name: 'a' type: 'string' allowed_values { list { s: "
"['X', 'yz'] } } }");
ExpectSuccess(b().Attr(R"(a: { "X", "yz", })"),
"attr: { name: 'a' type: 'string' allowed_values { list { s: "
"['X', 'yz'] } } }");
ExpectSuccess(
b().Attr("i: int >= -5"),
"attr: { name: 'i' type: 'int' has_minimum: true minimum: -5 }");
ExpectSuccess(b().Attr("i: int >= 9223372036854775807"),
("attr: { name: 'i' type: 'int' has_minimum: true "
"minimum: 9223372036854775807 }"));
ExpectSuccess(b().Attr("i: int >= -9223372036854775808"),
("attr: { name: 'i' type: 'int' has_minimum: true "
"minimum: -9223372036854775808 }"));
}
TEST_F(OpDefBuilderTest, AttrRestrictionFailure) {
ExpectFailure(
b().Attr("a:{}"),
"Trouble parsing type string at '}' from Attr(\"a:{}\") for Op Test");
ExpectFailure(
b().Attr("a:{,}"),
"Trouble parsing type string at ',}' from Attr(\"a:{,}\") for Op Test");
ExpectFailure(b().Attr("a:{invalid}"),
"Unrecognized type string 'invalid' from Attr(\"a:{invalid}\") "
"for Op Test");
ExpectFailure(b().Attr("a:{\"str\", float}"),
"Trouble parsing allowed string at 'float}' from "
"Attr(\"a:{\"str\", float}\") for Op Test");
ExpectFailure(b().Attr("a:{ float, \"str\" }"),
"Trouble parsing type string at '\"str\" }' from Attr(\"a:{ "
"float, \"str\" }\") for Op Test");
ExpectFailure(b().Attr("a:{float,,string}"),
"Trouble parsing type string at ',string}' from "
"Attr(\"a:{float,,string}\") for Op Test");
ExpectFailure(b().Attr("a:{float,,}"),
"Trouble parsing type string at ',}' from "
"Attr(\"a:{float,,}\") for Op Test");
ExpectFailure(b().Attr("i: int >= a"),
"Could not parse integer lower limit after '>=', "
"found ' a' instead from Attr(\"i: int >= a\") for Op Test");
ExpectFailure(b().Attr("i: int >= -a"),
"Could not parse integer lower limit after '>=', found ' -a' "
"instead from Attr(\"i: int >= -a\") for Op Test");
ExpectFailure(b().Attr("i: int >= 9223372036854775808"),
"Could not parse integer lower limit after '>=', found "
"' 9223372036854775808' instead from "
"Attr(\"i: int >= 9223372036854775808\") for Op Test");
ExpectFailure(b().Attr("i: int >= -9223372036854775809"),
"Could not parse integer lower limit after '>=', found "
"' -9223372036854775809' instead from "
"Attr(\"i: int >= -9223372036854775809\") for Op Test");
}
TEST_F(OpDefBuilderTest, AttrListOfRestricted) {
ExpectSuccess(
b().Attr("a:list(realnumbertype)"),
"attr: { name: 'a' type: 'list(type)' allowed_values { list { type: "
"[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
"DT_UINT16, DT_INT8, DT_HALF, DT_BFLOAT16, DT_UINT32, DT_UINT64"
"] } } }");
ExpectSuccess(
b().Attr("a:list({realnumbertype, variant})"),
"attr: { name: 'a' type: 'list(type)' allowed_values { list { type: "
"[DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8, DT_INT16, "
"DT_UINT16, DT_INT8, DT_HALF, DT_BFLOAT16, DT_UINT32, DT_UINT64, "
"DT_VARIANT] } } }");
ExpectSuccess(
b().Attr("a:list(quantizedtype)"),
"attr: { name: 'a' type: 'list(type)' allowed_values { list { type: "
"[DT_QINT8, DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16] } } }");
ExpectSuccess(
b().Attr("a: list({float, string, bool})"),
"attr: { name: 'a' type: 'list(type)' allowed_values { list { type: "
"[DT_FLOAT, DT_STRING, DT_BOOL] } } }");
ExpectSuccess(
b().Attr(R"(a: list({ "one fish", "two fish" }))"),
"attr: { name: 'a' type: 'list(string)' allowed_values { list { s: "
"['one fish', 'two fish'] } } }");
ExpectSuccess(
b().Attr(R"(a: list({ 'red fish', 'blue fish' }))"),
"attr: { name: 'a' type: 'list(string)' allowed_values { list { s: "
"['red fish', 'blue fish'] } } }");
ExpectSuccess(
b().Attr(R"(a: list({ "single' ", 'double"' }))"),
"attr: { name: 'a' type: 'list(string)' allowed_values { list { s: "
"[\"single' \", 'double\"'] } } }");
ExpectSuccess(
b().Attr(R"(a: list({ 'escape\'\n', "from\\\"NY" }))"),
"attr: { name: 'a' type: 'list(string)' allowed_values { list { s: "
"[\"escape'\\n\", 'from\\\\\"NY'] } } }");
}
TEST_F(OpDefBuilderTest, AttrListWithMinLength) {
ExpectSuccess(
b().Attr("i: list(bool) >= 4"),
"attr: { name: 'i' type: 'list(bool)' has_minimum: true minimum: 4 }");
}
TEST_F(OpDefBuilderTest, AttrWithDefaults) {
ExpectSuccess(b().Attr(R"(a:string="foo")"),
"attr: { name: 'a' type: 'string' default_value { s:'foo' } }");
ExpectSuccess(b().Attr(R"(a:string='foo')"),
"attr: { name: 'a' type: 'string' default_value { s:'foo' } }");
ExpectSuccess(b().Attr("a:float = 1.25"),
"attr: { name: 'a' type: 'float' default_value { f: 1.25 } }");
ExpectSuccess(b().Attr("a:tensor = { dtype: DT_INT32 int_val: 5 }"),
"attr: { name: 'a' type: 'tensor' default_value { tensor {"
" dtype: DT_INT32 int_val: 5 } } }");
ExpectSuccess(b().Attr("a:shape = { dim { size: 3 } dim { size: 4 } }"),
"attr: { name: 'a' type: 'shape' default_value { shape {"
" dim { size: 3 } dim { size: 4 } } } }");
ExpectSuccess(b().Attr("a:shape = { dim { size: -1 } dim { size: 4 } }"),
"attr: { name: 'a' type: 'shape' default_value { shape {"
" dim { size: -1 } dim { size: 4 } } } }");
}
TEST_F(OpDefBuilderTest, AttrFailedDefaults) {
ExpectFailure(b().Attr(R"(a:int="foo")"),
"Could not parse default value '\"foo\"' from "
"Attr(\"a:int=\"foo\"\") for Op Test");
ExpectFailure(b().Attr("a:float = [1.25]"),
"Could not parse default value '[1.25]' from Attr(\"a:float = "
"[1.25]\") for Op Test");
}
TEST_F(OpDefBuilderTest, AttrListWithDefaults) {
ExpectSuccess(b().Attr(R"(a:list(string)=["foo", "bar"])"),
"attr: { name: 'a' type: 'list(string)' "
"default_value { list { s: ['foo', 'bar'] } } }");
ExpectSuccess(b().Attr("a:list(bool)=[true, false, true]"),
"attr: { name: 'a' type: 'list(bool)' "
"default_value { list { b: [true, false, true] } } }");
ExpectSuccess(b().Attr(R"(a:list(int)=[0, -1, 2, -4, 8])"),
"attr: { name: 'a' type: 'list(int)' "
"default_value { list { i: [0, -1, 2, -4, 8] } } }");
ExpectSuccess(b().Attr(R"(a:list(int)=[ ])"),
"attr: { name: 'a' type: 'list(int)' "
"default_value { list { i: [] } } }");
}
TEST_F(OpDefBuilderTest, AttrFailedListDefaults) {
ExpectFailure(b().Attr(R"(a:list(int)=["foo"])"),
"Could not parse default value '[\"foo\"]' from "
"Attr(\"a:list(int)=[\"foo\"]\") for Op Test");
ExpectFailure(b().Attr(R"(a:list(int)=[7, "foo"])"),
"Could not parse default value '[7, \"foo\"]' from "
"Attr(\"a:list(int)=[7, \"foo\"]\") for Op Test");
ExpectFailure(b().Attr("a:list(float) = [[1.25]]"),
"Could not parse default value '[[1.25]]' from "
"Attr(\"a:list(float) = [[1.25]]\") for Op Test");
ExpectFailure(b().Attr("a:list(float) = 1.25"),
"Could not parse default value '1.25' from "
"Attr(\"a:list(float) = 1.25\") for Op Test");
ExpectFailure(b().Attr(R"(a:list(string)='foo')"),
"Could not parse default value ''foo'' from "
"Attr(\"a:list(string)='foo'\") for Op Test");
ExpectFailure(b().Attr("a:list(float) = ["),
"Could not parse default value '[' from "
"Attr(\"a:list(float) = [\") for Op Test");
ExpectFailure(b().Attr("a:list(float) = "),
"Could not parse default value '' from "
"Attr(\"a:list(float) = \") for Op Test");
}
TEST_F(OpDefBuilderTest, InputOutput) {
ExpectSuccess(b().Input("a: int32"),
"input_arg: { name: 'a' type: DT_INT32 }");
ExpectSuccess(b().Output("b: string"),
"output_arg: { name: 'b' type: DT_STRING }");
ExpectSuccess(b().Input("c: float "),
"input_arg: { name: 'c' type: DT_FLOAT }");
ExpectSuccess(b().Output("d: Ref ( bool ) "),
"output_arg: { name: 'd' type: DT_BOOL is_ref: true }");
ExpectOrdered(b().Input("a: bool")
.Output("c: complex64")
.Input("b: int64")
.Output("d: string"),
"input_arg: { name: 'a' type: DT_BOOL } "
"input_arg: { name: 'b' type: DT_INT64 } "
"output_arg: { name: 'c' type: DT_COMPLEX64 } "
"output_arg: { name: 'd' type: DT_STRING }");
}
TEST_F(OpDefBuilderTest, PolymorphicInputOutput) {
ExpectSuccess(b().Input("a: foo").Attr("foo: type"),
"input_arg: { name: 'a' type_attr: 'foo' } "
"attr: { name: 'foo' type: 'type' }");
ExpectSuccess(b().Output("a: foo").Attr("foo: { bool, int32 }"),
"output_arg: { name: 'a' type_attr: 'foo' } "
"attr: { name: 'foo' type: 'type' "
"allowed_values: { list { type: [DT_BOOL, DT_INT32] } } }");
}
TEST_F(OpDefBuilderTest, InputOutputListSameType) {
ExpectSuccess(b().Input("a: n * int32").Attr("n: int"),
"input_arg: { name: 'a' number_attr: 'n' type: DT_INT32 } "
"attr: { name: 'n' type: 'int' has_minimum: true minimum: 1 }");
ExpectSuccess(b().Output("b: n * foo").Attr("n: int").Attr("foo: type"),
"output_arg: { name: 'b' number_attr: 'n' type_attr: 'foo' } "
"attr: { name: 'n' type: 'int' has_minimum: true minimum: 1 } "
"attr: { name: 'foo' type: 'type' }");
}
TEST_F(OpDefBuilderTest, InputOutputListAnyType) {
ExpectSuccess(
b().Input("c: foo").Attr("foo: list(type)"),
"input_arg: { name: 'c' type_list_attr: 'foo' } "
"attr: { name: 'foo' type: 'list(type)' has_minimum: true minimum: 1 }");
ExpectSuccess(
b().Output("c: foo").Attr("foo: list({string, float})"),
"output_arg: { name: 'c' type_list_attr: 'foo' } "
"attr: { name: 'foo' type: 'list(type)' has_minimum: true minimum: 1 "
"allowed_values: { list { type: [DT_STRING, DT_FLOAT] } } }");
}
TEST_F(OpDefBuilderTest, InputOutputFailure) {
ExpectFailure(b().Input("9: int32"),
"Trouble parsing 'name:' from Input(\"9: int32\") for Op Test");
ExpectFailure(
b().Output("_: int32"),
"Trouble parsing 'name:' from Output(\"_: int32\") for Op Test");
ExpectFailure(b().Input(": int32"),
"Trouble parsing 'name:' from Input(\": int32\") for Op Test");
ExpectFailure(b().Output("int32"),
"Trouble parsing 'name:' from Output(\"int32\") for Op Test");
ExpectFailure(
b().Input("CAPS: int32"),
"Trouble parsing 'name:' from Input(\"CAPS: int32\") for Op Test");
ExpectFailure(
b().Input("_underscore: int32"),
"Trouble parsing 'name:' from Input(\"_underscore: int32\") for Op Test");
ExpectFailure(
b().Input("0digit: int32"),
"Trouble parsing 'name:' from Input(\"0digit: int32\") for Op Test");
ExpectFailure(b().Input("a: _"),
"Trouble parsing either a type or an attr name at '_' from "
"Input(\"a: _\") for Op Test");
ExpectFailure(b().Input("a: 9"),
"Trouble parsing either a type or an attr name at '9' from "
"Input(\"a: 9\") for Op Test");
ExpectFailure(b().Input("a: 9 * int32"),
"Trouble parsing either a type or an attr name at '9 * int32' "
"from Input(\"a: 9 * int32\") for Op Test");
ExpectFailure(
b().Input("a: x * _").Attr("x: type"),
"Extra '* _' unparsed at the end from Input(\"a: x * _\") for Op Test");
ExpectFailure(b().Input("a: x * y extra").Attr("x: int").Attr("y: type"),
"Extra 'extra' unparsed at the end from Input(\"a: x * y "
"extra\") for Op Test");
ExpectFailure(b().Input("a: Ref(int32"),
"Did not find closing ')' for 'Ref(', instead found: '' from "
"Input(\"a: Ref(int32\") for Op Test");
ExpectFailure(
b().Input("a: Ref"),
"Reference to unknown attr 'Ref' from Input(\"a: Ref\") for Op Test");
ExpectFailure(b().Input("a: Ref(x y").Attr("x: type"),
"Did not find closing ')' for 'Ref(', instead found: 'y' from "
"Input(\"a: Ref(x y\") for Op Test");
ExpectFailure(
b().Input("a: x"),
"Reference to unknown attr 'x' from Input(\"a: x\") for Op Test");
ExpectFailure(
b().Input("a: x * y").Attr("x: int"),
"Reference to unknown attr 'y' from Input(\"a: x * y\") for Op Test");
ExpectFailure(b().Input("a: x").Attr("x: int"),
"Reference to attr 'x' with type int that isn't type or "
"list(type) from Input(\"a: x\") for Op Test");
}
TEST_F(OpDefBuilderTest, Set) {
ExpectSuccess(b().SetIsStateful(), "is_stateful: true");
ExpectSuccess(b().SetIsCommutative().SetIsAggregate(),
"is_commutative: true is_aggregate: true");
}
TEST_F(OpDefBuilderTest, DocUnpackSparseFeatures) {
ExpectOrdered(b().Input("sf: string")
.Output("indices: int32")
.Output("ids: int64")
.Output("weights: float")
.Doc(R"doc(
Converts a vector of strings with dist_belief::SparseFeatures to tensors.
Note that indices, ids and weights are vectors of the same size and have
one-to-one correspondence between their elements. ids and weights are each
obtained by sequentially concatenating sf[i].id and sf[i].weight, for i in
1...size(sf). Note that if sf[i].weight is not set, the default value for the
weight is assumed to be 1.0. Also for any j, if ids[j] and weights[j] were
extracted from sf[i], then index[j] is set to i.
sf: vector of string, where each element is the string encoding of
SparseFeatures proto.
indices: vector of indices inside sf
ids: vector of id extracted from the SparseFeatures proto.
weights: vector of weight extracted from the SparseFeatures proto.
)doc"),
R"proto(
input_arg {
name: "sf"
description: "vector of string, where each element is the string encoding of\nSparseFeatures proto."
type: DT_STRING
}
output_arg {
name: "indices"
description: "vector of indices inside sf"
type: DT_INT32
}
output_arg {
name: "ids"
description: "vector of id extracted from the SparseFeatures proto."
type: DT_INT64
}
output_arg {
name: "weights"
description: "vector of weight extracted from the SparseFeatures proto."
type: DT_FLOAT
}
summary: "Converts a vector of strings with dist_belief::SparseFeatures to tensors."
description: "Note that indices, ids and weights are vectors of the same size and have\none-to-one correspondence between their elements. ids and weights are each\nobtained by sequentially concatenating sf[i].id and sf[i].weight, for i in\n1...size(sf). Note that if sf[i].weight is not set, the default value for the\nweight is assumed to be 1.0. Also for any j, if ids[j] and weights[j] were\nextracted from sf[i], then index[j] is set to i."
)proto");
}
TEST_F(OpDefBuilderTest, DocConcat) {
ExpectOrdered(b().Input("concat_dim: int32")
.Input("values: num_values * dtype")
.Output("output: dtype")
.Attr("dtype: type")
.Attr("num_values: int >= 2")
.Doc(R"doc(
Concatenate N Tensors along one dimension.
concat_dim: The (scalar) dimension along which to concatenate. Must be
in the range [0, rank(values...)).
values: The N Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except concat_dim.
output: A Tensor with the concatenation of values stacked along the
concat_dim dimension. This Tensor's shape matches the Tensors in
values, except in concat_dim where it has the sum of the sizes.
)doc"),
R"proto(
input_arg {
name: "concat_dim"
description: "The (scalar) dimension along which to concatenate. Must be\nin the range [0, rank(values...))."
type: DT_INT32
}
input_arg {
name: "values"
description: "The N Tensors to concatenate. Their ranks and types must match,\nand their sizes must match in all dimensions except concat_dim."
type_attr: "dtype"
number_attr: "num_values"
}
output_arg {
name: "output"
description: "A Tensor with the concatenation of values stacked along the\nconcat_dim dimension. This Tensor\'s shape matches the Tensors in\nvalues, except in concat_dim where it has the sum of the sizes."
type_attr: "dtype"
}
summary: "Concatenate N Tensors along one dimension."
attr {
name: "dtype"
type: "type"
}
attr {
name: "num_values"
type: "int"
has_minimum: true
minimum: 2
}
)proto");
}
TEST_F(OpDefBuilderTest, DocAttr) {
ExpectOrdered(b().Attr("i: int").Doc(R"doc(
Summary
i: How much to operate.
)doc"),
R"proto(
summary: "Summary"
attr {
name: "i"
type: "int"
description: "How much to operate."
}
)proto");
}
TEST_F(OpDefBuilderTest, DocCalledTwiceFailure) {
ExpectFailure(b().Doc("What's").Doc("up, doc?"),
"Extra call to Doc() for Op Test");
}
TEST_F(OpDefBuilderTest, DocFailureMissingName) {
ExpectFailure(
b().Input("a: int32").Doc(R"doc(
Summary
a: Something for a.
b: b is not defined.
)doc"),
"No matching input/output/attr for name 'b' from Doc() for Op Test");
ExpectFailure(
b().Input("a: int32").Doc(R"doc(
Summary
b: b is not defined and by itself.
)doc"),
"No matching input/output/attr for name 'b' from Doc() for Op Test");
}
TEST_F(OpDefBuilderTest, DefaultMinimum) {
ExpectSuccess(b().Input("values: num_values * dtype")
.Output("output: anything")
.Attr("anything: list(type)")
.Attr("dtype: type")
.Attr("num_values: int"),
R"proto(
input_arg {
name: "values"
type_attr: "dtype"
number_attr: "num_values"
}
output_arg {
name: "output"
type_list_attr: "anything"
}
attr {
name: "anything"
type: "list(type)"
has_minimum: true
minimum: 1
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "num_values"
type: "int"
has_minimum: true
minimum: 1
}
)proto");
}
TEST_F(OpDefBuilderTest, SetShapeFn) {
auto fn = [](shape_inference::InferenceContext* c) {
return errors::Unknown("ShapeFn was called");
};
OpShapeInferenceFn fn_out;
ExpectSuccess(
b().SetShapeFn(fn).Attr("dtype: type"),
"attr { name: \"dtype\" type: \"type\" allowed_values { list { } } }",
&fn_out);
ASSERT_TRUE(fn_out != nullptr);
EXPECT_EQ("ShapeFn was called", fn_out(nullptr).message());
}
TEST_F(OpDefBuilderTest, SetShapeFnCalledTwiceFailure) {
auto fn = [](shape_inference::InferenceContext* c) {
return errors::Unknown("ShapeFn was called");
};
ExpectFailure(b().SetShapeFn(fn).SetShapeFn(fn),
"SetShapeFn called twice for Op Test");
}
TEST_F(OpDefBuilderTest, ResourceIsStateful) {
OpRegistrationData op_reg_data;
TF_EXPECT_OK(b().Input("a: resource").Finalize(&op_reg_data));
const OpDef& op_def = op_reg_data.op_def;
EXPECT_TRUE(op_def.is_stateful());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8866eb13-7cf5-4174-912e-2006853b5606 | cpp | tensorflow/tensorflow | function | tensorflow/compiler/mlir/tfrt/function/function.cc | tensorflow/core/tfrt/mlrt/bytecode/function_test.cc | #include "tensorflow/compiler/mlir/tfrt/function/function.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/passes.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tfrt_pipeline_options.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
namespace tensorflow {
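// Runs the TF-executor-to-TFRT lowering pipeline over `module` and serializes
// the lowered module into `bef_buffer`; pass and conversion failures are
// surfaced through the scoped diagnostic handler.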
Status CompileTFMLIRToBEF(const TfrtFunctionCompileOptions& options,
mlir::ModuleOp module, tfrt::BefBuffer* bef_buffer) {
mlir::OpPrintingFlags print_flags;
print_flags.elideLargeElementsAttrs();
if (VLOG_IS_ON(1)) {
VLOG(1) << "Input TF Executor dialect:";
DumpMlirOpToFile("tf_to_tfrt_tf_executor_dialect", module);
}
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
tensorflow::applyTensorflowAndCLOptions(pm);
tensorflow::TfrtPipelineOptions pass_options;
if (!options.default_device.empty()) {
pass_options.default_device = options.default_device;
}
if (!options.force_data_format.empty()) {
pass_options.force_data_format = options.force_data_format;
}
if (absl::StrContains(pass_options.default_device, "CPU")) {
pass_options.skip_fold_transpose_in_ops = true;
}
pass_options.enable_optimizer = options.enable_optimizer;
pass_options.target_tpurt = false;
pass_options.tpu_use_core_selector = options.tpu_use_core_selector;
pass_options.tpu_use_bundled_transfer = options.tpu_use_bundled_transfer;
pass_options.tpu_lower_to_fallback = options.tpu_lower_to_fallback;
pass_options.tpu_fuse_ops = options.tpu_fuse_ops;
pass_options.tpu_transfer_result_to_host =
options.tpu_transfer_result_to_host;
Status status = tensorflow::CreateTfExecutorToTfrtPipeline(pm, pass_options);
if (!status.ok()) {
return diag_handler.Combine(status);
}
if (mlir::failed(pm.run(module)))
return diag_handler.Combine(tensorflow::errors::Internal(
"failed to lower TF Dialect to CoreRT dialect."));
if (VLOG_IS_ON(1)) {
VLOG(1) << "TFRT dialect: ";
DumpMlirOpToFile("tf_to_tfrt_tfrt_dialect", module);
}
*bef_buffer =
tfrt::ConvertMLIRToBEF(module, true);
if (bef_buffer->empty())
return diag_handler.Combine(
tensorflow::errors::Internal("failed to convert MLIR to BEF."));
return absl::OkStatus();
}
} | #include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
namespace mlrt {
namespace bc {
namespace {
TEST(FunctionTest, Function) {
Buffer buffer;
Allocator allocator(&buffer);
Function::Constructor ctor = New<Function>(&allocator);
ctor.construct_name("main");
ctor.set_num_regs(10);
ctor.construct_input_regs(2).Assign({0, 1});
ctor.construct_output_regs(1).Assign({9});
ctor.construct_output_last_uses(1).Assign({true});
ctor.construct_kernels(3);
Function function(buffer.Get(ctor.address()));
EXPECT_EQ(function.name().Get(), "main");
EXPECT_EQ(function.num_regs(), 10);
EXPECT_THAT(function.input_regs(), ::testing::ElementsAreArray({0, 1}));
EXPECT_THAT(function.output_regs(), ::testing::ElementsAreArray({9}));
EXPECT_THAT(function.output_last_uses(), ::testing::ElementsAreArray({true}));
Vector<Kernel> kernels = function.kernels();
EXPECT_EQ(kernels.size(), 3);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/function/function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8bdefdc-6e3e-4dc4-b2fa-2d86de8dd804 | cpp | tensorflow/tensorflow | run_handler_util | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util_test.cc | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h"
#include <cmath>
#include <cstdlib>
#include <string>
#include <vector>
#include "absl/strings/ascii.h"  // absl::AsciiStrToLower
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/str_util.h"
namespace tfrt {
namespace tf {
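// Reads a double-valued parameter from the environment, falling back to
// `default_value` when the variable is unset or unparsable.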
double ParamFromEnvWithDefault(const char* var_name, double default_value) {
const char* val = std::getenv(var_name);
double num;
return (val && tensorflow::strings::safe_strtod(val, &num)) ? num
: default_value;
}
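// List overloads: parse comma-separated values such as "1,2,3"; a single
// malformed element logs an error and falls back to `default_value` wholesale.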
std::vector<double> ParamFromEnvWithDefault(const char* var_name,
std::vector<double> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<std::string> splits = tensorflow::str_util::Split(val, ",");
std::vector<double> result;
result.reserve(splits.size());
for (auto& split : splits) {
double num;
if (tensorflow::strings::safe_strtod(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
std::vector<int> ParamFromEnvWithDefault(const char* var_name,
std::vector<int> default_value) {
const char* val = std::getenv(var_name);
if (!val) {
return default_value;
}
std::vector<std::string> splits = tensorflow::str_util::Split(val, ",");
std::vector<int> result;
result.reserve(splits.size());
for (auto& split : splits) {
int num;
if (tensorflow::strings::safe_strto32(split, &num)) {
result.push_back(num);
} else {
LOG(ERROR) << "Wrong format for " << var_name << ". Use default value.";
return default_value;
}
}
return result;
}
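// Boolean overload: only an unset variable yields `default_value`; a set
// variable reads true iff it equals "true" case-insensitively.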
bool ParamFromEnvBoolWithDefault(const char* var_name, bool default_value) {
const char* val = std::getenv(var_name);
return (val) ? absl::AsciiStrToLower(val) == "true" : default_value;
}
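// Usage sketch (the variable name below is hypothetical):
//   setenv("TF_RUN_HANDLER_THREADS", "8,4", 1);
//   std::vector<int> counts = ParamFromEnvWithDefault(
//       "TF_RUN_HANDLER_THREADS", std::vector<int>{1, 1});  // -> {8, 4}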
}
} | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h"
#include <vector>
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tfrt {
namespace tf {
namespace {
TEST(RunHandlerUtilTest, TestParamFromEnvWithDefault) {
std::vector<double> result = ParamFromEnvWithDefault(
"RUN_HANDLER_TEST_ENV", std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 0);
EXPECT_EQ(result[1], 0);
EXPECT_EQ(result[2], 0);
std::vector<int> result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
EXPECT_EQ(result2.size(), 3);
EXPECT_EQ(result2[0], 0);
EXPECT_EQ(result2[1], 0);
EXPECT_EQ(result2[2], 0);
bool result3 =
ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, false);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV", "1,2,3", true), 0);
result = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<double>{0, 0, 0});
EXPECT_EQ(result.size(), 3);
EXPECT_EQ(result[0], 1);
EXPECT_EQ(result[1], 2);
EXPECT_EQ(result[2], 3);
result2 = ParamFromEnvWithDefault("RUN_HANDLER_TEST_ENV",
std::vector<int>{0, 0, 0});
  EXPECT_EQ(result2.size(), 3);
EXPECT_EQ(result2[0], 1);
EXPECT_EQ(result2[1], 2);
EXPECT_EQ(result2[2], 3);
EXPECT_EQ(setenv("RUN_HANDLER_TEST_ENV_BOOL", "true", true), 0);
result3 = ParamFromEnvBoolWithDefault("RUN_HANDLER_TEST_ENV_BOOL", false);
EXPECT_EQ(result3, true);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94696138-88ad-406c-84d3-0739ede2539f | cpp | tensorflow/tensorflow | full_type_util | tensorflow/core/framework/full_type_util.cc | tensorflow/core/framework/full_type_util_test.cc | #include "tensorflow/core/framework/full_type_util.h"
#include <algorithm>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace full_type {
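// Except for NoOp and NoOutputs, the constructors below attach an
// experimental_full_type annotation to the op's first output argument.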
OpTypeConstructor NoOp() {
return nullptr;
}
OpTypeConstructor NoOutputs() {
return [](OpDef* op_def) {
op_def->mutable_output_arg();
return absl::OkStatus();
};
}
OpTypeConstructor Nullary(FullTypeId t) {
return [t](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
return absl::OkStatus();
};
}
OpTypeConstructor Unary(FullTypeId t, const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s(var_name);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryGeneric(FullTypeId t) {
return [t](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_ANY);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryTensorContainer(FullTypeId t, FullTypeId dtype) {
return [t, dtype](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_TENSOR);
FullTypeDef* targ = arg->add_args();
targ->set_type_id(dtype);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryTensorContainer(FullTypeId t, const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* targ = tdef->add_args();
targ->set_type_id(TFT_TENSOR);
FullTypeDef* varg = targ->add_args();
varg->set_type_id(TFT_VAR);
varg->set_s(var_name);
return absl::OkStatus();
};
}
OpTypeConstructor VariadicTensorContainer(FullTypeId t,
const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* for_each = tdef->add_args();
for_each->set_type_id(TFT_FOR_EACH);
for_each->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* tpl = for_each->add_args();
tpl->set_type_id(TFT_TENSOR);
FullTypeDef* targ = tpl->add_args();
targ->set_type_id(TFT_VAR);
targ->set_s(var_name);
FullTypeDef* tvar = for_each->add_args();
tvar->set_type_id(TFT_VAR);
tvar->set_s(var_name);
return absl::OkStatus();
};
}
namespace {
typedef absl::flat_hash_map<StringPiece, const AttrValue*> AttrMap;
inline Status SubstituteFromAttrs(AttrMap& attrs, FullTypeDef& t);
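// Resolves a TFT_VAR node to the tensor dtype bound to its name in `attrs`;
// a single-element type list is accepted as if it were a scalar type attr.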
Status SubstituteVar(AttrMap& attrs, FullTypeDef& t) {
if (t.args_size() != 0) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Unexpected Var type, expected args_size 0, found ",
t.args_size()));
}
StringPiece var_name = t.s();
if (!attrs.contains(var_name)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("could not find an attribute for key '", var_name, "'"));
}
const AttrValue* attr = attrs.at(var_name);
const auto attr_type = attr->value_case();
if (attr_type == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), t);
} else if (attr_type == AttrValue::kList) {
const auto& attr_list = attr->list();
if (attr_list.type_size() != 1) {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("lists or other than one type element\n",
attr_list.DebugString(), "\nkey=", var_name));
}
map_dtype_to_tensor(attr_list.type(0), t);
} else {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported attribute type ",
attr->DebugString(), " for name ", var_name));
}
t.clear_s();
return absl::OkStatus();
}
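// Expands TFT_FOR_EACH(container, template, var): the template is
// instantiated once per type bound to `var`, so a list attr of N types
// yields a container with N substituted copies.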
Status SubstituteForEach(AttrMap& attrs, FullTypeDef& t) {
if (t.args_size() != 3) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("illegal FOR_EACH type, expected 3 args, got ",
t.args_size()));
}
const auto& cont = t.args(0);
const auto& tmpl = t.args(1);
const auto& t_var = t.args(2);
StringPiece var_name = t_var.s();
if (!attrs.contains(var_name)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("could not find an attribute for key '", var_name, "'"));
}
const AttrValue* attr = attrs.at(var_name);
FullTypeDef result;
result.set_type_id(cont.type_id());
const auto attr_type = attr->value_case();
if (attr_type == AttrValue::kType) {
FullTypeDef* target = result.add_args();
*target = tmpl;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
SubstituteFromAttrs(attrs, *target), "while substituting '", var_name,
"' from\n", attr->DebugString(), "\ninto ", target->DebugString());
} else if (attr_type == AttrValue::kList) {
const auto& attr_list = attr->list();
int tsize = attr_list.type_size();
if (tsize == 0) {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported list attribute type\n",
attr_list.DebugString(), "\nkey=", var_name));
}
AttrValue replacement;
attrs[var_name] = &replacement;
for (int i = 0; i < tsize; i++) {
replacement.set_type(attr_list.type(i));
FullTypeDef* target = result.add_args();
*target = tmpl;
TF_RETURN_WITH_CONTEXT_IF_ERROR(SubstituteFromAttrs(attrs, *target),
"while substituting '", var_name,
"' from\n", attr->DebugString(), "\n[", i,
"] into\n", target->DebugString());
}
attrs[var_name] = attr;
} else {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported attribute type\n",
attr->DebugString(), "\nfor name ", var_name));
}
t = result;
return absl::OkStatus();
}
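// Generic case: substitutes each argument in place; an argument that
// resolves to a TFT_LEGACY_VARIANT tensor erases all args, leaving the
// parent type unparametrized.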
Status SubstituteGeneric(AttrMap& attrs, FullTypeDef& t) {
int nargs = t.args_size();
for (int j = 0; j < nargs; j++) {
FullTypeDef* arg_t = t.mutable_args(j);
TF_RETURN_WITH_CONTEXT_IF_ERROR(SubstituteFromAttrs(attrs, *arg_t),
"while substituting arg ", j, ": ",
arg_t->DebugString());
if (arg_t->type_id() == TFT_TENSOR && arg_t->args_size() &&
arg_t->args(0).type_id() == TFT_LEGACY_VARIANT) {
t.clear_args();
break;
}
}
return absl::OkStatus();
}
inline Status SubstituteFromAttrs(AttrMap& attrs, FullTypeDef& t) {
switch (t.type_id()) {
case TFT_VAR:
return SubstituteVar(attrs, t);
case TFT_FOR_EACH:
return SubstituteForEach(attrs, t);
default:
return SubstituteGeneric(attrs, t);
}
return absl::OkStatus();
}
}
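// Specializes the op's declared output types against the node's attributes
// (including attr defaults not overridden on the node), producing one
// TFT_PRODUCT arg per output.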
Status SpecializeType(const AttrSlice& attrs, const OpDef& op_def,
FullTypeDef& target) {
target.Clear();
target.set_type_id(TFT_PRODUCT);
AttrMap map;
for (const auto& attr : attrs) {
map.emplace(attr.first, &attr.second);
}
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value() && !attrs.Find(attr_def.name())) {
map.emplace(attr_def.name(), &attr_def.default_value());
}
}
int nargs = op_def.output_arg_size();
for (int i = 0; i < nargs; i++) {
auto& t = *(target.add_args());
t = op_def.output_arg(i).experimental_full_type();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
SubstituteFromAttrs(map, t), "while expanding vars of\n",
t.DebugString(), "\nfrom\n", attrs.SummarizeNode());
}
return absl::OkStatus();
}
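// Out-of-range argument lookups default to TFT_UNSET here and to TFT_ANY
// below; the heap-allocated function-local statics are never deleted, which
// is deliberate for process-lifetime singletons.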
const FullTypeDef& GetArgDefaultUnset(const FullTypeDef& t, int i) {
static FullTypeDef* unset_type = []() {
FullTypeDef* t = new FullTypeDef();
return t;
}();
if (i < t.args_size()) {
return t.args(i);
}
return *unset_type;
}
const FullTypeDef& GetArgDefaultAny(const FullTypeDef& t, int i) {
static FullTypeDef* any_type = []() {
FullTypeDef* t = new FullTypeDef();
t->set_type_id(TFT_ANY);
return t;
}();
if (i < t.args_size()) {
const FullTypeDef& f_val = t.args(i);
if (f_val.type_id() == TFT_UNSET) {
return *any_type;
}
return f_val;
}
return *any_type;
}
bool IsEqual(const FullTypeDef& lhs, const FullTypeDef& rhs) {
if (lhs.type_id() != rhs.type_id()) {
return false;
}
const auto& lhs_s = lhs.s();
const auto& rhs_s = rhs.s();
if (lhs_s.empty()) {
if (!rhs_s.empty()) {
return false;
}
} else if (rhs_s != lhs_s) {
return false;
}
for (int i = 0; i < std::max(lhs.args_size(), rhs.args_size()); i++) {
const FullTypeDef& lhs_arg = GetArgDefaultAny(lhs, i);
const FullTypeDef& rhs_arg = GetArgDefaultAny(rhs, i);
if (!IsEqual(lhs_arg, rhs_arg)) {
return false;
}
}
return true;
}
uint64_t Hash(const FullTypeDef& arg) {
uint64_t val = Hash64Combine(arg.type_id(), 0);
const auto& arg_s = arg.s();
val = Hash64Combine(val, Hash64(arg_s));
for (int i = 0, e = arg.args_size(); i < e; ++i) {
const FullTypeDef& arg_arg = GetArgDefaultAny(arg, i);
val = Hash64Combine(val, Hash(arg_arg));
}
return val;
}
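// Structural subtyping: TFT_ANY and TFT_UNSET on the right accept anything,
// a TFT_LEGACY_VARIANT tensor acts as a top type for tensors, an encoded
// type is judged by its second argument, and argument variance flips when
// `covariant` is false.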
bool IsSubtype(const FullTypeDef& lhs, const FullTypeDef& rhs, bool covariant) {
if (rhs.type_id() == TFT_ANY) {
return true;
}
if (rhs.type_id() == TFT_UNSET) {
return true;
}
if ((rhs.type_id() == TFT_TENSOR) &&
(GetArgDefaultUnset(rhs, 0).type_id() == TFT_LEGACY_VARIANT)) {
return true;
}
if (lhs.type_id() == TFT_ENCODED) {
return IsSubtype(GetArgDefaultAny(lhs, 1), rhs, true);
}
if (lhs.type_id() != rhs.type_id()) {
return false;
}
for (int i = 0; i < std::max(lhs.args_size(), rhs.args_size()); i++) {
const FullTypeDef& lhs_arg = GetArgDefaultAny(lhs, i);
const FullTypeDef& rhs_arg = GetArgDefaultAny(rhs, i);
if (covariant) {
if (!IsSubtype(lhs_arg, rhs_arg)) {
return false;
}
} else {
if (!IsSubtype(rhs_arg, lhs_arg)) {
return false;
}
}
}
return true;
}
}
} | #include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace full_type {
namespace {
TEST(Nullary, Basic) {
OpTypeConstructor ctor = Nullary(TFT_TENSOR);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 0);
}
TEST(Unary, Basic) {
OpTypeConstructor ctor = Unary(TFT_TENSOR, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args_size(), 0);
EXPECT_EQ(t.args(0).s(), "T");
}
TEST(UnaryGeneric, Basic) {
OpTypeConstructor ctor = UnaryGeneric(TFT_TENSOR);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_ANY);
EXPECT_EQ(t.args(0).args_size(), 0);
}
TEST(UnaryTensorContainer, Fixed) {
OpTypeConstructor ctor = UnaryTensorContainer(TFT_ARRAY, TFT_INT32);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args_size(), 1);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
}
TEST(UnaryTensorContainer, Dependent) {
OpTypeConstructor ctor = UnaryTensorContainer(TFT_ARRAY, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args_size(), 1);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(0).s(), "T");
}
TEST(VariadicTensorContainer, Basic) {
OpTypeConstructor ctor = VariadicTensorContainer(TFT_ARRAY, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_FOR_EACH);
EXPECT_EQ(t.args(0).args_size(), 3);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args(1).args_size(), 1);
EXPECT_EQ(t.args(0).args(1).args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(1).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(1).args(0).s(), "T");
EXPECT_EQ(t.args(0).args(2).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(2).args_size(), 0);
EXPECT_EQ(t.args(0).args(2).s(), "T");
}
TEST(SpecializeType, Fixed) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_INT32);
t->add_args()->set_type_id(TFT_DATASET);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_DATASET);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
}
TEST(SpecializeType, Idempotence) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_INT32);
t->add_args()->set_type_id(TFT_DATASET);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
}
TEST(SpecializeType, VarExpandsFromSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarExpandsFromDefaultForSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
OpDef::AttrDef* attr_with_default = op.add_attr();
attr_with_default->set_name("T");
(*attr_with_default->mutable_default_value()) = attr;
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarExpandsFromSingleElementTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarRejectsMultipleElementTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_FLOAT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, VarRejectsEmptyTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list();
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, ForEachExpandsFromSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachExpandsFromListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_FLOAT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachDistributesNestedVar) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("ForEachTarget");
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(1)->set_s("GlobalVar");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("ForEachTarget");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_INT64);
(*ndef.mutable_attr())["ForEachTarget"] = attr;
attr.set_type(DT_FLOAT);
(*ndef.mutable_attr())["GlobalVar"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(0).args(1).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(1).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(1).args_size(), 2);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_INT64);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).args(1).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(1).args_size(), 0);
}
TEST(SpecializeType, ForEachDistributesNestedForEach) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* inner = t->add_args();
inner->set_type_id(TFT_FOR_EACH);
inner->add_args()->set_type_id(TFT_PRODUCT);
inner->add_args()->set_type_id(TFT_ARRAY);
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(0)->set_s("InnerForEach");
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(1)->set_s("OuterForEach");
inner->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(2)->set_s("InnerForEach");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("OuterForEach");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_INT64);
(*ndef.mutable_attr())["OuterForEach"] = attr;
attr.set_type(DT_FLOAT);
(*ndef.mutable_attr())["InnerForEach"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(0).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(0).args(0).args(1).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args(1).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(1).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).args(0).args(1).type_id(), TFT_INT64);
EXPECT_EQ(t_actual.args(1).args(0).args(1).args_size(), 0);
}
TEST(SpecializeType, ForEachOverridesTargetOfNestedForEach) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* inner = t->add_args();
inner->set_type_id(TFT_FOR_EACH);
inner->add_args()->set_type_id(TFT_PRODUCT);
inner->add_args()->set_type_id(TFT_ARRAY);
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(0)->set_s("T");
inner->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(2)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_FLOAT);
attr.mutable_list()->add_type(DT_DOUBLE);
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).args(0).type_id(), TFT_DOUBLE);
EXPECT_EQ(t_actual.args(1).args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachRejectsMalformedInput) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, VarShouldHaveNoArgs) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_VAR);
t->add_args()->set_type_id(TFT_PRODUCT);
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, RemovesLegacyVariant) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_LEGACY_VARIANT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 0);
}
TEST(SpecializeType, RemovesLegacyVariantAfterExpansion) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_VARIANT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 0);
}
TEST(GetArgDefaults, DefaultUnsetFromNoArgs) {
FullTypeDef t;
const auto& d = GetArgDefaultUnset(t, 0);
EXPECT_EQ(d.type_id(), TFT_UNSET);
}
TEST(GetArgDefaults, DefaultUnsetFromOutOfBounds) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
const auto& d = GetArgDefaultUnset(t, 1);
EXPECT_EQ(d.type_id(), TFT_UNSET);
}
TEST(GetArgDefaults, NoDefaultUnsetFromArg) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args();
const auto& d = GetArgDefaultUnset(t, 0);
EXPECT_EQ(d.type_id(), TFT_TENSOR);
EXPECT_EQ(d.args_size(), 1);
}
TEST(GetArgDefaults, DefaultAnyFromNoArgs) {
FullTypeDef t;
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, DefaultAnyFromOutOfBounds) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
const auto& d = GetArgDefaultAny(t, 1);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, DefaultAnyFromUnset) {
FullTypeDef t;
t.add_args();
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, NoDefaultAnyFromArg) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args();
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_TENSOR);
EXPECT_EQ(d.args_size(), 1);
}
TEST(IsEqual, Reflexivity) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
EXPECT_TRUE(IsEqual(t, t));
}
TEST(IsEqual, Copy) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
EXPECT_TRUE(IsEqual(t, u));
EXPECT_TRUE(IsEqual(u, t));
}
TEST(IsEqual, DifferentTypesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.set_type_id(TFT_ARRAY);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, DifferentAritiesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, MissingArgsEquivalentToAny) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_ANY);
EXPECT_TRUE(IsEqual(t, u));
EXPECT_TRUE(IsEqual(u, t));
}
TEST(IsEqual, DifferentArgsNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.mutable_args(1)->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, DifferentStringValuesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_VAR);
t.set_s("T");
FullTypeDef u;
u = t;
u.set_type_id(TFT_VAR);
u.set_s("U");
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsSubtype, Reflexivity) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
EXPECT_TRUE(IsSubtype(t, t));
}
TEST(IsSubtype, Copy) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
EXPECT_TRUE(IsSubtype(t, u));
}
TEST(IsSubtype, Any) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u.set_type_id(TFT_ANY);
EXPECT_TRUE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, Unset) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u.set_type_id(TFT_UNSET);
EXPECT_TRUE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, Covariance) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_ARRAY);
t.mutable_args(0)->add_args()->set_type_id(TFT_INT32);
FullTypeDef u;
u.set_type_id(TFT_TENSOR);
u.add_args()->set_type_id(TFT_ANY);
EXPECT_TRUE(IsSubtype(t, u, true));
EXPECT_FALSE(IsSubtype(u, t, true));
EXPECT_FALSE(IsSubtype(t, u, false));
EXPECT_TRUE(IsSubtype(u, t, false));
}
TEST(IsSubtype, DifferentTypesNotSubtype) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.set_type_id(TFT_ARRAY);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, DifferentAritiesDefaultToAny) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_TRUE(IsSubtype(u, t));
}
TEST(IsSubtype, DifferentArgsNotSubtype) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.mutable_args(1)->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b676bba-ee4c-4fe7-a438-1f7f7ad0d210 | cpp | tensorflow/tensorflow | variant | tensorflow/lite/core/async/interop/variant.cc | tensorflow/lite/core/async/interop/variant_test.cc | #include "tensorflow/lite/core/async/interop/variant.h"
#include <cstring>
#include <utility>
namespace tflite {
namespace interop {
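// A default-constructed Variant carries the kInvalid tag; equality first
// compares tags, then the active member, matching C strings by pointer or
// by strcmp. E.g. (mirroring the unit test): Variant a(42), b(a);
// a == b holds until b.Set(21).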
Variant::Variant() {
type = kInvalid;
val.i = 0;
}
bool Variant::operator==(const Variant& other) const {
if (type != other.type) return false;
switch (type) {
case kInvalid:
return true;
case kInt:
return val.i == other.val.i;
case kSizeT:
return val.s == other.val.s;
case kString:
return (val.c == other.val.c) || (strcmp(val.c, other.val.c) == 0);
case kBool:
return val.b == other.val.b;
}
}
bool Variant::operator!=(const Variant& other) const {
return !(*this == other);
}
}
} | #include "tensorflow/lite/core/async/interop/variant.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
namespace tflite::interop {
namespace {
TEST(VariantTest, IntTest) {
{
Variant a(1);
EXPECT_EQ(1, *a.Get<int>());
}
{
Variant a(1);
a.Set(2);
EXPECT_EQ(2, *a.Get<int>());
}
{
Variant a(42);
Variant b(a);
EXPECT_EQ(42, *b.Get<int>());
}
{
Variant a(42);
EXPECT_EQ(42, *static_cast<const int*>(a.GetPtr()));
}
{
Variant a(42);
Variant b(42);
EXPECT_EQ(a, b);
b.Set(21);
EXPECT_NE(a, b);
}
}
TEST(VariantTest, SizeTTest) {
{
size_t v = 1;
Variant a(v);
EXPECT_EQ(1, *a.Get<size_t>());
}
{
size_t v = 1;
Variant a(v);
size_t t = 2;
a.Set(t);
EXPECT_EQ(2, *a.Get<size_t>());
}
{
size_t v = 42;
Variant a(v);
Variant b(a);
EXPECT_EQ(42, *b.Get<size_t>());
}
{
size_t v = 42;
Variant a(v);
EXPECT_EQ(42, *static_cast<const size_t*>(a.GetPtr()));
}
{
Variant a(size_t(42));
Variant b(size_t(42));
EXPECT_EQ(a, b);
b.Set(size_t(21));
EXPECT_NE(a, b);
}
}
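// Strings are compared by content (see Variant::operator==), so two Variants
// built from different pointers holding identical text compare equal.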
TEST(VariantTest, StringTest) {
{
const char v[] = "string";
Variant a(v);
EXPECT_EQ(v, *a.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
const char t[] = "another string";
a.Set(t);
EXPECT_EQ(t, *a.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
Variant b(a);
EXPECT_EQ(v, *b.Get<const char*>());
}
{
const char v[] = "string";
Variant a(v);
EXPECT_EQ(v, *static_cast<const char* const*>(a.GetPtr()));
}
{
const char v[] = "string";
Variant a(v);
std::string str = "string";
Variant b(str.c_str());
EXPECT_EQ(a, b);
b.Set("another string");
EXPECT_NE(a, b);
}
}
TEST(VariantTest, TypeNotMatch) {
Variant a(1);
EXPECT_EQ(nullptr, a.Get<size_t>());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/variant.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/variant_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eef8dca3-7df6-482a-8512-d6ab41154110 | cpp | tensorflow/tensorflow | tensor_testutil | tensorflow/core/framework/tensor_testutil.cc | tensorflow/core/framework/tensor_testutil_test.cc | #include "tensorflow/core/framework/tensor_testutil.h"
#include <cmath>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace test {
::testing::AssertionResult IsSameType(const Tensor& x, const Tensor& y) {
if (x.dtype() != y.dtype()) {
return ::testing::AssertionFailure()
<< "Tensors have different dtypes (" << x.dtype() << " vs "
<< y.dtype() << ")";
}
return ::testing::AssertionSuccess();
}
::testing::AssertionResult IsSameShape(const Tensor& x, const Tensor& y) {
if (!x.IsSameSize(y)) {
return ::testing::AssertionFailure()
<< "Tensors have different shapes (" << x.shape().DebugString()
<< " vs " << y.shape().DebugString() << ")";
}
return ::testing::AssertionSuccess();
}
template <typename T>
static ::testing::AssertionResult EqualFailure(const T& x, const T& y) {
return ::testing::AssertionFailure()
<< std::setprecision(std::numeric_limits<T>::digits10 + 2) << x
<< " not equal to " << y;
}
template <>
::testing::AssertionResult EqualFailure<int8>(const int8& x, const int8& y) {
return EqualFailure(static_cast<int>(x), static_cast<int>(y));
}
static ::testing::AssertionResult IsEqual(float x, float y, Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (t == Tolerance::kNone) {
if (x == y) return ::testing::AssertionSuccess();
} else {
if (::testing::internal::CmpHelperFloatingPointEQ<float>("", "", x, y))
return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(double x, double y, Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (t == Tolerance::kNone) {
if (x == y) return ::testing::AssertionSuccess();
} else {
if (::testing::internal::CmpHelperFloatingPointEQ<double>("", "", x, y))
return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(Eigen::half x, Eigen::half y,
Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))
return EqualFailure(x, y);
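  // Map the sign-and-magnitude float bit pattern onto a monotonically
  // increasing biased encoding so the ULP distance between two values is a
  // plain unsigned difference.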
auto sign_and_magnitude_to_biased = [](uint16_t sam) {
const uint16_t kSignBitMask = 0x8000;
if (kSignBitMask & sam) return ~sam + 1;
return kSignBitMask | sam;
};
auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x));
auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y));
if (t == Tolerance::kNone) {
if (xb == yb) return ::testing::AssertionSuccess();
} else {
auto distance = xb >= yb ? xb - yb : yb - xb;
const uint16_t kMaxUlps = 4;
if (distance <= kMaxUlps) return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(tsl::bfloat16 x, tsl::bfloat16 y,
Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))
return EqualFailure(x, y);
auto sign_and_magnitude_to_biased = [](uint16_t sam) {
const uint16_t kSignBitMask = 0x8000;
if (kSignBitMask & sam) return ~sam + 1;
return kSignBitMask | sam;
};
auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x));
auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y));
if (t == Tolerance::kNone) {
if (xb == yb) return ::testing::AssertionSuccess();
} else {
auto distance = xb >= yb ? xb - yb : yb - xb;
const uint16_t kMaxUlps = 4;
if (distance <= kMaxUlps) return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
template <typename T>
static ::testing::AssertionResult IsEqual(const T& x, const T& y, Tolerance t) {
if (::testing::internal::CmpHelperEQ<T>("", "", x, y))
return ::testing::AssertionSuccess();
return EqualFailure(x, y);
}
template <typename T>
static ::testing::AssertionResult IsEqual(const std::complex<T>& x,
const std::complex<T>& y,
Tolerance t) {
if (IsEqual(x.real(), y.real(), t) && IsEqual(x.imag(), y.imag(), t))
return ::testing::AssertionSuccess();
return EqualFailure(x, y);
}
template <typename T>
static void ExpectEqual(const Tensor& x, const Tensor& y,
Tolerance t = Tolerance::kDefault) {
const T* Tx = x.unaligned_flat<T>().data();
const T* Ty = y.unaligned_flat<T>().data();
auto size = x.NumElements();
int max_failures = 10;
int num_failures = 0;
for (decltype(size) i = 0; i < size; ++i) {
EXPECT_TRUE(IsEqual(Tx[i], Ty[i], t)) << "i = " << (++num_failures, i);
ASSERT_LT(num_failures, max_failures) << "Too many mismatches, giving up.";
}
}
template <typename T>
static ::testing::AssertionResult IsClose(const T& x, const T& y, const T& atol,
const T& rtol) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (x == y) return ::testing::AssertionSuccess();
auto tolerance = atol + rtol * Eigen::numext::abs(x);
if (Eigen::numext::abs(x - y) <= tolerance)
return ::testing::AssertionSuccess();
return ::testing::AssertionFailure() << x << " not close to " << y;
}
template <typename T>
static ::testing::AssertionResult IsClose(const std::complex<T>& x,
const std::complex<T>& y,
const T& atol, const T& rtol) {
if (IsClose(x.real(), y.real(), atol, rtol) &&
IsClose(x.imag(), y.imag(), atol, rtol))
return ::testing::AssertionSuccess();
return ::testing::AssertionFailure() << x << " not close to " << y;
}
template <typename T>
static auto GetTolerance(double tolerance) {
using Real = typename Eigen::NumTraits<T>::Real;
auto default_tol = static_cast<Real>(5.0) * Eigen::NumTraits<T>::epsilon();
auto result = tolerance < 0.0 ? default_tol : static_cast<Real>(tolerance);
EXPECT_GE(result, static_cast<Real>(0));
return result;
}
template <typename T>
static void ExpectClose(const Tensor& x, const Tensor& y, double atol,
double rtol) {
auto typed_atol = GetTolerance<T>(atol);
auto typed_rtol = GetTolerance<T>(rtol);
const T* Tx = x.unaligned_flat<T>().data();
const T* Ty = y.unaligned_flat<T>().data();
auto size = x.NumElements();
int max_failures = 10;
int num_failures = 0;
for (decltype(size) i = 0; i < size; ++i) {
EXPECT_TRUE(IsClose(Tx[i], Ty[i], typed_atol, typed_rtol))
<< "i = " << (++num_failures, i) << " Tx[i] = " << Tx[i]
<< " Ty[i] = " << Ty[i];
ASSERT_LT(num_failures, max_failures)
<< "Too many mismatches (atol = " << atol << " rtol = " << rtol
<< "), giving up.";
}
EXPECT_EQ(num_failures, 0)
<< "Mismatches detected (atol = " << atol << " rtol = " << rtol << ").";
}
void ExpectEqual(const Tensor& x, const Tensor& y, Tolerance t) {
ASSERT_TRUE(IsSameType(x, y));
ASSERT_TRUE(IsSameShape(x, y));
switch (x.dtype()) {
case DT_FLOAT:
return ExpectEqual<float>(x, y, t);
case DT_DOUBLE:
return ExpectEqual<double>(x, y, t);
case DT_INT32:
return ExpectEqual<int32>(x, y);
case DT_UINT32:
return ExpectEqual<uint32>(x, y);
case DT_UINT16:
return ExpectEqual<uint16>(x, y);
case DT_UINT8:
return ExpectEqual<uint8>(x, y);
case DT_INT16:
return ExpectEqual<int16>(x, y);
case DT_INT8:
return ExpectEqual<int8>(x, y);
case DT_STRING:
return ExpectEqual<tstring>(x, y);
case DT_COMPLEX64:
return ExpectEqual<complex64>(x, y, t);
case DT_COMPLEX128:
return ExpectEqual<complex128>(x, y, t);
case DT_INT64:
return ExpectEqual<int64_t>(x, y);
case DT_UINT64:
return ExpectEqual<uint64>(x, y);
case DT_BOOL:
return ExpectEqual<bool>(x, y);
case DT_QINT8:
return ExpectEqual<qint8>(x, y);
case DT_QUINT8:
return ExpectEqual<quint8>(x, y);
case DT_QINT16:
return ExpectEqual<qint16>(x, y);
case DT_QUINT16:
return ExpectEqual<quint16>(x, y);
case DT_QINT32:
return ExpectEqual<qint32>(x, y);
case DT_BFLOAT16:
return ExpectEqual<bfloat16>(x, y, t);
case DT_HALF:
return ExpectEqual<Eigen::half>(x, y, t);
case DT_FLOAT8_E5M2:
return ExpectEqual<float8_e5m2>(x, y, t);
case DT_FLOAT8_E4M3FN:
return ExpectEqual<float8_e4m3fn>(x, y, t);
case DT_INT4:
return ExpectEqual<int4>(x, y, t);
case DT_UINT4:
return ExpectEqual<uint4>(x, y, t);
default:
EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype());
}
}
void ExpectClose(const Tensor& x, const Tensor& y, double atol, double rtol) {
ASSERT_TRUE(IsSameType(x, y));
ASSERT_TRUE(IsSameShape(x, y));
switch (x.dtype()) {
case DT_HALF:
return ExpectClose<Eigen::half>(x, y, atol, rtol);
case DT_BFLOAT16:
return ExpectClose<Eigen::bfloat16>(x, y, atol, rtol);
case DT_FLOAT:
return ExpectClose<float>(x, y, atol, rtol);
case DT_DOUBLE:
return ExpectClose<double>(x, y, atol, rtol);
case DT_COMPLEX64:
return ExpectClose<complex64>(x, y, atol, rtol);
case DT_COMPLEX128:
return ExpectClose<complex128>(x, y, atol, rtol);
default:
EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype());
}
}
::testing::AssertionResult internal_test::IsClose(Eigen::half x, Eigen::half y,
double atol, double rtol) {
return test::IsClose(x, y, GetTolerance<Eigen::half>(atol),
GetTolerance<Eigen::half>(rtol));
}
::testing::AssertionResult internal_test::IsClose(float x, float y, double atol,
double rtol) {
return test::IsClose(x, y, GetTolerance<float>(atol),
GetTolerance<float>(rtol));
}
::testing::AssertionResult internal_test::IsClose(double x, double y,
double atol, double rtol) {
return test::IsClose(x, y, GetTolerance<double>(atol),
GetTolerance<double>(rtol));
}
}
} | #include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace test {
namespace {
using internal_test::IsClose;
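// Edge cases shared by all floating-point widths: infinities, lowest/highest
// with finite and infinite tolerance, and NaN-vs-NaN handling.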
template <typename T>
void TestEdgeCasesNear() {
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(),
Eigen::NumTraits<T>::infinity(), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_FALSE(
IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(),
static_cast<double>(Eigen::NumTraits<T>::highest()), 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<double>::infinity(), 0.0));
}
template <typename T, typename U>
void dumpFloatingPointStorage(T value) {
U* integral = reinterpret_cast<U*>(&value);
int shift_amount = (sizeof(U) << 3) - 1;
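  // The exponent width works out to 5 bits for 16-bit, 8 bits for 32-bit, and
  // 11 bits for 64-bit floats, i.e. 2 + 3 * log2(sizeof(U)).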
int exponent_bits = 2 + (log2(sizeof(U)) * 3);
U mask = static_cast<U>(1) << shift_amount;
for (int bits = 0; bits <= shift_amount; ++bits) {
std::cout << ((*integral & mask) > 0);
if (bits == 0 || bits == exponent_bits) std::cout << " ";
mask >>= 1;
}
std::cout << std::endl;
printf("%.20lf\n", static_cast<double>(value));
}
TEST(TensorTestUtilTest, ExpectTensorNearHalf) {
typedef Eigen::half T;
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0));
EXPECT_TRUE(IsClose(static_cast<T>(0.0f), static_cast<T>(-0.0f), 0.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141592f), 0.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.0078125, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.007, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.5, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.4, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(1234.f), static_cast<T>(1235.f), 1.0, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 0.5, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 1.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(-2.71f), static_cast<T>(-2.72f), 0.01, 0.0));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearFloat) {
typedef float T;
EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(0.0f, -0.0f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(3.14159265359f, 3.14159265359f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(8.9875f, 8.9876f, 0.0001002f, 0.0f));
EXPECT_FALSE(IsClose(8.9875f, 8.9876f, 0.0001f, 0.0f));
EXPECT_TRUE(IsClose(720.2017f, 720.2018f, 0.0001f, 0.0f));
EXPECT_FALSE(IsClose(720.20175f, 720.20185f, 0.0001f, 0.0f));
EXPECT_TRUE(IsClose(720.20175f, 720.20185f, 0.00013f, 0.0f));
EXPECT_FALSE(IsClose(123456788.f, 123456789.f, 4.0f, 0.0f));
EXPECT_TRUE(IsClose(123456788.f, 123456789.f, 8.0f, 0.0f));
EXPECT_TRUE(IsClose(-2.718281f, -2.718282f, 0.1f, 0.0f));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearDouble) {
typedef double T;
EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));
EXPECT_TRUE(IsClose(0.0, -0.0, 0.0, 0.0));
EXPECT_TRUE(IsClose(3.14159265359, 3.14159265359, 0.0, 0.0));
EXPECT_TRUE(IsClose(8.9875, 8.9876, 0.0001, 0.0));
EXPECT_FALSE(IsClose(100720.2018, 100720.2019, 0.0001, 0.0));
EXPECT_TRUE(IsClose(100720.2018, 100720.2019, 1.00000005e-4, 0.0));
EXPECT_FALSE(IsClose(12345678901234567., 12345678901234566., 1.0, 0.0));
EXPECT_TRUE(IsClose(12345678901234567., 12345678901234566., 2.0, 0.0));
EXPECT_FALSE(IsClose(-2.71828182846, -2.71828182847, 1.0e-11, 0.0));
EXPECT_TRUE(IsClose(-2.71828182846, -2.71828182847, 1.00000009e-11, 0.0));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearSlice) {
Tensor x(DT_FLOAT, TensorShape({7, 3}));
test::FillFn<float>(&x, [](int i) { return 1.0f; });
test::ExpectTensorNear<float>(
x.SubSlice(3), test::AsTensor<float>({1.0, 1.0, 1.0}, TensorShape({3})),
1e-10);
}
template <typename T>
void TestEdgeCasesClose() {
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(),
Eigen::NumTraits<T>::infinity(), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
Eigen::NumTraits<double>::infinity(),
Eigen::NumTraits<double>::infinity()));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
static_cast<double>(Eigen::NumTraits<T>::highest()),
static_cast<double>(Eigen::NumTraits<T>::highest())));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<double>::infinity(), 0.0));
}
TEST(TensorTestUtilTest, ExpectTensorCloseHalf) {
typedef Eigen::half T;
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.1, 0.1));
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0));
EXPECT_FALSE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.0, 0.0));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.234f)));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.233f)));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.235f)));
EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f)));
EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f)));
EXPECT_TRUE(
IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f), 8e-4f, 1e-3f));
EXPECT_TRUE(
IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f), 1.4e-3f, 5e-4f));
EXPECT_TRUE(
IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141593f), 0.0, 0.0));
EXPECT_FALSE(IsClose(static_cast<T>(1e4f), static_cast<T>(1e-4f)));
TestEdgeCasesClose<T>();
}
TEST(TensorTestUtilTest, ExpectTensorCloseFloat) {
typedef float T;
EXPECT_TRUE(IsClose(1.0f, 1.1f, 0.1f, 0.1f));
EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));
EXPECT_FALSE(IsClose(1.0f, 1.1f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(1.234567f, 1.234567f));
EXPECT_TRUE(IsClose(1.234567f, 1.234568f));
EXPECT_TRUE(IsClose(1.234567f, 1.234566f));
EXPECT_FALSE(IsClose(1.234567f, 1.234569f));
EXPECT_FALSE(IsClose(1.234567f, 1.234565f));
EXPECT_TRUE(IsClose(1.234567f, 1.234569f, 8e-7f, 1e-6f));
EXPECT_TRUE(IsClose(1.234567f, 1.234565f, 3e-7f, 1.5e-6f));
EXPECT_TRUE(IsClose(3.14159265f, 3.14159266f, 0.0f, 0.0f));
EXPECT_FALSE(IsClose(1e8f, 1e-8f));
EXPECT_FALSE(IsClose(1e15f, 1e-15f));
TestEdgeCasesClose<T>();
}
TEST(TensorTestUtilTest, ExpectTensorCloseDouble) {
typedef double T;
EXPECT_TRUE(IsClose(1.0, 1.1, 0.1, 0.1));
EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));
EXPECT_FALSE(IsClose(1.0, 1.1, 0.0, 0.0));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123456));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123457));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123455));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123458));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123454));
EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123459));
EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123453));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123459, 9.5e-16, 1.6e-15));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123453, 7e-16, 2e-15));
EXPECT_TRUE(IsClose(3.141592653589793238, 3.141592653589793239, 0.0, 0.0));
EXPECT_FALSE(IsClose(1e15, 1e-15));
EXPECT_FALSE(IsClose(1e30, 1e-30));
TestEdgeCasesClose<T>();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31c1fe9d-b92e-40d4-9b3a-0cd0e17a79e6 | cpp | tensorflow/tensorflow | dataset | tensorflow/core/framework/dataset.cc | tensorflow/core/framework/dataset_test.cc | #include "tensorflow/core/framework/dataset.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#if defined(PLATFORM_WINDOWS)
#undef GetMessage
#endif
namespace tensorflow {
namespace data {
namespace {
static mutex* get_dataset_op_registry_lock() {
static mutex dataset_op_registry_lock(LINKER_INITIALIZED);
return &dataset_op_registry_lock;
}
static std::unordered_set<string>* get_dataset_op_registry() {
static std::unordered_set<string>* names = new std::unordered_set<string>;
return names;
}
std::string UniqueNodeName(const std::string& base) {
static std::atomic<int64_t> counter(0);
return strings::StrCat(base, "/", counter.fetch_add(1));
}
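// Wraps a DatasetBase* so it can travel inside a DT_VARIANT scalar tensor.
// Copies take a reference on the dataset and the destructor releases it, so
// the wrapped pointer stays alive for the wrapper's lifetime.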
class DatasetVariantWrapper {
public:
DatasetVariantWrapper() : dataset_(nullptr) {}
explicit DatasetVariantWrapper(DatasetBase* dataset) : dataset_(dataset) {}
DatasetVariantWrapper(const DatasetVariantWrapper& other)
: dataset_(other.dataset_) {
if (dataset_) dataset_->Ref();
}
DatasetVariantWrapper& operator=(DatasetVariantWrapper&& other) {
if (&other == this) return *this;
std::swap(dataset_, other.dataset_);
return *this;
}
DatasetVariantWrapper& operator=(const DatasetVariantWrapper& other) = delete;
~DatasetVariantWrapper() {
if (dataset_) dataset_->Unref();
}
DatasetBase* get() const { return dataset_; }
string TypeName() const { return "tensorflow::DatasetVariantWrapper"; }
string DebugString() const {
if (dataset_) {
return dataset_->DebugString();
} else {
return "<Uninitialized DatasetVariantWrapper>";
}
}
void Encode(VariantTensorData* data) const {
LOG(ERROR) << "The Encode() method is not implemented for "
"DatasetVariantWrapper objects.";
}
bool Decode(const VariantTensorData& data) {
LOG(ERROR) << "The Decode() method is not implemented for "
"DatasetVariantWrapper objects.";
return false;
}
private:
DatasetBase* dataset_;
};
const char kWrappedDatasetVariantTypeName[] =
"tensorflow::data::WrappedDatasetVariant";
class WrappedDatasetVariantWrapper {
public:
WrappedDatasetVariantWrapper() {}
explicit WrappedDatasetVariantWrapper(const Tensor& ds_tensor)
: ds_tensor_(ds_tensor) {}
Tensor get() const { return ds_tensor_; }
string TypeName() const { return "tensorflow::WrappedDatasetVariantWrapper"; }
string DebugString() const {
return "tensorflow::WrappedDatasetVariantWrapper::DebugString";
}
void Encode(VariantTensorData* data) const {
*(data->add_tensors()) = ds_tensor_;
}
bool Decode(const VariantTensorData& data) {
ds_tensor_ = data.tensors(0);
return true;
}
private:
Tensor ds_tensor_;
};
class WrapDatasetVariantOp : public OpKernel {
public:
explicit WrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& tensor = ctx->input(0);
OP_REQUIRES(ctx,
tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()),
errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT."));
DatasetBase* unused;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(tensor, &unused));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));
output->scalar<Variant>()() = WrappedDatasetVariantWrapper(tensor);
}
};
REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant").Device(DEVICE_CPU),
WrapDatasetVariantOp);
REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant")
.HostMemory("input_handle")
.HostMemory("output_handle")
.Device(DEVICE_GPU),
WrapDatasetVariantOp);
class UnwrapDatasetVariantOp : public OpKernel {
public:
explicit UnwrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& tensor = ctx->input(0);
OP_REQUIRES(ctx,
tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()),
errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT."));
Variant variant = tensor.scalar<Variant>()();
const WrappedDatasetVariantWrapper* wrapper =
variant.get<WrappedDatasetVariantWrapper>();
OP_REQUIRES(ctx, wrapper != nullptr,
errors::InvalidArgument(
"Tensor must be a WrappedDataset variant object."));
Tensor ds_tensor = wrapper->get();
OP_REQUIRES_OK(ctx, ctx->set_output("output_handle", ds_tensor));
}
};
REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant").Device(DEVICE_CPU),
UnwrapDatasetVariantOp);
REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant")
.HostMemory("input_handle")
.HostMemory("output_handle")
.Device(DEVICE_GPU),
UnwrapDatasetVariantOp);
static Status WrappedDatasetVariantDeviceCopy(
const WrappedDatasetVariantWrapper& from, WrappedDatasetVariantWrapper* to,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
*to = WrappedDatasetVariantWrapper(from);
return absl::OkStatus();
}
#define REGISTER_OPTIONAL_COPY(DIRECTION) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \
WrappedDatasetVariantWrapper, DIRECTION, \
WrappedDatasetVariantDeviceCopy)
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(WrappedDatasetVariantWrapper,
kWrappedDatasetVariantTypeName);
}
Status GraphDefBuilderWrapper::AddDataset(const DatasetBase* dataset,
const std::vector<Node*>& inputs,
Node** output) {
return AddDataset(dataset, inputs, {}, output);
}
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset, const std::vector<Node*>& inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
Node** output) {
std::vector<std::pair<size_t, Node*>> enumerated_inputs(inputs.size());
for (size_t i = 0; i < inputs.size(); i++) {
enumerated_inputs[i] = std::make_pair(i, inputs[i]);
}
return AddDataset(dataset, enumerated_inputs, {}, attrs, output);
}
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset,
const std::vector<std::pair<size_t, Node*>>& inputs,
const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
Node** output) {
return AddDataset(dataset, inputs, list_inputs, attrs,
false, output);
}
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset,
const std::vector<std::pair<size_t, Node*>>& inputs,
const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
bool use_dataset_name, Node** output) {
auto& type_string = dataset->type_string();
auto opts = absl::make_unique<GraphDefBuilder::Options>(b_->opts());
bool has_output_types_attr = HasAttr(type_string, "output_types");
bool has_output_shapes_attr = HasAttr(type_string, "output_shapes");
if (has_output_shapes_attr) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("output_shapes", dataset->output_shapes()));
}
if (has_output_types_attr) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("output_types", dataset->output_dtypes()));
}
bool has_metadata_attr = HasAttr(type_string, "metadata");
if (has_metadata_attr) {
std::string serialized_metadata;
dataset->metadata().SerializeToString(&serialized_metadata);
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("metadata", serialized_metadata));
}
for (const auto& attr : attrs) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr(attr.first, attr.second));
}
if (opts->HaveError()) {
return errors::Internal("AddDataset: Failed to build Options with error ",
opts->StatusToString());
}
NodeBuilder node_builder(
use_dataset_name ? dataset->node_name() : opts->GetNameForOp(type_string),
type_string, opts->op_registry());
{
size_t total_size = inputs.size() + list_inputs.size();
auto inputs_iter = inputs.begin();
auto list_inputs_iter = list_inputs.begin();
for (int i = 0; i < total_size; i++) {
if (inputs_iter != inputs.end() && inputs_iter->first == i) {
node_builder.Input(NodeBuilder::NodeOut(inputs_iter->second));
inputs_iter++;
} else if (list_inputs_iter != list_inputs.end() &&
list_inputs_iter->first == i) {
std::vector<NodeBuilder::NodeOut> nodeout_inputs;
nodeout_inputs.reserve(list_inputs_iter->second.size());
for (Node* n : list_inputs_iter->second) {
nodeout_inputs.emplace_back(n);
}
node_builder.Input(nodeout_inputs);
list_inputs_iter++;
} else {
return errors::InvalidArgument("No input found for index ", i);
}
}
}
*output = opts->FinalizeBuilder(&node_builder);
if (*output == nullptr) {
return errors::Internal("AddDataset: Failed to build ", type_string,
" op with error ", opts->StatusToString());
}
return absl::OkStatus();
}
Status GraphDefBuilderWrapper::AddFunction(
SerializationContext* ctx, const string& function_name,
const FunctionLibraryDefinition& lib_def) {
if (b_->HasFunction(function_name)) {
VLOG(1) << "Function with name " << function_name << "already exists in"
<< " the graph. It will not be added again.";
return absl::OkStatus();
}
const FunctionDef* f_def = lib_def.Find(function_name);
if (f_def == nullptr) {
return errors::InvalidArgument("Unable to find FunctionDef for ",
function_name, " in the registry.");
}
FunctionDefLibrary def;
*def.add_function() = *f_def;
const string gradient_func = lib_def.FindGradient(function_name);
if (!gradient_func.empty()) {
GradientDef* g_def = def.add_gradient();
g_def->set_function_name(function_name);
g_def->set_gradient_func(gradient_func);
}
TF_RETURN_IF_ERROR(b_->AddFunctionLibrary(def));
for (const NodeDef& node_def : f_def->node_def()) {
const OpRegistrationData* op_reg_data = nullptr;
TF_RETURN_IF_ERROR(lib_def.LookUp(node_def.op(), &op_reg_data));
if (op_reg_data->is_function_op) {
TF_RETURN_IF_ERROR(AddFunction(ctx, op_reg_data->op_def.name(), lib_def));
}
for (const auto& pair : node_def.attr()) {
TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, pair.second, lib_def));
}
}
for (auto iter = f_def->attr().begin(); iter != f_def->attr().end(); iter++) {
TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, iter->second, lib_def));
}
return absl::OkStatus();
}
void GraphDefBuilderWrapper::AddPlaceholderInternal(const Tensor& val,
Node** output) {
*output = ops::SourceOp(
"Placeholder",
b_->opts().WithAttr("dtype", val.dtype()).WithAttr("shape", val.shape()));
}
void GraphDefBuilderWrapper::AddTensorInternal(const Tensor& val,
Node** output) {
*output = ops::SourceOp(
"Const",
b_->opts().WithAttr("dtype", val.dtype()).WithAttr("value", val));
}
bool GraphDefBuilderWrapper::HasAttr(const string& name,
const string& attr_name) const {
const OpDef* op_def = nullptr;
Status s = b_->opts().op_registry()->LookUpOpDef(name, &op_def);
if (!s.ok() || op_def == nullptr) {
return false;
}
return HasAttr(op_def, attr_name);
}
int32_t GetRunnerThreadpoolSizeFromOpKernelContext(OpKernelContext* ctx) {
thread::ThreadPool* thread_pool =
ctx->device()->tensorflow_device_thread_pool();
if (thread_pool) {
return thread_pool->NumThreads();
} else {
static const int32_t kDefaultRunnerThreadpoolSize = port::MaxParallelism();
return kDefaultRunnerThreadpoolSize;
}
}
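// Interns the (prefix, key) pair, returning the previously assigned id if the
// pair has been registered before and a fresh one otherwise.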
int64_t MemoryCheckpoint::IdRegistry::Add(const std::string& prefix,
const std::string& key) {
mutex_lock l(mu_);
auto pair = std::make_pair(prefix, key);
if (string_to_int_.contains(pair)) {
return string_to_int_[pair];
}
int64_t id = next_id_++;
int_to_string_[id] = pair;
string_to_int_[pair] = id;
return id;
}
std::vector<int64_t> MemoryCheckpoint::IdRegistry::GetMatchingIds(
const std::string& prefix_to_match) {
mutex_lock l(mu_);
std::vector<int64_t> ids;
for (const auto& [pair, id] : string_to_int_) {
auto [prefix, key] = pair;
if (prefix.compare(0, prefix_to_match.length(), prefix_to_match) == 0) {
ids.push_back(id);
}
}
return ids;
}
std::pair<std::string, std::string> MemoryCheckpoint::IdRegistry::Get(
int64_t id) {
mutex_lock l(mu_);
auto result = int_to_string_.find(id);
DCHECK(result != int_to_string_.end())
<< "Failed find id " << id << " in IdRegistry. "
<< "Max id is: " << next_id_ - 1;
return result->second;
}
void MemoryCheckpoint::IdRegistry::RemoveIds(const std::vector<int64_t>& ids) {
mutex_lock l(mu_);
for (const auto& id : ids) {
string_to_int_.erase(int_to_string_[id]);
int_to_string_.erase(id);
}
}
std::string MemoryCheckpoint::DebugString() const {
std::string result = absl::StrCat("status=", status_.ToString(),
", "
"root=",
(is_root_ ? "true" : "false"), "\n");
absl::StrAppend(&result, "number of integers: ", int_values_.size(), "\n");
for (const auto& [k, v] : int_values_) {
absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":",
id_registry_->Get(k).second, ": ", v, "\n");
}
absl::StrAppend(&result, "number of strings: ", str_values_.size(), "\n");
for (const auto& [k, v] : str_values_) {
absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":",
id_registry_->Get(k).second, ": ", v, "\n");
}
absl::StrAppend(&result, "number of tensors: ", tensor_values_.size(), "\n");
absl::StrAppend(
&result, "number of expired prefixes: ", expired_prefixes_.size(), "\n");
return result;
}
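// Merges `other` into this checkpoint: an error status wins and discards the
// staged values, per-key values from `other` overwrite local ones, and
// prefixes expired in `other` are purged here as well.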
void MemoryCheckpoint::Merge(MemoryCheckpoint* other) {
if (!status_.ok()) {
return;
}
if (!other->status_.ok()) {
status_ = other->status_;
int_values_.clear();
str_values_.clear();
tensor_values_.clear();
}
for (const auto& [k, v] : other->int_values_) {
int_values_[k] = v;
}
for (const auto& [k, v] : other->str_values_) {
str_values_[k] = v;
}
for (const auto& [k, v] : other->tensor_values_) {
tensor_values_[k] = v;
}
for (const auto& prefix : other->expired_prefixes_) {
Purge(prefix);
}
other->expired_prefixes_.clear();
VLOG(5) << "MemoryCheckpoint::Merge " << DebugString();
}
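// Drops every staged value registered under `prefix`. Non-root checkpoints
// only mark the prefix as expired so the purge propagates on Merge; the root
// checkpoint is the single place where ids are actually retired.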
void MemoryCheckpoint::Purge(const std::string& prefix) {
std::vector<int64_t> ids = id_registry_->GetMatchingIds(prefix);
for (const auto& id : ids) {
int_values_.erase(id);
str_values_.erase(id);
tensor_values_.erase(id);
}
if (!is_root_) {
expired_prefixes_.insert(prefix);
} else {
id_registry_->RemoveIds(ids);
}
}
Status MemoryCheckpoint::Save(IteratorStateWriter* writer) const {
for (const auto& [id, value] : int_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));
}
for (const auto& [id, value] : str_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));
}
for (const auto& [id, value] : tensor_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteTensor(prefix, key, value));
}
return absl::OkStatus();
}
Status IteratorBase::InitializeBase(IteratorContext* ctx,
const IteratorBase* parent) {
parent_ = parent;
id_ =
Hash64CombineUnordered(Hash64(prefix()), reinterpret_cast<uint64>(this));
if (parent_) {
parent_id_ = Hash64CombineUnordered(Hash64(parent_->prefix()),
reinterpret_cast<uint64>(parent_));
if (const auto& model = ctx->model()) {
auto factory = [ctx, this](model::Node::Args args) {
return CreateNode(ctx, std::move(args));
};
model->AddNode(std::move(factory), prefix(), parent->model_node(),
&node_);
cleanup_fns_.push_back([this, model]() { model->RemoveNode(node_); });
}
}
return absl::OkStatus();
}
Status GetCompressedElementFromVariantTensor(
const Tensor& tensor, const CompressedElement** out_compressed_element) {
if (!(tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()))) {
return errors::InvalidArgument(
"`CompressedElement` tensor must be a scalar of dtype `DT_VARIANT`.");
}
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed_element =
variant.get<CompressedElement>();
if (compressed_element == nullptr) {
return errors::InvalidArgument(
"Tensor must be a `CompressedElement` object.");
}
*out_compressed_element = compressed_element;
return absl::OkStatus();
}
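// Sums allocated bytes across an element, unwrapping DT_VARIANT tensors that
// hold nested datasets or CompressedElement protos so their real footprint is
// counted rather than the size of the variant scalar.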
int64_t GetAllocatedBytes(const std::vector<Tensor>& element) {
int64_t allocated_bytes = 0;
for (auto& tensor : element) {
if (tensor.dtype() == DT_VARIANT) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {
allocated_bytes += dataset->AllocatedBytes();
continue;
}
const CompressedElement* compressed_element;
if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)
.ok()) {
allocated_bytes += compressed_element->ByteSizeLong();
continue;
}
}
allocated_bytes += tensor.AllocatedBytes();
}
return allocated_bytes;
}
int64_t GetTotalBytes(const std::vector<Tensor>& element) {
int64_t total_bytes = 0;
for (auto& tensor : element) {
if (tensor.dtype() == DT_VARIANT) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {
total_bytes += dataset->TotalBytes();
continue;
}
const CompressedElement* compressed_element;
if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)
.ok()) {
total_bytes += compressed_element->ByteSizeLong();
continue;
}
}
total_bytes += tensor.TotalBytes();
}
return total_bytes;
}
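// Prefixes checkpoint keys with the fixed random hex token so that
// ExtractIteratorPrefix can later recognize and split apart keys written here.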
std::string FullName(const std::string& prefix, const std::string& name) {
if (absl::StrContains(name, kColon)) {
LOG(ERROR) << name << " should not contain " << kColon;
}
return strings::StrCat(kFullNameRandomHex, kPipe, prefix, kColon, name);
}
Status ExtractIteratorPrefix(StringPiece key, string* prefix) {
if (!absl::StartsWith(key, data::kFullNameRandomHex)) {
return errors::InvalidArgument("Key: ", key,
" was not generated using full_name.");
}
std::vector<string> split_keys = str_util::Split(key, data::kPipe);
if (split_keys.size() != 2) {
return errors::InvalidArgument("Key: ", key,
" was not generated using full_name.");
}
string real_key = split_keys[1];
const int pos = real_key.rfind(kColon);
*prefix = real_key.substr(0, pos);
return absl::OkStatus();
}
Status GetDatasetFromVariantTensor(const Tensor& tensor,
DatasetBase** out_dataset) {
if (!(tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()))) {
return errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT.");
}
const Variant& variant = tensor.scalar<Variant>()();
const DatasetVariantWrapper* wrapper = variant.get<DatasetVariantWrapper>();
if (wrapper == nullptr) {
return errors::InvalidArgument("Tensor must be a Dataset object.");
}
*out_dataset = wrapper->get();
if (*out_dataset == nullptr) {
return errors::Internal("Read uninitialized Dataset variant.");
}
return absl::OkStatus();
}
Status StoreDatasetInVariantTensor(DatasetBase* dataset, Tensor* tensor) {
if (!(tensor->dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor->shape()))) {
return errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT.");
}
tensor->scalar<Variant>()() = DatasetVariantWrapper(dataset);
return absl::OkStatus();
}
namespace internal {
#define WARN_PROTO_FIELD_CONFLICT(reflection, field, field_type, src, dst) \
{ \
auto source_value = reflection->Get##field_type(src, field); \
auto destination_value = reflection->Get##field_type(*dst, field); \
if (source_value != destination_value) { \
LOG(WARNING) << "Changing the value of option field " << field->name() \
<< " from " << destination_value << " to " << source_value; \
} \
}
#define WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst) \
{ \
auto source_value = reflection->GetEnum(src, field); \
auto destination_value = reflection->GetEnum(*dst, field); \
if (source_value != destination_value) { \
LOG(WARNING) << "Changing the value of option enum field " \
<< field->name() << " from " \
<< destination_value->full_name() << " to " \
<< source_value->full_name(); \
} \
}
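// Walks the fields set in both messages and logs a warning for every scalar
// or enum value that merging `src` over `dst` is about to change, recursing
// into nested message fields.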
void WarnProtoConflicts(const protobuf::Message& src, protobuf::Message* dst) {
std::vector<const protobuf::FieldDescriptor*> set_src;
std::vector<const protobuf::FieldDescriptor*> set_dst;
const protobuf::Reflection* reflection = src.GetReflection();
reflection->ListFields(src, &set_src);
reflection->ListFields(*dst, &set_dst);
std::sort(set_src.begin(), set_src.end());
std::sort(set_dst.begin(), set_dst.end());
std::vector<const protobuf::FieldDescriptor*> in_both;
std::set_intersection(set_src.begin(), set_src.end(), set_dst.begin(),
set_dst.end(), std::back_inserter(in_both));
for (auto field : in_both) {
if (field->name() == "framework_type") {
continue;
}
if (field->type() == protobuf::FieldDescriptor::TYPE_MESSAGE) {
WarnProtoConflicts(reflection->GetMessage(src, field),
reflection->MutableMessage(dst, field));
} else {
switch (field->cpp_type()) {
case protobuf::FieldDescriptor::CPPTYPE_INT32:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Int32, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_INT64:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Int64, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_UINT32:
WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt32, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_UINT64:
WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt64, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Double, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_FLOAT:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Float, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_BOOL:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Bool, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_ENUM:
WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst);
break;
default: {
LOG(ERROR) << "Unrecognized proto type for field "
<< field->full_name();
}
}
}
}
}
#undef WARN_PROTO_ENUM_FIELD_CONFLICT
#undef WARN_PROTO_FIELD_CONFLICT
void MergeOptions(const protobuf::Message& source,
protobuf::Message* destination) {
WarnProtoConflicts(source, destination);
destination->MergeFrom(source);
}
void MergeOptions(const protobuf::MessageLite& source,
protobuf::MessageLite* destination) {
destination->CheckTypeAndMergeFrom(source);
}
}
void DatasetBase::Initialize(const Metadata& metadata) {
Status s = ComputeNumSources();
if (!s.ok()) {
LOG_EVERY_N_SEC(ERROR, 10) << s;
}
s = MergeOptionsFromInputs();
if (!s.ok()) {
LOG_EVERY_N_SEC(ERROR, 10) << s;
}
metadata_ = metadata;
if (metadata_.name() == "") {
static std::atomic<int64_t> id_counter(0);
*metadata_.mutable_name() =
strings::StrCat(type_string(), ":", id_counter.fetch_add(1));
}
}
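// A dataset with no inputs counts as a single source; otherwise the number of
// sources is the sum over all input datasets, computed lazily and cached.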
Status DatasetBase::ComputeNumSources() {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return s;
}
if (num_sources_ >= 0) {
return absl::OkStatus();
}
num_sources_ = 0;
if (inputs.empty()) {
num_sources_ = 1;
return absl::OkStatus();
}
for (const auto& input : inputs) {
if (input->num_sources() < 0) {
return errors::FailedPrecondition(
"Cannot compute input sources for dataset of type ", type_string(),
", because sources could not be computed for input dataset of type ",
input->type_string());
}
num_sources_ += input->num_sources();
}
return absl::OkStatus();
}
Status DatasetBase::CheckRandomAccessCompatible(const int64 index) const {
CardinalityOptions options;
options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
int64 cardinality = Cardinality(options);
if (cardinality == kInfiniteCardinality ||
cardinality == kUnknownCardinality) {
return tensorflow::errors::FailedPrecondition(
"Dataset of type ", this->DebugString(), " has ",
cardinality == kInfiniteCardinality ? "infinite" : "unknown",
" cardinality, which does not support random access.");
}
if (index < 0 || index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
return absl::OkStatus();
}
Status DatasetBase::Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
return errors::Unimplemented("Random access is not implemented for dataset ",
DebugString());
}
Status DatasetBase::Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
return errors::Unimplemented("Random access is not implemented for dataset ",
DebugString());
}
absl::StatusOr<DatasetBase*> DatasetBase::Finalize(
OpKernelContext* ctx,
std::function<absl::StatusOr<core::RefCountPtr<DatasetBase>>()>
make_finalized_dataset) const {
mutex_lock l(mu_);
if (!finalized_dataset_) {
TF_ASSIGN_OR_RETURN(finalized_dataset_, make_finalized_dataset());
}
return finalized_dataset_.get();
}
Status DatasetBase::MergeOptionsFromInputs() {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return s;
}
if (inputs.empty()) {
return absl::OkStatus();
}
Options merged_options = inputs[0]->options_;
for (int i = 1; i < inputs.size(); ++i) {
internal::MergeOptions(inputs[i]->options_, &merged_options);
}
internal::MergeOptions(options_, &merged_options);
options_ = merged_options;
return absl::OkStatus();
}
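// OptionsDataset and FinalizeDataset are identity transformations at
// iteration time, so their iterators come straight from the single input;
// every other dataset builds and initializes its own iterator and saves a
// checkpoint of its initial state.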
Status DatasetBase::MakeIterator(
IteratorContext* ctx, const IteratorBase* parent,
const string& output_prefix,
std::unique_ptr<IteratorBase>* iterator) const {
if (type_string() == "OptionsDataset" || type_string() == "FinalizeDataset") {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
return inputs[0]->MakeIterator(ctx, parent, output_prefix, iterator);
}
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
strings::StrCat("MakeIterator::", type_string()), {});
},
tsl::profiler::TraceMeLevel::kInfo);
*iterator = MakeIteratorInternal(output_prefix);
Status s = (*iterator)->InitializeBase(ctx, parent);
if (s.ok()) {
s.Update((*iterator)->Initialize(ctx));
ctx->SaveCheckpoint(iterator->get());
}
if (!s.ok()) {
iterator->reset();
}
return s;
}
Status DatasetBase::MakeSplitProviders(
std::vector<std::unique_ptr<SplitProvider>>* split_providers) const {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return errors::Unimplemented(
"Cannot create split providers for dataset of type ", type_string(),
", because the dataset implements neither `InputDatasets` nor "
"`MakeSplitProvider`.");
}
if (inputs.size() != 1) {
return errors::Unimplemented(
"Cannot create split providers for dataset of type ", type_string(),
", because the dataset is not unary (instead having arity ",
inputs.size(),
"), and no custom implementation of `MakeSplitProvider` is defined.");
}
return inputs[0]->MakeSplitProviders(split_providers);
}
std::optional<int64_t> DatasetBase::GetEstimatedElementSize() const {
const auto& shapes = output_shapes();
const auto& dtypes = output_dtypes();
if (shapes.size() != dtypes.size()) {
LOG(ERROR) << "This should not happen because the sizes of output_shapes() "
"and output_dtypes() should always be "
"the same.";
return std::nullopt;
}
size_t num_outputs = shapes.size();
int64_t element_size = 0;
for (int i = 0; i < num_outputs; ++i) {
const auto& partial_shape = shapes[i];
const auto& dtype = dtypes[i];
auto num_elements = partial_shape.num_elements();
if (num_elements == -1) {
return std::nullopt;
}
element_size += num_elements * DataTypeSize(dtype);
}
return element_size;
}
int64_t DatasetBase::Cardinality() const {
mutex_lock l(cardinality_mu_);
if (cardinality_ == kUnknownCardinality) {
CardinalityOptions options;
cardinality_ = CardinalityInternal(options);
}
return cardinality_;
}
int64_t DatasetBase::Cardinality(CardinalityOptions options) const {
mutex_lock l(cardinality_mu_);
if (cardinality_ == kUnknownCardinality) {
cardinality_ = CardinalityInternal(options);
}
return cardinality_;
}
Status DatasetBase::InputDatasets(
std::vector<const DatasetBase*>* inputs) const {
return errors::Unimplemented(
"Cannot compute input sources for dataset of type ", type_string(),
", because the dataset does not implement `InputDatasets`. To fix this, "
"your dataset should override the `InputDatasets` method. If it is a "
"source dataset, it should return empty inputs.");
}
Status DatasetBase::DatasetGraphDefBuilder::AddInputDataset(
SerializationContext* ctx, const DatasetBase* dataset, Node** output) {
Status status = dataset->AsGraphDefInternal(ctx, this, output);
if (ctx->is_graph_rewrite()) {
if (status.ok()) {
(*output)->AddAttr(kCardinalityAttrForRewrite, dataset->Cardinality());
} else if (errors::IsUnimplemented(status)) {
Tensor t(DT_VARIANT, TensorShape({}));
dataset->Ref();
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(const_cast<DatasetBase*>(dataset), &t));
TF_RETURN_IF_ERROR(AddPlaceholder(t, output));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back((*output)->name(), std::move(t));
LOG_EVERY_N_SEC(WARNING, 30)
<< "Input of " << dataset->DebugString()
<< " will not be optimized because the dataset does not implement "
"the "
"AsGraphDefInternal() method needed to apply optimizations.";
return absl::OkStatus();
}
}
return status;
}
Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensor(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.dtype() == DT_VARIANT) {
Status s = AddDatasetOrTensorHelper(ctx, t, output);
if (s.ok()) {
return s;
}
}
if (t.dtype() == DT_RESOURCE && !ctx->is_graph_rewrite()) {
Status s = AddResourceHelper(ctx, t, output);
if (!errors::IsUnimplemented(s)) {
return s;
}
}
return AddTensor(t, output);
}
Status DatasetBase::DatasetGraphDefBuilder::AddIdentity(
SerializationContext* ctx, const std::string& name_prefix, Node** input,
Node** output) {
*output =
ops::UnaryOp("Identity", *input,
builder()->opts().WithName(UniqueNodeName(name_prefix)));
return absl::OkStatus();
}
Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensorHelper(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.dims() == 0) {
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(t, &dataset));
return AddInputDataset(ctx, dataset, output);
}
std::vector<NodeBuilder::NodeOut> nodes;
for (int i = 0; i < t.dim_size(0); ++i) {
Node* node;
TF_RETURN_IF_ERROR(AddDatasetOrTensorHelper(ctx, t.SubSlice(i), &node));
nodes.emplace_back(node);
}
auto op_name = "Pack";
auto opts = builder()->opts();
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(nodes));
*output = opts.FinalizeBuilder(&node_builder);
return absl::OkStatus();
}
Status DatasetBase::DatasetGraphDefBuilder::AddResourceHelper(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.NumElements() == 0) {
return errors::InvalidArgument("Empty resouce handle");
}
const ResourceHandle& handle = t.flat<ResourceHandle>()(0);
if (ctx->device_name() != handle.device()) {
return errors::InvalidArgument("Trying to access resource ", handle.name(),
" located in device ", handle.device(),
" from device ", ctx->device_name());
}
ResourceBase* resource;
TF_RETURN_IF_ERROR(ctx->resource_mgr()->Lookup(handle, &resource));
core::ScopedUnref unref(resource);
return resource->AsGraphDef(builder(), output);
}
DatasetBaseIterator::DatasetBaseIterator(const BaseParams& params)
: params_(params) {
params_.dataset->Ref();
VLOG(2) << prefix() << " constructor";
strings::StrAppend(&traceme_metadata_, "name=", dataset()->metadata().name());
strings::StrAppend(&traceme_metadata_, ",shapes=");
auto& shapes = output_shapes();
for (int i = 0; i < shapes.size(); ++i) {
if (i > 0) {
strings::StrAppend(&traceme_metadata_, " ");
}
strings::StrAppend(&traceme_metadata_, shapes.at(i).DebugString());
}
strings::StrAppend(&traceme_metadata_, ",types=");
auto& types = output_dtypes();
for (int i = 0; i < types.size(); ++i) {
if (i > 0) {
strings::StrAppend(&traceme_metadata_, " ");
}
strings::StrAppend(&traceme_metadata_, DataTypeString(types.at(i)));
}
}
DatasetBaseIterator::~DatasetBaseIterator() {
VLOG(2) << prefix() << " destructor";
params_.dataset->Unref();
}
string DatasetBaseIterator::BuildTraceMeName() {
string result =
strings::StrCat(params_.prefix, "#", traceme_metadata_, ",id=", id_);
if (parent_) {
strings::StrAppend(&result, ",parent_id=", parent_id_);
}
TraceMeMetadata metadata = GetTraceMeMetadata();
for (const auto& pair : metadata) {
strings::StrAppend(&result, ",", pair.first, "=", pair.second);
}
if (model_node() != nullptr) {
if (model_node()->buffered_elements() > 0) {
strings::StrAppend(
&result, ",buffered_elements=",
static_cast<long long>(model_node()->buffered_elements()));
strings::StrAppend(
&result, ",buffered_bytes_MB=",
static_cast<long long>(
static_cast<double>(model_node()->buffered_bytes()) * 1e-6));
}
}
strings::StrAppend(&result, "#");
return result;
}
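// Wraps GetNextInternal with activity-watcher and profiler tracing, model
// node timing, checkpoint bookkeeping, and validation of the produced
// element, converting stray OutOfRange errors into Internal errors.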
Status DatasetBaseIterator::GetNext(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
activity_watcher::ActivityScope activity_scope([&]() {
activity_watcher::Activity::Attributes attributes;
attributes["iterator_prefix"] = prefix();
return std::make_unique<activity_watcher::Activity>(
"Iterator::GetNext", activity_watcher::ActivityCategory::kDatasetOp,
std::move(attributes));
});
tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << prefix() << " GetNext enter";
auto model = ctx->model();
bool output_was_recording =
node_ && node_->output() && node_->output()->is_recording();
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
if (output_was_recording) {
node_->output()->record_stop(now_nanos);
}
node_->record_start(now_nanos);
}
out_tensors->clear();
Status s = GetNextInternal(ctx, out_tensors, end_of_sequence);
ctx->SaveCheckpoint(this);
if (!SymbolicCheckpointCompatible()) {
ctx->UpdateCheckpointStatus([this]() {
return errors::Unimplemented(dataset()->type_string(),
" does not support symbolic checkpointing.");
});
}
if (TF_PREDICT_TRUE(s.ok())) {
if (TF_PREDICT_TRUE(!*end_of_sequence)) {
if (TF_PREDICT_FALSE(out_tensors->size() !=
dataset()->output_dtypes().size())) {
return errors::Internal("Expected ", dataset()->output_dtypes().size(),
" components but got ", out_tensors->size(),
".");
}
RecordElement(ctx, out_tensors);
} else {
out_tensors->clear();
}
}
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
node_->record_stop(now_nanos);
if (output_was_recording) {
node_->output()->record_start(now_nanos);
}
}
if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) {
s = errors::Internal("Iterator \"", params_.prefix,
"\" returned `OutOfRange`. This indicates an "
"implementation error as `OutOfRange` errors are not "
"expected to be returned here. Original message: ",
s.message());
LOG(ERROR) << s;
}
DVLOG(3) << prefix() << " GetNext exit";
return s;
}
Status DatasetBaseIterator::Skip(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) {
tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << prefix() << " Skip enter";
auto model = ctx->model();
bool output_was_recording =
node_ && node_->output() && node_->output()->is_recording();
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
auto output = node_->output();
if (output_was_recording) {
output->record_stop(now_nanos);
}
node_->record_start(now_nanos);
}
Status s = SkipInternal(ctx, num_to_skip, end_of_sequence, num_skipped);
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
node_->record_stop(now_nanos);
auto output = node_->output();
if (output_was_recording) {
output->record_start(now_nanos);
}
}
if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) {
s = errors::Internal("Iterator \"", params_.prefix,
"\" returned `OutOfRange`. This indicates an "
"implementation error as `OutOfRange` errors are not "
"expected to be returned here. Original message: ",
s.message());
LOG(ERROR) << s;
}
DVLOG(3) << prefix() << " Skip exit";
return s;
}
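// Default skip path: materializes up to `num_to_skip` elements through
// GetNextInternal and discards them, stopping early at end of sequence.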
Status DatasetBaseIterator::SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence,
int* num_skipped) {
*num_skipped = 0;
for (int i = 0; i < num_to_skip; ++i) {
std::vector<Tensor> out_tensors;
TF_RETURN_IF_ERROR(GetNextInternal(ctx, &out_tensors, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
RecordElement(ctx, &out_tensors);
(*num_skipped)++;
}
return absl::OkStatus();
}
void DatasetOpKernel::Compute(OpKernelContext* ctx) {
DatasetBase* dataset = nullptr;
MakeDataset(ctx, &dataset);
if (ctx->status().ok()) {
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES_OK(ctx, StoreDatasetInVariantTensor(dataset, output));
if (ctx->stack_trace().has_value() && VLOG_IS_ON(4)) {
VLOG(4) << "Dataset " << dataset->type_string()
<< " created using the following stack trace:";
for (const auto& stack_frame : ctx->stack_trace()->ToStackFrames(
{}, {}, false, -1)) {
VLOG(4) << stack_frame.file_name << ":" << stack_frame.line_number
<< " in " << stack_frame.function_name << "()";
}
}
dataset->Initialize(metadata_);
}
}
string DatasetOpKernel::TraceString(const OpKernelContext& ctx,
bool verbose) const {
return tsl::profiler::TraceMeOp(name_view(), type_string_view());
}
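// Heuristic: an op is a dataset op if it has exactly one DT_VARIANT output
// and its (possibly "__wrapped__"-mangled) name is "DatasetFromGraph" or ends
// in "Dataset" or "DatasetV<N>".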
bool DatasetOpKernel::IsDatasetOp(const OpDef& op_def) {
if (op_def.output_arg_size() != 1) return false;
if (op_def.output_arg(0).type() != DT_VARIANT) return false;
absl::string_view op_name = op_def.name();
std::vector<std::string> v1, v2;
if (absl::StartsWith(op_name, "__wrapped__")) {
v1 = absl::StrSplit(op_name, "__wrapped__", absl::SkipEmpty());
if (v1.empty()) return false;
v2 = absl::StrSplit(v1[0], "_", absl::SkipEmpty());
op_name = v2.empty() ? v1[0] : v2[0];
}
if (op_name == "DatasetFromGraph") return true;
if (absl::EndsWith(op_name, "Dataset")) return true;
  if (op_name.empty()) return false;
  size_t index = op_name.length() - 1;
  // `index` is unsigned, so an `index >= 0` test would always hold; stop at 0
  // instead to avoid wrapping past the front of the name.
  while (index > 0 && isdigit(static_cast<unsigned char>(op_name[index]))) {
    index--;
  }
constexpr absl::string_view kDatasetPrefix = "DatasetV";
constexpr absl::string_view::size_type kPrefixLength = kDatasetPrefix.size();
if (index < kPrefixLength - 1 || index == op_name.length() - 1) return false;
return op_name.substr(index - kPrefixLength + 1, kPrefixLength) ==
kDatasetPrefix;
}
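// Illustrative classification under the rules above, mirroring the unit test
// for this function (each OpDef has a single DT_VARIANT output):
//   "Dataset", "RangeDataset"                  -> true  (ends with "Dataset")
//   "MapDatasetV1", "DataServiceDatasetV1000"  -> true  ("DatasetV" + digits)
//   "DatasetFromGraph"                         -> true  (special-cased)
//   "Identity"                                 -> false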
void UnaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
MakeDataset(ctx, input, output);
}
void BinaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
DatasetBase* another_input;
OP_REQUIRES_OK(ctx,
GetDatasetFromVariantTensor(ctx->input(1), &another_input));
MakeDataset(ctx, input, another_input, output);
}
const char DatasetBase::kDatasetGraphKey[] = "_DATASET_GRAPH";
const char DatasetBase::kDatasetGraphOutputNodeKey[] =
"_DATASET_GRAPH_OUTPUT_NODE";
BackgroundWorker::BackgroundWorker(Env* env, const char* name)
: env_(env), name_(name) {}
BackgroundWorker::~BackgroundWorker() {
{
mutex_lock l(mu_);
cancelled_ = true;
}
cond_var_.notify_one();
thread_.reset();
}
void BackgroundWorker::Schedule(std::function<void()> work_item) {
{
mutex_lock l(mu_);
if (!thread_) {
thread_ = absl::WrapUnique(env_->StartThread(
          {}, name_, [this]() { WorkerLoop(); }));
}
work_queue_.push_back(std::move(work_item));
}
cond_var_.notify_one();
}
void BackgroundWorker::WorkerLoop() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "Background");
while (true) {
std::function<void()> work_item = nullptr;
{
mutex_lock l(mu_);
while (!cancelled_ && work_queue_.empty()) {
cond_var_.wait(l);
}
if (cancelled_) {
return;
}
DCHECK(!work_queue_.empty());
work_item = std::move(work_queue_.front());
work_queue_.pop_front();
}
DCHECK(work_item != nullptr);
work_item();
}
}
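// Queue protocol above: Schedule() pushes under `mu_` and then signals
// `cond_var_`; WorkerLoop() waits inside a while-loop, so a spurious wakeup
// with an empty queue simply re-enters the wait. The destructor sets
// `cancelled_` and notifies, letting the loop return before `thread_.reset()`
// joins the thread.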
namespace {
class RunnerImpl : public Runner {
public:
void Run(const std::function<void()>& f) override {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "Runner");
f();
PreventTailCall();
}
private:
virtual void PreventTailCall() {}
};
}
Runner* Runner::get() {
static Runner* singleton = new RunnerImpl;
return singleton;
}
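// The singleton above is never deleted, a common pattern for process-lifetime
// objects that sidesteps destruction-order problems at exit.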
}
} | #include "tensorflow/core/framework/dataset.h"
#include <memory>
#include <tuple>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
TEST(DatasetTest, FullName) {
EXPECT_EQ(FullName("prefix", "name"),
"60d899aa0d8ce4351e7c3b419e92d25b|prefix:name");
}
enum DataTypeTest {
_tf_int_32,
_tf_int_64,
_tf_float_,
_tf_double_,
_tf_string_
};
struct DatasetTestParam {
const DataTypeTest type;
std::function<std::vector<Tensor>()> tensor_factory;
const int64_t expected_bytes;
};
class DatasetTestTotalBytes
: public ::testing::TestWithParam<DatasetTestParam> {};
TEST_P(DatasetTestTotalBytes, TestTotalBytes) {
const DatasetTestParam& test_case = GetParam();
if (test_case.type == _tf_string_) {
EXPECT_LE(GetTotalBytes(test_case.tensor_factory()),
test_case.expected_bytes);
} else {
EXPECT_EQ(GetTotalBytes(test_case.tensor_factory()),
test_case.expected_bytes);
}
}
std::vector<Tensor> tensor_tf_int_32s() {
return {test::AsTensor<int32>({1, 2, 3, 4, 5}),
test::AsTensor<int32>({1, 2, 3, 4})};
}
std::vector<Tensor> tensor_tf_int_64s() {
return {test::AsTensor<int64_t>({1, 2, 3, 4, 5}),
test::AsTensor<int64_t>({10, 12})};
}
std::vector<Tensor> tensor_tf_float_s() {
return {test::AsTensor<float>({1.0, 2.0, 3.0, 4.0})};
}
std::vector<Tensor> tensor_tf_double_s() {
return {test::AsTensor<double>({100.0}), test::AsTensor<double>({200.0}),
test::AsTensor<double>({400.0}), test::AsTensor<double>({800.0})};
}
const tstring str = "test string";
std::vector<Tensor> tensor_strs() { return {test::AsTensor<tstring>({str})}; }
INSTANTIATE_TEST_SUITE_P(
DatasetTestTotalBytes, DatasetTestTotalBytes,
::testing::ValuesIn(std::vector<DatasetTestParam>{
        // expected_bytes is sizeof(element) * element count; the string case
        // is an upper bound (compared with EXPECT_LE above).
        {_tf_int_32, tensor_tf_int_32s, 4 * 9},
        {_tf_int_64, tensor_tf_int_64s, 8 * 7},
        {_tf_float_, tensor_tf_float_s, 4 * 4},
        {_tf_double_, tensor_tf_double_s, 8 * 4},
        {_tf_string_, tensor_strs,
         static_cast<int64_t>(sizeof(str) + str.size())}}));
struct MergeOptionsTestParam {
const std::string source;
const std::string destination;
const std::string expected;
};
class MergeOptionsTest
: public ::testing::TestWithParam<MergeOptionsTestParam> {};
TEST_P(MergeOptionsTest, MergeOptions) {
const MergeOptionsTestParam& test_case = GetParam();
Options source;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.source,
&source));
Options destination;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.destination,
&destination));
Options expected;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.expected,
&expected));
internal::MergeOptions(source, &destination);
EXPECT_EQ(expected.SerializeAsString(), destination.SerializeAsString());
}
INSTANTIATE_TEST_SUITE_P(
MergeOptionsTest, MergeOptionsTest,
::testing::ValuesIn(std::vector<MergeOptionsTestParam>{
{"deterministic: false", "",
"deterministic: false"},
{"deterministic: false",
"deterministic: false",
"deterministic: false"},
{"deterministic: false",
"deterministic: true",
"deterministic: false"},
{"external_state_policy: POLICY_IGNORE",
"external_state_policy: POLICY_FAIL",
"external_state_policy: POLICY_IGNORE"}}));
TEST(DatasetTest, IsDatasetOp) {
OpDef op_def;
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
op_def.add_output_arg()->set_type(DT_STRING);
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
op_def.mutable_output_arg(0)->set_type(DT_VARIANT);
op_def.set_name("Identity");
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
for (const auto& name : {"Dataset", "RangeDataset", "MapDatasetV1",
"ParallelInterleaveDatasetV42",
"DataServiceDatasetV1000", "DatasetFromGraph"}) {
op_def.set_name(name);
EXPECT_TRUE(DatasetOpKernel::IsDatasetOp(op_def));
}
}
TEST(DatasetTest, IdRegistry) {
MemoryCheckpoint::IdRegistry id_registry;
auto id_1 = id_registry.Add("foo", "key_1");
auto id_2 = id_registry.Add("foo:bar", "key_2");
auto id_3 = id_registry.Add("foo:bar:baz", "key_3");
auto [prefix_1, key_1] = id_registry.Get(id_1);
EXPECT_EQ(prefix_1, "foo");
EXPECT_EQ(key_1, "key_1");
auto [prefix_2, key_2] = id_registry.Get(id_2);
EXPECT_EQ(prefix_2, "foo:bar");
EXPECT_EQ(key_2, "key_2");
auto [prefix_3, key_3] = id_registry.Get(id_3);
EXPECT_EQ(prefix_3, "foo:bar:baz");
EXPECT_EQ(key_3, "key_3");
auto matching_ids = id_registry.GetMatchingIds("hello");
EXPECT_EQ(matching_ids.size(), 0);
matching_ids = id_registry.GetMatchingIds("foo:bar:baz");
EXPECT_EQ(matching_ids.size(), 1);
matching_ids = id_registry.GetMatchingIds("foo:bar");
EXPECT_EQ(matching_ids.size(), 2);
matching_ids = id_registry.GetMatchingIds("foo");
EXPECT_EQ(matching_ids.size(), 3);
matching_ids = id_registry.GetMatchingIds("f");
EXPECT_EQ(matching_ids.size(), 3);
absl::flat_hash_set<int64_t> matching_ids_set(matching_ids.begin(),
matching_ids.end());
EXPECT_TRUE(matching_ids_set.contains(id_1));
EXPECT_TRUE(matching_ids_set.contains(id_2));
EXPECT_TRUE(matching_ids_set.contains(id_3));
id_registry.RemoveIds(matching_ids);
matching_ids = id_registry.GetMatchingIds("foo");
EXPECT_EQ(matching_ids.size(), 0);
}
TEST(DatasetTest, MemoryCheckpointWrites) {
std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry =
std::make_shared<MemoryCheckpoint::IdRegistry>();
MemoryCheckpoint memory_checkpoint(id_registry);
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_EXPECT_OK(memory_checkpoint.WriteScalar("name_foo", "key_bar", 5));
TF_EXPECT_OK(
memory_checkpoint.WriteTensor("name_corgi", "key_baz", input_tensor));
auto matching_ids = id_registry->GetMatchingIds("name_foo");
EXPECT_EQ(matching_ids.size(), 1);
auto id = matching_ids.at(0);
auto [_, key] = id_registry->Get(id);
EXPECT_EQ(key, "key_bar");
matching_ids = id_registry->GetMatchingIds("name_corgi");
EXPECT_EQ(matching_ids.size(), 1);
id = matching_ids.at(0);
std::tie(_, key) = id_registry->Get(id);
EXPECT_EQ(key, "key_baz");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07635265-48b2-4ba1-86d5-a8203c9f03e8 | cpp | tensorflow/tensorflow | attr_value_util | tensorflow/core/framework/attr_value_util.cc | tensorflow/core/framework/attr_value_util_test.cc | #include "tensorflow/core/framework/attr_value_util.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/attr_value.pb_text.h"
#include "tensorflow/core/framework/tensor.pb_text.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb_text.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
namespace attr_value_util_internal {
int64_t TensorByteSize(const TensorProto& t) {
auto result = PartialTensorShape::BuildPartialTensorShape(t.tensor_shape());
if (!result.ok()) {
VLOG(1) << "Error encounted while computing computing tensor byte size: "
<< result.status();
return -1;
}
int64_t num_elems = result.value().num_elements();
if (num_elems < 0) {
return -1;
}
int64_t tensor_byte_size =
MultiplyWithoutOverflow(num_elems, DataTypeSize(t.dtype()));
if (tensor_byte_size < 0) {
VLOG(1)
<< "Overflow encountered when computing tensor byte size, multiplying "
<< num_elems << " with " << DataTypeSize(t.dtype());
return -1;
}
return tensor_byte_size;
}
}
namespace {
constexpr int kMaxAttrValueTensorByteSize = 32 * 1024 * 1024;
constexpr int kMaxTensorNestDepth = 100;
uint64 TensorProtoHash(const TensorProto& tp) {
Tensor tensor(tp.dtype());
bool success = tensor.FromProto(tp);
if (success) {
TensorProto p;
tensor.AsProtoTensorContent(&p);
return DeterministicProtoHash64(p);
} else {
return DeterministicProtoHash64(tp);
}
}
uint64 FastTensorProtoHash(const TensorProto& tp) {
if (attr_value_util_internal::TensorByteSize(tp) >
kMaxAttrValueTensorByteSize) {
return DeterministicProtoHash64(tp);
} else {
return TensorProtoHash(tp);
}
}
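// TensorProtoHash canonicalizes the proto by round-tripping it through a
// Tensor, so differently encoded representations of the same tensor hash
// alike. FastTensorProtoHash skips that canonicalization for tensors above
// the size cap and hashes the raw proto, trading precision for speed: two
// equivalent but differently encoded oversized protos may hash differently.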
bool AreTensorProtosEqual(const TensorProto& lhs, const TensorProto& rhs,
bool allow_false_negatives) {
const int64_t lhs_tensor_bytes =
attr_value_util_internal::TensorByteSize(lhs);
const int64_t rhs_tensor_bytes =
attr_value_util_internal::TensorByteSize(rhs);
if (lhs_tensor_bytes != rhs_tensor_bytes) {
return false;
}
const int64_t lhs_proto_bytes = lhs.ByteSizeLong();
const bool large_expansion =
(lhs_proto_bytes < 512 && lhs_tensor_bytes > 4096);
const bool only_compare_proto =
(allow_false_negatives && lhs_tensor_bytes > kMaxAttrValueTensorByteSize);
if (large_expansion || only_compare_proto) {
if (AreSerializedProtosEqual(lhs, rhs))
return true;
else if (only_compare_proto)
return false;
}
Tensor lhs_t(lhs.dtype());
bool success = lhs_t.FromProto(lhs);
if (!success) {
return false;
}
Tensor rhs_t(rhs.dtype());
success = rhs_t.FromProto(rhs);
if (!success) {
return false;
}
TensorProto lhs_tp;
lhs_t.AsProtoTensorContent(&lhs_tp);
TensorProto rhs_tp;
rhs_t.AsProtoTensorContent(&rhs_tp);
return AreSerializedProtosEqual(lhs_tp, rhs_tp);
}
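// Cost heuristic above, roughly: when expanding the proto into a Tensor would
// blow up memory (small proto, large tensor), a cheap serialized-bytes
// comparison is tried first; when the caller tolerates false negatives and
// the tensor exceeds the size cap, that comparison is also the final answer.
// Otherwise both protos are round-tripped through Tensor into a canonical
// encoding before being compared.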
using TensorProtoHasher = std::function<uint64(const TensorProto&)>;
uint64 AttrValueHash(const AttrValue& a, const TensorProtoHasher& tensor_hash) {
if (a.has_tensor()) return tensor_hash(a.tensor());
if (a.has_func()) {
const NameAttrList& func = a.func();
uint64 h = Hash64(func.name());
std::map<string, AttrValue> map(func.attr().begin(), func.attr().end());
for (const auto& pair : map) {
h = Hash64(pair.first.data(), pair.first.size(), h);
h = Hash64Combine(AttrValueHash(pair.second, tensor_hash), h);
}
return h;
}
return DeterministicProtoHash64(a);
}
string SummarizeString(const string& str) {
string escaped = absl::CEscape(str);
constexpr int kMaxStringSummarySize = 80;
if (escaped.size() >= kMaxStringSummarySize) {
StringPiece prefix(escaped);
StringPiece suffix = prefix;
prefix.remove_suffix(escaped.size() - 10);
suffix.remove_prefix(escaped.size() - 10);
return strings::StrCat("\"", prefix, "...", suffix, "\"");
} else {
return strings::StrCat("\"", escaped, "\"");
}
}
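// Example, matching the unit tests: an 80-character string of '-' summarizes
// as the quoted first ten and last ten escaped characters joined by "...",
// i.e. "----------...----------", while a 40-character string is quoted
// verbatim.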
string SummarizeTensor(const TensorProto& tensor_proto) {
Tensor t;
int64_t tensor_byte_size =
attr_value_util_internal::TensorByteSize(tensor_proto);
  if (tensor_byte_size > kMaxAttrValueTensorByteSize ||
      tensor_byte_size == -1) {
return strings::StrCat("<TensorProto: ", tensor_proto.ShortDebugString(),
">");
} else if (!t.FromProto(tensor_proto)) {
return strings::StrCat(
"<Invalid TensorProto: ", tensor_proto.ShortDebugString(), ">");
}
return t.DebugString();
}
string SummarizeFunc(const NameAttrList& func) {
std::vector<string> entries;
for (const auto& p : func.attr()) {
entries.push_back(
strings::StrCat(p.first, "=", SummarizeAttrValue(p.second)));
}
std::sort(entries.begin(), entries.end());
return strings::StrCat(func.name(), "[", absl::StrJoin(entries, ", "), "]");
}
bool ParseAttrValueHelper_TensorNestsUnderLimit(int limit, string to_parse) {
int nests = 0;
int maxed_out = to_parse.length();
int open_curly = to_parse.find('{');
int open_bracket = to_parse.find('<');
int close_curly = to_parse.find('}');
int close_bracket = to_parse.find('>');
if (open_curly == -1) {
open_curly = maxed_out;
}
if (open_bracket == -1) {
open_bracket = maxed_out;
}
int min = std::min(open_curly, open_bracket);
do {
if (open_curly == maxed_out && open_bracket == maxed_out) {
return true;
}
if (min == open_curly) {
nests += 1;
open_curly = to_parse.find('{', open_curly + 1);
if (open_curly == -1) {
open_curly = maxed_out;
}
} else if (min == open_bracket) {
nests += 1;
open_bracket = to_parse.find('<', open_bracket + 1);
if (open_bracket == -1) {
open_bracket = maxed_out;
}
} else if (min == close_curly) {
nests -= 1;
close_curly = to_parse.find('}', close_curly + 1);
if (close_curly == -1) {
close_curly = maxed_out;
}
} else if (min == close_bracket) {
nests -= 1;
close_bracket = to_parse.find('>', close_bracket + 1);
if (close_bracket == -1) {
close_bracket = maxed_out;
}
}
min = std::min({open_curly, open_bracket, close_curly, close_bracket});
  } while (nests < limit);
return false;
}
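// The helper above makes a single linear scan over the text, tracking the
// next occurrence of each of '{', '<', '}' and '>', incrementing a nesting
// counter at openers and decrementing at closers. It returns false once the
// counter reaches the limit, bounding the nesting depth the proto parser is
// asked to handle.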
}
string SummarizeAttrValue(const AttrValue& attr_value) {
switch (attr_value.value_case()) {
case AttrValue::kS:
return SummarizeString(attr_value.s());
case AttrValue::kI:
return strings::StrCat(attr_value.i());
case AttrValue::kF:
return strings::StrCat(attr_value.f());
case AttrValue::kB:
return attr_value.b() ? "true" : "false";
case AttrValue::kType:
return EnumName_DataType(attr_value.type());
case AttrValue::kShape:
return PartialTensorShape::DebugString(attr_value.shape());
case AttrValue::kTensor:
return SummarizeTensor(attr_value.tensor());
case AttrValue::kList: {
std::vector<string> pieces;
if (attr_value.list().s_size() > 0) {
for (int i = 0; i < attr_value.list().s_size(); ++i) {
pieces.push_back(SummarizeString(attr_value.list().s(i)));
}
} else if (attr_value.list().i_size() > 0) {
for (int i = 0; i < attr_value.list().i_size(); ++i) {
pieces.push_back(strings::StrCat(attr_value.list().i(i)));
}
} else if (attr_value.list().f_size() > 0) {
for (int i = 0; i < attr_value.list().f_size(); ++i) {
pieces.push_back(strings::StrCat(attr_value.list().f(i)));
}
} else if (attr_value.list().b_size() > 0) {
for (int i = 0; i < attr_value.list().b_size(); ++i) {
pieces.push_back(attr_value.list().b(i) ? "true" : "false");
}
} else if (attr_value.list().type_size() > 0) {
for (int i = 0; i < attr_value.list().type_size(); ++i) {
pieces.push_back(EnumName_DataType(attr_value.list().type(i)));
}
} else if (attr_value.list().shape_size() > 0) {
for (int i = 0; i < attr_value.list().shape_size(); ++i) {
pieces.push_back(
TensorShape::DebugString(attr_value.list().shape(i)));
}
} else if (attr_value.list().tensor_size() > 0) {
for (int i = 0; i < attr_value.list().tensor_size(); ++i) {
pieces.push_back(SummarizeTensor(attr_value.list().tensor(i)));
}
} else if (attr_value.list().func_size() > 0) {
for (int i = 0; i < attr_value.list().func_size(); ++i) {
pieces.push_back(SummarizeFunc(attr_value.list().func(i)));
}
}
constexpr int kMaxListSummarySize = 30;
if (pieces.size() >= kMaxListSummarySize) {
uint64_t fingerprint =
Fingerprint64(absl::StrJoin(pieces.begin(), pieces.end(), ","));
pieces.erase(pieces.begin() + 5, pieces.end() - 6);
pieces[5] = "...";
return strings::StrCat("[", absl::StrJoin(pieces, ", "),
"]{attr_hash=", fingerprint, "}");
} else {
return strings::StrCat("[", absl::StrJoin(pieces, ", "), "]");
}
}
case AttrValue::kFunc: {
return SummarizeFunc(attr_value.func());
}
case AttrValue::kPlaceholder:
return strings::StrCat("$", attr_value.placeholder());
case AttrValue::VALUE_NOT_SET:
return "<Unknown AttrValue type>";
}
return "<Unknown AttrValue type>";
}
Status AttrValueHasType(const AttrValue& attr_value, StringPiece type) {
int num_set = 0;
#define VALIDATE_FIELD(name, type_string, oneof_case) \
do { \
if (attr_value.has_list()) { \
if (attr_value.list().name##_size() > 0) { \
if (type != "list(" type_string ")") { \
return errors::InvalidArgument( \
"AttrValue had value with type 'list(" type_string ")' when '", \
type, "' expected"); \
} \
++num_set; \
} \
} else if (attr_value.value_case() == AttrValue::oneof_case) { \
if (type != type_string) { \
return errors::InvalidArgument( \
"AttrValue had value with type '" type_string "' when '", type, \
"' expected"); \
} \
++num_set; \
} \
} while (false)
VALIDATE_FIELD(s, "string", kS);
VALIDATE_FIELD(i, "int", kI);
VALIDATE_FIELD(f, "float", kF);
VALIDATE_FIELD(b, "bool", kB);
VALIDATE_FIELD(type, "type", kType);
VALIDATE_FIELD(shape, "shape", kShape);
VALIDATE_FIELD(tensor, "tensor", kTensor);
VALIDATE_FIELD(func, "func", kFunc);
#undef VALIDATE_FIELD
if (attr_value.value_case() == AttrValue::kPlaceholder) {
return errors::InvalidArgument(
"AttrValue had value with unexpected type 'placeholder'");
}
if (absl::StartsWith(type, "list(") && !attr_value.has_list()) {
if (num_set) {
return errors::InvalidArgument(
"AttrValue missing value with expected type '", type, "'");
} else {
++num_set;
}
}
if (num_set == 0 && !absl::StartsWith(type, "list(")) {
return errors::InvalidArgument(
"AttrValue missing value with expected type '", type, "'");
}
if (type == "type") {
if (!DataType_IsValid(attr_value.type())) {
return errors::InvalidArgument("AttrValue has invalid DataType enum: ",
attr_value.type());
}
if (IsRefType(attr_value.type())) {
return errors::InvalidArgument(
"AttrValue must not have reference type value of ",
DataTypeString(attr_value.type()));
}
if (attr_value.type() == DT_INVALID) {
return errors::InvalidArgument("AttrValue has invalid DataType");
}
} else if (type == "list(type)") {
for (auto as_int : attr_value.list().type()) {
const DataType dtype = static_cast<DataType>(as_int);
if (!DataType_IsValid(dtype)) {
return errors::InvalidArgument("AttrValue has invalid DataType enum: ",
as_int);
}
if (IsRefType(dtype)) {
return errors::InvalidArgument(
"AttrValue must not have reference type value of ",
DataTypeString(dtype));
}
if (dtype == DT_INVALID) {
return errors::InvalidArgument("AttrValue contains invalid DataType");
}
}
}
return absl::OkStatus();
}
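// Illustrative checks, mirroring the unit tests (with a helper V(x) that
// builds an AttrValue via SetAttrValue):
//   AttrValueHasType(V(123), "int").ok()        -> true
//   AttrValueHasType(V(DT_FLOAT), "type").ok()  -> true
//   AttrValueHasType(V(1.2), "int").ok()        -> false (type mismatch)
// Placeholder values are always rejected; reference and invalid DataTypes are
// rejected for "type" and "list(type)".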
bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) {
string field_name;
bool is_list = absl::ConsumePrefix(&type, "list(");
if (absl::ConsumePrefix(&type, "string")) {
field_name = "s";
} else if (absl::ConsumePrefix(&type, "int")) {
field_name = "i";
} else if (absl::ConsumePrefix(&type, "float")) {
field_name = "f";
} else if (absl::ConsumePrefix(&type, "bool")) {
field_name = "b";
} else if (absl::ConsumePrefix(&type, "type")) {
field_name = "type";
} else if (absl::ConsumePrefix(&type, "shape")) {
field_name = "shape";
} else if (absl::ConsumePrefix(&type, "tensor")) {
field_name = "tensor";
} else if (absl::ConsumePrefix(&type, "func")) {
field_name = "func";
} else if (absl::ConsumePrefix(&type, "placeholder")) {
field_name = "placeholder";
} else {
return false;
}
if (is_list && !absl::ConsumePrefix(&type, ")")) {
return false;
}
string to_parse;
if (is_list) {
StringPiece cleaned = text;
str_util::RemoveLeadingWhitespace(&cleaned);
str_util::RemoveTrailingWhitespace(&cleaned);
if (cleaned.size() < 2 || cleaned[0] != '[' ||
cleaned[cleaned.size() - 1] != ']') {
return false;
}
cleaned.remove_prefix(1);
str_util::RemoveLeadingWhitespace(&cleaned);
if (cleaned.size() == 1) {
out->Clear();
out->mutable_list();
return true;
}
to_parse = strings::StrCat("list { ", field_name, ": ", text, " }");
} else {
to_parse = strings::StrCat(field_name, ": ", text);
}
if (field_name == "tensor") {
if (!ParseAttrValueHelper_TensorNestsUnderLimit(kMaxTensorNestDepth,
to_parse)) {
return false;
}
}
return ProtoParseFromString(to_parse, out);
}
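// Sketch of how calls flow through the function above (the generated text is
// fed to the AttrValue proto parser):
//   ParseAttrValue("int", "123", &out)           parses "i: 123"
//   ParseAttrValue("list(int)", "[1, 2]", &out)  parses "list { i: [1, 2] }"
//   ParseAttrValue("list(int)", "[]", &out)      short-circuits to an empty list
// Tensor-typed text is additionally pre-screened for nesting depth before it
// reaches the proto parser.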
void SetAttrValue(const AttrValue& value, AttrValue* out) { *out = value; }
#define DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \
void SetAttrValue(ARG_TYPE value, AttrValue* out) { out->set_##FIELD(value); }
#define DEFINE_SET_ATTR_VALUE_LIST(ARG_TYPE, FIELD) \
void SetAttrValue(ARG_TYPE value, AttrValue* out) { \
out->mutable_list()->Clear(); \
for (const auto& v : value) { \
out->mutable_list()->add_##FIELD(v); \
} \
}
#define DEFINE_SET_ATTR_VALUE_BOTH(ARG_TYPE, FIELD) \
DEFINE_SET_ATTR_VALUE_ONE(ARG_TYPE, FIELD) \
DEFINE_SET_ATTR_VALUE_LIST(gtl::ArraySlice<ARG_TYPE>, FIELD)
DEFINE_SET_ATTR_VALUE_ONE(const string&, s)
DEFINE_SET_ATTR_VALUE_LIST(absl::Span<const string>, s)
DEFINE_SET_ATTR_VALUE_BOTH(const char*, s)
DEFINE_SET_ATTR_VALUE_BOTH(int64_t, i)
DEFINE_SET_ATTR_VALUE_BOTH(int32_t, i)
DEFINE_SET_ATTR_VALUE_BOTH(float, f)
DEFINE_SET_ATTR_VALUE_BOTH(double, f)
DEFINE_SET_ATTR_VALUE_BOTH(bool, b)
DEFINE_SET_ATTR_VALUE_LIST(const std::vector<bool>&, b)
DEFINE_SET_ATTR_VALUE_LIST(std::initializer_list<bool>, b)
DEFINE_SET_ATTR_VALUE_BOTH(DataType, type)
void SetAttrValue(const tstring& value, AttrValue* out) {
out->set_s(value.data(), value.size());
}
void SetAttrValue(absl::Span<const tstring> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
out->mutable_list()->add_s(v.data(), v.size());
}
}
void SetAttrValue(StringPiece value, AttrValue* out) {
out->set_s(value.data(), value.size());
}
void SetAttrValue(const absl::Span<const StringPiece> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
out->mutable_list()->add_s(v.data(), v.size());
}
}
void MoveAttrValue(std::vector<string>&& value, AttrValue* out) {
out->mutable_list()->Clear();
for (auto& v : value) {
out->mutable_list()->add_s(std::move(v));
}
}
void SetAttrValue(const TensorShape& value, AttrValue* out) {
value.AsProto(out->mutable_shape());
}
void SetAttrValue(const TensorShapeProto& value, AttrValue* out) {
*out->mutable_shape() = value;
}
void SetAttrValue(const PartialTensorShape& value, AttrValue* out) {
value.AsProto(out->mutable_shape());
}
void SetAttrValue(const absl::Span<const TensorShape> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
v.AsProto(out->mutable_list()->add_shape());
}
}
void SetAttrValue(absl::Span<const TensorShapeProto> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_shape() = v;
}
}
void SetAttrValue(const absl::Span<const PartialTensorShape> value,
AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
v.AsProto(out->mutable_list()->add_shape());
}
}
void SetAttrValue(const Tensor& value, AttrValue* out) {
if (value.NumElements() > 1) {
value.AsProtoTensorContent(out->mutable_tensor());
} else {
value.AsProtoField(out->mutable_tensor());
}
}
void SetAttrValue(const absl::Span<const Tensor> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
if (v.NumElements() > 1) {
v.AsProtoTensorContent(out->mutable_list()->add_tensor());
} else {
v.AsProtoField(out->mutable_list()->add_tensor());
}
}
}
void SetAttrValue(const TensorProto& value, AttrValue* out) {
*out->mutable_tensor() = value;
}
void SetAttrValue(const absl::Span<const TensorProto> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_tensor() = v;
}
}
void SetAttrValue(const NameAttrList& value, AttrValue* out) {
*out->mutable_func() = value;
}
void SetAttrValue(absl::Span<const NameAttrList> value, AttrValue* out) {
out->mutable_list()->Clear();
for (const auto& v : value) {
*out->mutable_list()->add_func() = v;
}
}
bool AreAttrValuesEqual(const AttrValue& a, const AttrValue& b,
bool allow_false_negatives) {
if (a.type() != b.type()) {
return false;
  } else if (a.type() != DT_INVALID && b.type() != DT_INVALID) {
    // Both types are set and known to match at this point.
    return true;
}
if (a.has_tensor() != b.has_tensor()) {
return false;
} else if (a.has_tensor() && b.has_tensor()) {
return AreTensorProtosEqual(a.tensor(), b.tensor(), allow_false_negatives);
}
if (a.has_func() != b.has_func()) {
return false;
} else if (a.has_func() && b.has_func()) {
const NameAttrList& af = a.func();
const NameAttrList& bf = b.func();
if (af.name() != bf.name()) return false;
std::unordered_map<string, AttrValue> am(af.attr().begin(),
af.attr().end());
for (const auto& bm_pair : bf.attr()) {
const auto& iter = am.find(bm_pair.first);
if (iter == am.end()) return false;
if (!AreAttrValuesEqual(iter->second, bm_pair.second,
allow_false_negatives))
return false;
am.erase(iter);
}
if (!am.empty()) return false;
return true;
}
return AreSerializedProtosEqual(a, b);
}
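// Function-valued attrs above are compared as unordered attr maps keyed by
// name, so the proto map's iteration order is irrelevant; anything that is
// neither a tensor nor a function falls through to a deterministic
// serialized-proto comparison.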
uint64 AttrValueHash(const AttrValue& a) {
return AttrValueHash(a, TensorProtoHash);
}
uint64 FastAttrValueHash(const AttrValue& a) {
return AttrValueHash(a, FastTensorProtoHash);
}
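// Both hashes fold the function name and its attr map (in sorted key order)
// into the result; they differ only in the tensor hasher they delegate to, so
// FastAttrValueHash inherits FastTensorProtoHash's imprecision on very large
// tensors.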
bool HasPlaceHolder(const AttrValue& val) {
switch (val.value_case()) {
case AttrValue::kList: {
for (const NameAttrList& func : val.list().func()) {
for (const auto& p : func.attr()) {
if (HasPlaceHolder(p.second)) {
return true;
}
}
}
break;
}
case AttrValue::kFunc:
for (const auto& p : val.func().attr()) {
if (HasPlaceHolder(p.second)) {
return true;
}
}
break;
case AttrValue::kPlaceholder:
return true;
default:
break;
}
return false;
}
bool SubstitutePlaceholders(const SubstituteFunc& substitute,
AttrValue* value) {
switch (value->value_case()) {
case AttrValue::kList: {
for (NameAttrList& func : *value->mutable_list()->mutable_func()) {
for (auto& p : *func.mutable_attr()) {
if (!SubstitutePlaceholders(substitute, &p.second)) {
return false;
}
}
}
break;
}
case AttrValue::kFunc:
for (auto& p : *(value->mutable_func()->mutable_attr())) {
if (!SubstitutePlaceholders(substitute, &p.second)) {
return false;
}
}
break;
case AttrValue::kPlaceholder:
return substitute(value->placeholder(), value);
case AttrValue::VALUE_NOT_SET:
return false;
default:
break;
}
return true;
}
} | #include "tensorflow/core/framework/attr_value_util.h"
#include <numeric>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
AttrValue V(T value) {
AttrValue ret;
SetAttrValue(value, &ret);
return ret;
}
AttrValue P(const string& p) {
AttrValue ret;
ret.set_placeholder(p);
return ret;
}
AttrValue F(const string& name,
std::vector<std::pair<string, AttrValue>> pairs) {
AttrValue ret;
ret.mutable_func()->set_name(name);
ret.mutable_func()->mutable_attr()->insert(pairs.begin(), pairs.end());
return ret;
}
AttrValue Fs(
std::vector<std::pair<string, std::vector<std::pair<string, AttrValue>>>>
funcs) {
AttrValue ret;
for (const auto& func : funcs) {
NameAttrList* entry = ret.mutable_list()->add_func();
entry->set_name(func.first);
entry->mutable_attr()->insert(func.second.begin(), func.second.end());
}
return ret;
}
TEST(AttrValueUtil, HasType) {
EXPECT_TRUE(AttrValueHasType(V(123), "int").ok());
EXPECT_TRUE(AttrValueHasType(V(1.2), "float").ok());
EXPECT_TRUE(AttrValueHasType(V(DT_FLOAT), "type").ok());
EXPECT_TRUE(AttrValueHasType(F("f", {}), "func").ok());
EXPECT_TRUE(AttrValueHasType(Fs({{"f", {}}, {"g", {}}}), "list(func)").ok());
EXPECT_FALSE(AttrValueHasType(V(123), "func").ok());
EXPECT_FALSE(AttrValueHasType(V(1.2), "int").ok());
EXPECT_FALSE(AttrValueHasType(V(DT_FLOAT), "shape").ok());
EXPECT_FALSE(AttrValueHasType(F("f", {}), "string").ok());
EXPECT_FALSE(AttrValueHasType(P("T"), "float").ok());
EXPECT_FALSE(AttrValueHasType(V(static_cast<DataType>(1000)), "type").ok());
std::vector<DataType> list_type({static_cast<DataType>(1000)});
EXPECT_FALSE(AttrValueHasType(V(list_type), "list(type)").ok());
}
SubstituteFunc ReplaceTWith(const AttrValue& val) {
return [val](const string& placeholder, AttrValue* target) {
if (placeholder == "T") {
*target = val;
return true;
} else {
return false;
}
};
}
TEST(AttrValueUtil, Basic) {
auto v = F("MatMul", {{"dtype", P("T")},
{"transpose_a", V(false)},
{"transpose_b", V(true)},
{"use_cublas", V(true)}});
TF_EXPECT_OK(AttrValueHasType(v, "func"));
EXPECT_TRUE(HasPlaceHolder(v));
EXPECT_EQ(
SummarizeAttrValue(v),
"MatMul[dtype=$T, transpose_a=false, transpose_b=true, use_cublas=true]");
SubstitutePlaceholders(ReplaceTWith(V(DT_FLOAT)), &v);
EXPECT_TRUE(!HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"MatMul[dtype=DT_FLOAT, transpose_a=false, transpose_b=true, "
"use_cublas=true]");
}
TEST(AttrValueUtil, Shaped) {
auto v =
F("OpRequiresShape", {{"shape_full", V(TensorShape({1, 0}))},
{"shape_part", V(PartialTensorShape({-1, 1, 0}))}});
TF_EXPECT_OK(AttrValueHasType(v, "func"));
EXPECT_FALSE(HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"OpRequiresShape[shape_full=[1,0], shape_part=[?,1,0]]");
}
TEST(AttrValueUtil, DeepAttr) {
auto v = Fs({{"f", {{"T", P("T")}}}, {"g", {{"T", P("T")}}}});
TF_EXPECT_OK(AttrValueHasType(v, "list(func)"));
EXPECT_TRUE(HasPlaceHolder(v));
for (int i = 0; i < 3; ++i) {
v = F("f", {{"T", P("T")}, {"F", v}});
EXPECT_TRUE(HasPlaceHolder(v));
}
EXPECT_EQ(SummarizeAttrValue(v),
"f[F=f[F=f[F=[f[T=$T], g[T=$T]], T=$T], T=$T], T=$T]");
SubstitutePlaceholders(ReplaceTWith(F("x", {})), &v);
EXPECT_TRUE(!HasPlaceHolder(v));
EXPECT_EQ(SummarizeAttrValue(v),
"f[F=f[F=f[F=[f[T=x[]], g[T=x[]]], T=x[]], T=x[]], T=x[]]");
}
TEST(AttrValueUtil, SummarizeAttrValueDoesNotElideShortStrings) {
AttrValue attr_value;
SetAttrValue(string(40, '-'), &attr_value);
EXPECT_EQ(strings::StrCat("\"", string(40, '-'), "\""),
SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueElidesLongStrings) {
AttrValue attr_value;
SetAttrValue(string(80, '-'), &attr_value);
EXPECT_EQ("\"----------...----------\"", SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueDoesNotElideShortLists) {
std::vector<int> alist(10);
std::iota(alist.begin(), alist.end(), 0);
AttrValue attr_value;
SetAttrValue(alist, &attr_value);
EXPECT_EQ("[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]", SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, SummarizeAttrValueElidesLongLists) {
std::vector<int> alist(110);
std::iota(alist.begin(), alist.end(), 0);
AttrValue attr_value;
SetAttrValue(alist, &attr_value);
EXPECT_EQ(
"[0, 1, 2, 3, 4, ..., 105, 106, 107, 108, "
"109]{attr_hash=14506120815048308275}",
SummarizeAttrValue(attr_value));
}
TEST(AttrValueUtil, TensorByteSizeNumElementsOverflows) {
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(9223372036854775807L);
proto.mutable_tensor_shape()->add_dim()->set_size(2092026309338556617L);
proto.set_dtype(DT_INT32);
EXPECT_EQ(attr_value_util_internal::TensorByteSize(proto), -1);
}
TEST(AttrValueUtil, TensorByteSizeShouldNotOverflow) {
{
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(4611686018427387904L);
proto.set_dtype(DT_INT32);
EXPECT_EQ(attr_value_util_internal::TensorByteSize(proto), -1);
}
{
TensorProto proto;
proto.mutable_tensor_shape()->add_dim()->set_size(46123445412334L);
proto.set_dtype(DT_INT32);
EXPECT_NE(attr_value_util_internal::TensorByteSize(proto), -1);
}
}
AttrValue FromText(const string& text) {
AttrValue attr;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &attr));
return attr;
}
void ExpectDifferent(const AttrValue& a1, const AttrValue& a2) {
EXPECT_FALSE(AreAttrValuesEqual(a1, a2));
EXPECT_FALSE(AreAttrValuesEqual(a2, a1));
EXPECT_NE(AttrValueHash(a1), AttrValueHash(a2));
}
TEST(AttrValueEquality, StringAndFuncTensors) {
AttrValue a = FromText(R"(
tensor {
dtype: DT_STRING
tensor_shape {
dim {
size: 2
}
}
string_val: 'reader_dataset_ops_test/tmphtXHks/text_line.0.txt'
string_val: 'reader_dataset_ops_test/tmphtXHks/text_line.1.txt'
})");
EXPECT_TRUE(AreAttrValuesEqual(a, a));
EXPECT_EQ(AttrValueHash(a), AttrValueHash(a));
AttrValue b = a;
(*b.mutable_tensor()->mutable_string_val(0))[3] = '1';
ExpectDifferent(a, b);
AttrValue c1;
c1.mutable_func()->set_name("func_name");
(*c1.mutable_func()->mutable_attr())["attr1"] = a;
(*c1.mutable_func()->mutable_attr())["attr2"] = b;
EXPECT_TRUE(AreAttrValuesEqual(c1, c1));
EXPECT_EQ(AttrValueHash(c1), AttrValueHash(c1));
ExpectDifferent(c1, a);
AttrValue c2 = c1;
c2.mutable_func()->set_name("func_name2");
ExpectDifferent(c1, c2);
c2 = c1;
(*c2.mutable_func()->mutable_attr())["attr3"] = b;
ExpectDifferent(c1, c2);
c2 = c1;
(*c2.mutable_func()->mutable_attr())["attr2"] = a;
ExpectDifferent(c1, c2);
c2 = c1;
c2.mutable_func()->mutable_attr()->erase("attr2");
ExpectDifferent(c1, c2);
}
TEST(AttrValueEquality, GiantTensors) {
AttrValue tensor = FromText(R"(
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 1024
}
dim {
size: 1024
}
dim {
size: 1024
}
dim {
size: 1024
}
}
int_val: 0
})");
EXPECT_TRUE(AreAttrValuesEqual(tensor, tensor));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/attr_value_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/attr_value_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93b256e8-a954-4c10-a627-a30c48df4eab | cpp | tensorflow/tensorflow | model | tensorflow/lite/delegates/gpu/common/model.cc | tensorflow/lite/delegates/gpu/common/model_test.cc | #include "tensorflow/lite/delegates/gpu/common/model.h"
#include <stdint.h>
#include <algorithm>
#include <iterator>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
std::vector<Node*> GraphFloat32::nodes() const {
return FilterNodes([](const NodeDef&) { return true; });
}
std::vector<Value*> GraphFloat32::values() const {
return FilterValues([](const ValueDef&) { return true; });
}
std::vector<Value*> GraphFloat32::inputs() const {
return FilterValues([](const ValueDef& v) { return v.producer == nullptr; });
}
std::vector<Value*> GraphFloat32::variable_inputs() const {
return FilterValues(
[](const ValueDef& v) { return v.value->tensor.is_variable_input; });
}
std::vector<Value*> GraphFloat32::outputs() const {
std::vector<Value*> values;
std::vector<Value*> values_known_graph_outputs;
values.reserve(values_.size());
values_known_graph_outputs.reserve(values_.size());
for (auto& v : values_) {
auto value_ptr = v.value.get();
if (value_ptr == nullptr) continue;
if (v.consumers.empty()) {
values.push_back(v.value.get());
} else if (std::find(known_graph_outputs_.begin(),
known_graph_outputs_.end(),
value_ptr) != known_graph_outputs_.end()) {
values_known_graph_outputs.push_back(v.value.get());
}
}
values.insert(values.end(), values_known_graph_outputs.begin(),
values_known_graph_outputs.end());
return values;
}
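// outputs() lists consumerless values first, then values explicitly
// registered in known_graph_outputs_ that still have consumers; a value can
// therefore be a graph output while also feeding other nodes.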
std::vector<Value*> GraphFloat32::FindInputs(NodeId id) const {
if (id >= nodes_.size()) {
return {};
}
return nodes_.at(id).inputs;
}
std::vector<Value*> GraphFloat32::FindOutputs(NodeId id) const {
if (id >= nodes_.size()) {
return {};
}
return nodes_.at(id).outputs;
}
bool GraphFloat32::IsGraphInput(ValueId id) const {
if (id >= values_.size()) {
return false;
}
return values_[id].producer == nullptr;
}
bool GraphFloat32::IsGraphOutput(ValueId id) const {
if (id >= values_.size()) {
return false;
}
if (std::find(known_graph_outputs_.begin(), known_graph_outputs_.end(),
values_[id].value.get()) != known_graph_outputs_.end()) {
return true;
}
return values_[id].consumers.empty();
}
Node* GraphFloat32::FindProducer(ValueId id) const {
if (id >= values_.size()) {
return nullptr;
}
return values_[id].producer;
}
std::vector<Node*> GraphFloat32::FindConsumers(ValueId id) const {
if (id >= values_.size()) {
return {};
}
return values_[id].consumers;
}
Node* GraphFloat32::GetNode(NodeId id) const {
if (id >= nodes_.size()) {
return {};
}
return nodes_.at(id).node.get();
}
Value* GraphFloat32::GetValue(ValueId id) const {
if (id >= values_.size()) {
return nullptr;
}
return values_[id].value.get();
}
Node* GraphFloat32::NewNode() {
const NodeId new_id = nodes_.size();
NodeDef def;
def.node = std::make_unique<Node>(Node{static_cast<NodeId>(new_id), {}});
Node* node = def.node.get();
nodes_[new_id] = std::move(def);
execution_plan_.push_back(new_id);
return node;
}
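// Node ids are assigned densely (0, 1, 2, ...) and every new node is appended
// to execution_plan_, so plan order defaults to creation order;
// InsertNodeAfter below splices a node into the middle of the plan instead.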
absl::Status GraphFloat32::InsertNodeAfter(NodeId id, Node** new_node) {
if (id >= nodes_.size()) {
return absl::OutOfRangeError("NodeId is out of range");
}
  size_t idx = 0;
while (idx < execution_plan_.size()) {
if (execution_plan_[idx] == id) break;
++idx;
}
if (idx == execution_plan_.size()) {
return absl::OutOfRangeError("NodeId not in execution plan");
}
const NodeId new_id = nodes_.size();
NodeDef def;
def.node = std::make_unique<Node>(Node{static_cast<NodeId>(new_id), {}});
*new_node = def.node.get();
nodes_[new_id] = std::move(def);
execution_plan_.insert(execution_plan_.begin() + idx + 1, new_id);
return absl::OkStatus();
}
Value* GraphFloat32::NewValue() {
ValueDef def;
def.value =
std::make_unique<Value>(Value{static_cast<ValueId>(values_.size()), {}});
Value* value = def.value.get();
values_.push_back(std::move(def));
return value;
}
absl::Status GraphFloat32::SetProducer(NodeId producer, ValueId value) {
ValueDef* v;
RETURN_IF_ERROR(LookupValue(value, &v));
Value* value_ptr = v->value.get();
NodeDef* n;
RETURN_IF_ERROR(LookupNode(producer, &n));
Node* node_ptr = n->node.get();
if (node_ptr == v->producer) {
return absl::AlreadyExistsError(absl::StrCat(
"Node ", producer, " is already a producer of the value ", value));
}
if (IsInput(producer, value)) {
return absl::InvalidArgumentError("Node is a consumer of the value");
}
if (v->producer != nullptr) {
Erase(&nodes_[v->producer->id].outputs, value_ptr);
}
v->producer = node_ptr;
n->outputs.push_back(value_ptr);
return absl::OkStatus();
}
absl::Status GraphFloat32::RemoveProducer(ValueId value) {
ValueDef* v;
RETURN_IF_ERROR(LookupValue(value, &v));
Value* value_ptr = v->value.get();
if (v->producer == nullptr) {
return absl::InvalidArgumentError("Value does not have a producer");
}
Erase(&nodes_[v->producer->id].outputs, value_ptr);
v->producer = nullptr;
return absl::OkStatus();
}
absl::Status GraphFloat32::AddConsumer(NodeId consumer, ValueId value) {
ValueDef* v;
RETURN_IF_ERROR(LookupValue(value, &v));
Value* value_ptr = v->value.get();
NodeDef* n;
RETURN_IF_ERROR(LookupNode(consumer, &n));
Node* node_ptr = n->node.get();
if (node_ptr == v->producer) {
return absl::InvalidArgumentError("Node is a producer of the value");
}
if (IsInput(consumer, value)) {
return absl::AlreadyExistsError(absl::StrCat(
"Node ", consumer, " is already a consumer of the value ", value));
}
n->inputs.push_back(value_ptr);
v->consumers.push_back(node_ptr);
return absl::OkStatus();
}
absl::Status GraphFloat32::ReplaceInput(NodeId node, ValueId old_value,
ValueId new_value) {
ValueDef* v_old;
RETURN_IF_ERROR(LookupValue(old_value, &v_old));
Value* value_old_ptr = v_old->value.get();
ValueDef* v_new;
RETURN_IF_ERROR(LookupValue(new_value, &v_new));
Value* value_new_ptr = v_new->value.get();
NodeDef* n;
RETURN_IF_ERROR(LookupNode(node, &n));
Node* node_ptr = n->node.get();
if (!IsInput(node, old_value)) {
return absl::InvalidArgumentError("old_value must be input of node.");
}
if (IsInput(node, new_value)) {
return absl::InvalidArgumentError("new_value can not be input of node.");
}
if (node_ptr == v_new->producer) {
return absl::InvalidArgumentError("new_value can not be output of node.");
}
for (int i = 0; i < n->inputs.size(); ++i) {
if (n->inputs[i] == value_old_ptr) {
n->inputs[i] = value_new_ptr;
break;
}
}
v_new->consumers.push_back(node_ptr);
Erase(&v_old->consumers, node_ptr);
return absl::OkStatus();
}
absl::Status GraphFloat32::RemoveConsumer(NodeId consumer, ValueId value) {
ValueDef* v;
RETURN_IF_ERROR(LookupValue(value, &v));
Value* value_ptr = v->value.get();
NodeDef* n;
RETURN_IF_ERROR(LookupNode(consumer, &n));
Node* node_ptr = n->node.get();
if (!IsInput(consumer, value)) {
return absl::InvalidArgumentError("Node is not a consumer of the value");
}
Erase(&n->inputs, value_ptr);
Erase(&v->consumers, node_ptr);
return absl::OkStatus();
}
absl::Status GraphFloat32::DeleteNode(NodeId id) {
NodeDef* n;
RETURN_IF_ERROR(LookupNode(id, &n));
Node* node_ptr = n->node.get();
for (auto value : n->inputs) {
Erase(&values_[value->id].consumers, node_ptr);
}
for (auto value : n->outputs) {
values_[value->id].producer = nullptr;
}
n->inputs.clear();
n->outputs.clear();
n->node.reset();
return absl::OkStatus();
}
absl::Status GraphFloat32::DeleteValue(ValueId id) {
ValueDef* v;
RETURN_IF_ERROR(LookupValue(id, &v));
Value* value_ptr = v->value.get();
if (v->producer != nullptr) {
Erase(&nodes_[v->producer->id].outputs, value_ptr);
}
if (!v->consumers.empty()) {
for (auto node : v->consumers) {
Erase(&nodes_[node->id].inputs, value_ptr);
}
}
v->producer = nullptr;
v->consumers.clear();
v->value.reset();
return absl::OkStatus();
}
absl::Status GraphFloat32::MakeExactCopy(GraphFloat32* model) const {
model->nodes_.clear();
model->execution_plan_.clear();
model->values_.clear();
model->known_graph_outputs_.clear();
for (auto& value_def : values_) {
model->values_.push_back({});
if (value_def.value) {
model->values_.back().value = std::make_unique<Value>(*value_def.value);
if (std::find(known_graph_outputs_.begin(), known_graph_outputs_.end(),
value_def.value.get()) != known_graph_outputs_.end()) {
model->known_graph_outputs_.push_back(
model->values_.back().value.get());
}
}
}
for (auto node_id : execution_plan_) {
model->execution_plan_.push_back(node_id);
model->nodes_[node_id] = {};
auto& node_def = nodes_.at(node_id);
if (node_def.node) {
model->nodes_[node_id].node = std::make_unique<Node>(*node_def.node);
}
}
for (auto node_id : execution_plan_) {
auto& node_def = nodes_.at(node_id);
if (node_def.node) {
for (auto output : node_def.outputs) {
RETURN_IF_ERROR(model->SetProducer(node_def.node->id, output->id));
}
for (auto input : node_def.inputs) {
RETURN_IF_ERROR(model->AddConsumer(node_def.node->id, input->id));
}
}
}
return absl::OkStatus();
}
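// MakeExactCopy rebuilds the graph in phases: first clone every value and
// node (preserving ids and plan order), then re-create the producer/consumer
// edges through SetProducer/AddConsumer so the copy's back-references stay
// consistent.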
bool GraphFloat32::IsInput(NodeId node, ValueId value) {
if (node >= nodes_.size() || value >= values_.size()) {
return false;
}
const NodeDef& n = nodes_[node];
const ValueDef& v = values_[value];
if (!n.node || !v.value) {
return false;
}
return std::find(n.inputs.begin(), n.inputs.end(), v.value.get()) !=
n.inputs.end();
}
absl::Status GraphFloat32::LookupNode(NodeId id, NodeDef** node_def) {
if (id >= nodes_.size()) {
return absl::OutOfRangeError("NodeId is out of range");
}
auto& n = nodes_[id];
if (!n.node) {
return absl::OutOfRangeError("Node is already deleted");
}
*node_def = &n;
return absl::OkStatus();
}
absl::Status GraphFloat32::LookupValue(ValueId id, ValueDef** value_def) {
if (id >= values_.size()) {
return absl::OutOfRangeError("ValueId is out of range");
}
auto& v = values_[id];
if (!v.value) {
return absl::OutOfRangeError("Value is already deleted");
}
*value_def = &v;
return absl::OkStatus();
}
absl::Status RemovePrecedingNode(GraphFloat32* graph, const Node* to_remove,
const Node* to_keep) {
for (auto output : graph->FindOutputs(to_remove->id)) {
auto consumers = graph->FindConsumers(output->id);
if (consumers.size() > 1 ||
(consumers.size() == 1 && consumers[0] != to_keep)) {
return absl::InvalidArgumentError(
"Output from to_remove node has other consumers");
}
}
for (auto input : graph->FindInputs(to_remove->id)) {
RETURN_IF_ERROR(graph->AddConsumer(to_keep->id, input->id));
}
for (auto output : graph->FindOutputs(to_remove->id)) {
RETURN_IF_ERROR(graph->DeleteValue(output->id));
}
return graph->DeleteNode(to_remove->id);
}
absl::Status RemoveFollowingNode(GraphFloat32* graph, const Node* to_remove,
const Node* to_keep) {
for (auto input : graph->FindInputs(to_remove->id)) {
Node* producer = graph->FindProducer(input->id);
if (producer->id != to_keep->id) {
return absl::InvalidArgumentError("To_remove node has other inputs");
}
}
for (auto input : graph->FindInputs(to_remove->id)) {
RETURN_IF_ERROR(graph->DeleteValue(input->id));
}
for (auto output : graph->FindOutputs(to_remove->id)) {
RETURN_IF_ERROR(graph->SetProducer(to_keep->id, output->id));
}
return graph->DeleteNode(to_remove->id);
}
absl::Status RemoveSimpleNodeKeepInput(GraphFloat32* graph,
const Node* simple_node) {
const auto inputs = graph->FindInputs(simple_node->id);
const auto outputs = graph->FindOutputs(simple_node->id);
if (inputs.size() != 1 || outputs.size() != 1) {
return absl::FailedPreconditionError(
"simple_node node must have 1 input and 1 output");
}
const auto input_id = inputs[0]->id;
const auto output_id = outputs[0]->id;
const Node* producer = graph->FindProducer(input_id);
const auto consumers = graph->FindConsumers(output_id);
RETURN_IF_ERROR(graph->DeleteNode(simple_node->id));
for (auto& consumer : consumers) {
RETURN_IF_ERROR(graph->ReplaceInput(consumer->id, output_id, input_id));
}
RETURN_IF_ERROR(graph->DeleteValue(output_id));
if (!producer && consumers.empty()) {
RETURN_IF_ERROR(graph->DeleteValue(input_id));
}
return absl::OkStatus();
}
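// Shape of the rewrite above, for a node with exactly one input and one
// output:
//   producer -> v_in -> simple_node -> v_out -> consumers
// becomes
//   producer -> v_in -> consumers
// The output value is deleted and every consumer is re-pointed at the input
// value; if the node was fully isolated, the input value is deleted as well.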
absl::Status RemoveSimpleNodeKeepOutput(GraphFloat32* graph,
const Node* simple_node) {
const auto inputs = graph->FindInputs(simple_node->id);
const auto outputs = graph->FindOutputs(simple_node->id);
if (inputs.size() != 1 || outputs.size() != 1) {
return absl::FailedPreconditionError(
"simple_node must have 1 input and 1 output");
}
const auto input_id = inputs[0]->id;
const auto output_id = outputs[0]->id;
const Node* producer = graph->FindProducer(input_id);
const auto input_consumers = graph->FindConsumers(input_id);
if (input_consumers.size() != 1) {
return absl::FailedPreconditionError(
"simple_node should be the only consumer on the node.");
}
RETURN_IF_ERROR(graph->DeleteNode(simple_node->id));
if (producer) {
RETURN_IF_ERROR(graph->RemoveProducer(input_id));
RETURN_IF_ERROR(graph->SetProducer(producer->id, output_id));
}
RETURN_IF_ERROR(graph->DeleteValue(input_id));
const auto output_consumers = graph->FindConsumers(output_id);
if (!producer && output_consumers.empty()) {
RETURN_IF_ERROR(graph->DeleteValue(output_id));
}
return absl::OkStatus();
}
absl::Status AddOutput(GraphFloat32* graph, const Node* from_node,
Value** output) {
auto link = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(from_node->id, link->id));
*output = link;
return absl::OkStatus();
}
absl::Status ConnectTwoNodes(GraphFloat32* graph, const Node* from_node,
const Node* to_node, Value** output) {
const Node* output_producer =
*output ? graph->FindProducer((*output)->id) : nullptr;
if (*output && output_producer && output_producer->id != from_node->id) {
return absl::InvalidArgumentError("Wrong output is passed.");
}
if (*output) {
RETURN_IF_ERROR(graph->AddConsumer(to_node->id, (*output)->id));
} else {
Value* link;
RETURN_IF_ERROR(AddOutput(graph, from_node, &link));
RETURN_IF_ERROR(graph->AddConsumer(to_node->id, link->id));
*output = link;
}
return absl::OkStatus();
}
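// Typical use, as a sketch: pass a Value** holding nullptr and the function
// creates the connecting value itself:
//   Value* link = nullptr;
//   RETURN_IF_ERROR(ConnectTwoNodes(&graph, producer, consumer, &link));
// Passing an existing value instead merely attaches the consumer, after
// verifying the value is produced by `from_node`.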
absl::Status CheckBatchSizeForAllValues(const GraphFloat32& model) {
if (model.values().empty()) return absl::OkStatus();
const int32_t b = model.values()[0]->tensor.shape.b;
for (auto value : model.values()) {
if (value->tensor.shape.b != b) {
return absl::InvalidArgumentError(
absl::StrCat("Batch size mismatch, expected ", b, " but got ",
value->tensor.shape.b));
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
TEST(Model, SingleNode) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node));
EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.FindInputs(node->id), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.FindOutputs(node->id), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.FindConsumers(graph_input->id), UnorderedElementsAre(node));
EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node));
EXPECT_THAT(graph.FindConsumers(graph_output->id), UnorderedElementsAre());
EXPECT_THAT(graph.FindProducer(graph_input->id), ::testing::Eq(nullptr));
}
TEST(Model, SingleNodeMultipleOutputs) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output1 = graph.NewValue();
Value* graph_output2 = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node->id, graph_output1->id).ok());
ASSERT_TRUE(graph.SetProducer(node->id, graph_output2->id).ok());
EXPECT_THAT(graph.FindOutputs(node->id),
UnorderedElementsAre(graph_output1, graph_output2));
EXPECT_THAT(graph.FindProducer(graph_output1->id), ::testing::Eq(node));
EXPECT_THAT(graph.FindProducer(graph_output2->id), ::testing::Eq(node));
}
TEST(Model, SetSameConsumer) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_input = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
EXPECT_FALSE(graph.AddConsumer(node->id, graph_input->id).ok());
}
TEST(Model, RemoveConsumer) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_input1 = graph.NewValue();
Value* graph_input2 = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, graph_input1->id).ok());
ASSERT_TRUE(graph.AddConsumer(node->id, graph_input2->id).ok());
EXPECT_THAT(graph.FindConsumers(graph_input1->id),
UnorderedElementsAre(node));
EXPECT_THAT(graph.FindConsumers(graph_input2->id),
UnorderedElementsAre(node));
EXPECT_THAT(graph.FindInputs(node->id),
UnorderedElementsAre(graph_input1, graph_input2));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre());
ASSERT_TRUE(graph.RemoveConsumer(node->id, graph_input1->id).ok());
EXPECT_THAT(graph.FindConsumers(graph_input1->id), UnorderedElementsAre());
EXPECT_THAT(graph.FindInputs(node->id), UnorderedElementsAre(graph_input2));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_input1));
ASSERT_FALSE(graph.RemoveConsumer(node->id, graph_input1->id).ok());
}
TEST(Model, SetSameProducer) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_output = graph.NewValue();
ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
EXPECT_FALSE(graph.SetProducer(node->id, graph_output->id).ok());
}
TEST(Model, ReplaceInput) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* v0 = graph.NewValue();
Value* v1 = graph.NewValue();
Value* v2 = graph.NewValue();
Value* v3 = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, v0->id).ok());
ASSERT_TRUE(graph.AddConsumer(node->id, v1->id).ok());
ASSERT_TRUE(graph.AddConsumer(node->id, v2->id).ok());
EXPECT_THAT(graph.FindInputs(node->id), ElementsAre(v0, v1, v2));
ASSERT_TRUE(graph.ReplaceInput(node->id, v1->id, v3->id).ok());
EXPECT_THAT(graph.FindInputs(node->id), ElementsAre(v0, v3, v2));
}
TEST(Model, RemoveProducer) {
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* graph_output = graph.NewValue();
ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre());
EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node));
ASSERT_TRUE(graph.RemoveProducer(graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(nullptr));
ASSERT_FALSE(graph.RemoveProducer(graph_output->id).ok());
}
class OneNodeModel : public testing::Test {
protected:
void SetUp() override {
node_ = graph_.NewNode();
Value* graph_input = graph_.NewValue();
Value* graph_output = graph_.NewValue();
ASSERT_TRUE(graph_.AddConsumer(node_->id, graph_input->id).ok());
ASSERT_TRUE(graph_.SetProducer(node_->id, graph_output->id).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph_.nodes(), ElementsAre(node_));
}
GraphFloat32 graph_;
Node* node_;
};
TEST_F(OneNodeModel, DeleteNodeKeepInput) {
ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, node_).ok());
EXPECT_TRUE(graph_.inputs().empty());
EXPECT_TRUE(graph_.outputs().empty());
EXPECT_TRUE(graph_.nodes().empty());
}
TEST_F(OneNodeModel, DeleteNodeKeepOutput) {
ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, node_).ok());
EXPECT_TRUE(graph_.inputs().empty());
EXPECT_TRUE(graph_.outputs().empty());
EXPECT_TRUE(graph_.nodes().empty());
}
class TwoNodesModel : public testing::Test {
protected:
void SetUp() override {
graph_input_ = graph_.NewValue();
first_node_ = graph_.NewNode();
value_ = graph_.NewValue();
second_node_ = graph_.NewNode();
graph_output_ = graph_.NewValue();
ASSERT_TRUE(graph_.AddConsumer(first_node_->id, graph_input_->id).ok());
ASSERT_TRUE(graph_.SetProducer(first_node_->id, value_->id).ok());
ASSERT_TRUE(graph_.AddConsumer(second_node_->id, value_->id).ok());
ASSERT_TRUE(graph_.SetProducer(second_node_->id, graph_output_->id).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, second_node_));
}
GraphFloat32 graph_;
Node* first_node_;
Node* second_node_;
Value* graph_input_;
Value* value_;
Value* graph_output_;
};
TEST_F(TwoNodesModel, DeleteFirstNodeKeepInput) {
ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, first_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(second_node_));
}
TEST_F(TwoNodesModel, DeleteFirstNodeKeepOutput) {
ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, first_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(value_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(second_node_));
}
TEST_F(TwoNodesModel, DeleteSecondNodeKeepInput) {
ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, second_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(value_));
EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_));
}
TEST_F(TwoNodesModel, DeleteSecondNodeKeepOutput) {
ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, second_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_));
}
class ThreeNodesModel : public testing::Test {
protected:
void SetUp() override {
first_node_ = graph_.NewNode();
second_node_ = graph_.NewNode();
third_node_ = graph_.NewNode();
graph_input_ = graph_.NewValue();
value0_ = graph_.NewValue();
value1_ = graph_.NewValue();
graph_output_ = graph_.NewValue();
ASSERT_TRUE(graph_.AddConsumer(first_node_->id, graph_input_->id).ok());
ASSERT_TRUE(graph_.SetProducer(first_node_->id, value0_->id).ok());
ASSERT_TRUE(graph_.AddConsumer(second_node_->id, value0_->id).ok());
ASSERT_TRUE(graph_.SetProducer(second_node_->id, value1_->id).ok());
ASSERT_TRUE(graph_.AddConsumer(third_node_->id, value1_->id).ok());
ASSERT_TRUE(graph_.SetProducer(third_node_->id, graph_output_->id).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(),
ElementsAre(first_node_, second_node_, third_node_));
}
GraphFloat32 graph_;
Node* first_node_;
Node* second_node_;
Node* third_node_;
Value* graph_input_;
Value* value0_;
Value* value1_;
Value* graph_output_;
};
TEST_F(ThreeNodesModel, DeleteMiddleNodeKeepInput) {
ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, second_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, third_node_));
EXPECT_THAT(graph_.values(),
UnorderedElementsAre(graph_input_, value0_, graph_output_));
}
TEST_F(ThreeNodesModel, DeleteMiddleNodeKeepOutput) {
ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, second_node_).ok());
EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, third_node_));
EXPECT_THAT(graph_.values(),
UnorderedElementsAre(graph_input_, value1_, graph_output_));
}
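// Commentary added for clarity: in the complex case below, n1's input v1 also
// feeds n0, so removing n1 while keeping its output would have to drop the
// still-consumed v1; RemoveSimpleNodeKeepOutput is therefore expected to
// fail, while RemoveSimpleNodeKeepInput succeeds and rewires n2 onto v1.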
TEST(Model, RemoveSimpleNodeKeepInputComplexCase) {
GraphFloat32 graph;
Node* n0 = graph.NewNode();
Node* n1 = graph.NewNode();
Node* n2 = graph.NewNode();
Value* v0 = graph.NewValue();
Value* v1 = graph.NewValue();
Value* v2 = graph.NewValue();
Value* v3 = graph.NewValue();
Value* o1 = graph.NewValue();
Value* o2 = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(n0->id, v0->id).ok());
ASSERT_TRUE(graph.AddConsumer(n0->id, v1->id).ok());
ASSERT_TRUE(graph.SetProducer(n0->id, o1->id).ok());
ASSERT_TRUE(graph.AddConsumer(n1->id, v1->id).ok());
ASSERT_TRUE(graph.SetProducer(n1->id, v2->id).ok());
ASSERT_TRUE(graph.AddConsumer(n2->id, v2->id).ok());
ASSERT_TRUE(graph.AddConsumer(n2->id, v3->id).ok());
ASSERT_TRUE(graph.SetProducer(n2->id, o2->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(v0, v1, v3));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(o1, o2));
EXPECT_THAT(graph.nodes(), ElementsAre(n0, n1, n2));
ASSERT_FALSE(RemoveSimpleNodeKeepOutput(&graph, n1).ok());
ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph, n1).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(v0, v1, v3));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(o1, o2));
EXPECT_THAT(graph.nodes(), ElementsAre(n0, n2));
EXPECT_THAT(graph.values(), UnorderedElementsAre(v0, v1, v3, o1, o2));
EXPECT_THAT(graph.FindInputs(n0->id), ElementsAre(v0, v1));
EXPECT_THAT(graph.FindInputs(n2->id), ElementsAre(v1, v3));
}
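// Added note: a value may not be both consumed and produced by the same
// node; whichever edge is added second is rejected, in either order.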
TEST(Model, CircularDependency) {
{
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* value = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node->id, value->id).ok());
EXPECT_FALSE(graph.SetProducer(node->id, value->id).ok());
}
{
GraphFloat32 graph;
Node* node = graph.NewNode();
Value* value = graph.NewValue();
ASSERT_TRUE(graph.SetProducer(node->id, value->id).ok());
EXPECT_FALSE(graph.AddConsumer(node->id, value->id).ok());
}
}
TEST(Model, ReassignValue) {
GraphFloat32 graph;
Node* node1 = graph.NewNode();
Node* node2 = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node1->id, graph_output->id).ok());
ASSERT_TRUE(graph.AddConsumer(node2->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
EXPECT_THAT(graph.FindInputs(node1->id), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre());
EXPECT_THAT(graph.FindOutputs(node2->id), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.FindConsumers(graph_input->id),
UnorderedElementsAre(node1, node2));
EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node2));
EXPECT_THAT(graph.FindConsumers(graph_output->id), UnorderedElementsAre());
}
TEST(Model, DeleteValue) {
GraphFloat32 graph;
Node* node1 = graph.NewNode();
Node* node2 = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
EXPECT_THAT(graph.values(),
UnorderedElementsAre(graph_input, graph_output, value));
EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre(value));
ASSERT_TRUE(graph.DeleteValue(value->id).ok());
value = nullptr;
EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output));
EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre());
EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre());
ASSERT_TRUE(graph.DeleteValue(graph_input->id).ok());
graph_input = nullptr;
EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre());
EXPECT_THAT(graph.FindInputs(node1->id), UnorderedElementsAre());
ASSERT_TRUE(graph.DeleteValue(graph_output->id).ok());
graph_output = nullptr;
EXPECT_THAT(graph.values(), UnorderedElementsAre());
EXPECT_THAT(graph.outputs(), UnorderedElementsAre());
EXPECT_THAT(graph.FindOutputs(node2->id), UnorderedElementsAre());
}
TEST(Model, DeleteNode) {
GraphFloat32 graph;
Node* node1 = graph.NewNode();
Node* node2 = graph.NewNode();
Node* node3 = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* graph_output2 = graph.NewValue();
Value* value = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
ASSERT_TRUE(graph.AddConsumer(node3->id, value->id).ok());
ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
ASSERT_TRUE(graph.SetProducer(node3->id, graph_output2->id).ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2, node3));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(),
UnorderedElementsAre(graph_output, graph_output2));
EXPECT_THAT(graph.FindConsumers(value->id),
UnorderedElementsAre(node2, node3));
EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
EXPECT_THAT(graph.FindInputs(node3->id), UnorderedElementsAre(value));
ASSERT_TRUE(graph.DeleteNode(node3->id).ok());
node3 = nullptr;
EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input, graph_output2));
EXPECT_THAT(graph.outputs(),
UnorderedElementsAre(graph_output, graph_output2));
EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
ASSERT_TRUE(graph.DeleteNode(node1->id).ok());
node1 = nullptr;
EXPECT_THAT(graph.nodes(), ElementsAre(node2));
EXPECT_THAT(graph.inputs(),
UnorderedElementsAre(value, graph_output2, graph_input));
EXPECT_THAT(graph.outputs(),
UnorderedElementsAre(graph_input, graph_output, graph_output2));
EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(nullptr));
ASSERT_TRUE(graph.DeleteNode(node2->id).ok());
node2 = nullptr;
EXPECT_THAT(graph.nodes(), ElementsAre());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_output, graph_output2,
graph_input, value));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output, graph_output2,
graph_input, value));
EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre());
EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(nullptr));
}
TEST(Model, InsertNodeAfter) {
GraphFloat32 graph;
Node* node1 = graph.NewNode();
Node* node2 = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
Node* new_node1;
absl::Status status = graph.InsertNodeAfter(node1->id, &new_node1);
ASSERT_TRUE(status.ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node1, new_node1, node2));
Node* new_node2;
  status = graph.InsertNodeAfter(100, &new_node2);  // Nonexistent node id.
EXPECT_EQ(status.code(), absl::StatusCode::kOutOfRange);
status = graph.InsertNodeAfter(node2->id, &new_node2);
ASSERT_TRUE(status.ok());
EXPECT_THAT(graph.nodes(), ElementsAre(node1, new_node1, node2, new_node2));
}
TEST(BatchMatchingTest, EmptyGraph) {
GraphFloat32 graph;
EXPECT_TRUE(CheckBatchSizeForAllValues(graph).ok());
}
TEST(BatchMatchingTest, AllMatch) {
GraphFloat32 graph;
Value* a = graph.NewValue();
Value* b = graph.NewValue();
a->tensor.shape = BHWC(1, 1, 1, 1);
b->tensor.shape = BHWC(1, 1, 1, 1);
EXPECT_TRUE(CheckBatchSizeForAllValues(graph).ok());
}
TEST(BatchMatchingTest, NotAllMatch) {
GraphFloat32 graph;
Value* a = graph.NewValue();
Value* b = graph.NewValue();
a->tensor.shape = BHWC(1, 1, 1, 1);
b->tensor.shape = BHWC(2, 1, 1, 1);
EXPECT_EQ(CheckBatchSizeForAllValues(graph).code(),
absl::StatusCode::kInvalidArgument);
}
TEST(Model, KnownGraphOutput) {
GraphFloat32 graph;
Node* node1 = graph.NewNode();
Node* node2 = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output1 = graph.NewValue();
Value* graph_output2 = graph.NewValue();
ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(node1->id, graph_output1->id).ok());
ASSERT_TRUE(graph.AddConsumer(node2->id, graph_output1->id).ok());
ASSERT_TRUE(graph.SetProducer(node2->id, graph_output2->id).ok());
graph.AddKnownGraphOutput(graph_output1);
graph.AddKnownGraphOutput(graph_output2);
EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), ElementsAre(graph_output2, graph_output1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
338ecf87-9d13-4d10-b0ec-321ec44cbddc | cpp | tensorflow/tensorflow | full_type_inference_util | tensorflow/core/framework/full_type_inference_util.cc | tensorflow/core/framework/full_type_inference_util_test.cc | #include "tensorflow/core/framework/full_type_inference_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace full_type {
TypeInferenceFn KeepExisting() { return nullptr; }
TypeInferenceFn Tensor(FullTypeId t) {
return [t](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets) {
FullTypeDef ret_type;
ret_type.set_type_id(TFT_PRODUCT);
ret_type.add_args()->set_type_id(TFT_TENSOR);
ret_type.mutable_args(0)->add_args()->set_type_id(t);
return ret_type;
};
}
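// Illustrative sketch (added commentary, not upstream): Tensor(TFT_INT32)
// builds an inference function that ignores its inputs and always returns
// TFT_PRODUCT[TFT_TENSOR[TFT_INT32]]; the Tuple tests in the companion unit
// test rely on exactly this result shape.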
TypeInferenceFn ReplicateInput(int i, int n) {
return [i, n](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets) {
const FullTypeDef& in_type = input_types.at(i).get();
FullTypeDef ret_type;
if (in_type.type_id() != TFT_UNSET) {
ret_type.set_type_id(TFT_PRODUCT);
for (int k = 0; k < n; k++) {
*(ret_type.add_args()) = in_type;
}
}
return ret_type;
};
}
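// Hedged example (added): ReplicateInput(0, 2) applied to a single TFT_ARRAY
// input yields TFT_PRODUCT[TFT_ARRAY, TFT_ARRAY]; an unset input propagates
// as TFT_UNSET, mirroring the ReplicateInput unit tests.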
TypeInferenceFn Merge() {
return [](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
DCHECK(!input_types.empty());
FullTypeDef merged;
for (int i = 0; i < input_types.size(); i++) {
const auto& t = input_types[i].get();
if (t.type_id() == TFT_UNSET) {
continue;
}
if (IsSubtype(t, merged)) {
merged = t;
continue;
}
if (IsSubtype(merged, t)) {
continue;
}
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected compatible input types, but input ",
i, ":\n", t.DebugString(),
" is neither a subtype nor a supertype of the "
"combined inputs preceding it:\n",
merged.DebugString()));
}
FullTypeDef ret_type;
if (merged.type_id() != TFT_UNSET) {
ret_type.set_type_id(TFT_PRODUCT);
*(ret_type.add_args()) = merged;
}
return ret_type;
};
}
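// Illustrative note (added): Merge() folds the inputs into their most
// specific common type. Merging TFT_ARRAY[TFT_ANY] with TFT_ARRAY[TFT_TENSOR]
// yields TFT_PRODUCT[TFT_ARRAY[TFT_TENSOR]], while TFT_ARRAY vs. TFT_TENSOR
// (neither a subtype of the other) fails with kInvalidArgument, as the Merge
// unit tests exercise.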
TypeInferenceFn Encode(FullTypeId t, int i) {
return [t, i](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
    DCHECK_GT(input_types.size(), i);  // Strict: input_types[i] is read below.
FullTypeDef ret_type;
const FullTypeDef& in_t = input_types[i].get();
if (in_t.type_id() == TFT_UNSET) {
return ret_type;
}
ret_type.set_type_id(TFT_PRODUCT);
auto* enc_type = ret_type.add_args();
enc_type->set_type_id(TFT_ENCODED);
*enc_type->add_args() = in_t;
enc_type->add_args()->set_type_id(t);
return ret_type;
};
}
TypeInferenceFn Decode(FullTypeId t, int i) {
return [t, i](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
    DCHECK_GT(input_types.size(), i);  // Strict: input_types[i] is read below.
const FullTypeDef& in_t = input_types[i].get();
const FullTypeId enc_tid = GetArgDefaultUnset(in_t, 1).type_id();
if ((enc_tid != TFT_UNSET) && (enc_tid != t)) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected encoded type ", t, " for input ", i,
", got ", in_t.DebugString()));
}
FullTypeDef ret_type;
const FullTypeDef& out_t = GetArgDefaultUnset(in_t, 0);
if (in_t.type_id() == TFT_UNSET) {
return ret_type;
}
ret_type.set_type_id(TFT_PRODUCT);
*ret_type.add_args() = out_t;
return ret_type;
};
}
TypeInferenceFn UnaryContainerCreate(FullTypeId t, int element_idx) {
return [t, element_idx](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
    DCHECK_GT(input_types.size(), element_idx);
FullTypeDef ret_type;
ret_type.set_type_id(TFT_PRODUCT);
FullTypeDef* arg_t = ret_type.add_args();
arg_t->set_type_id(t);
*(arg_t->add_args()) = input_types[element_idx].get();
return ret_type;
};
}
TypeInferenceFn UnaryContainerAdd(FullTypeId t, int container_idx,
int element_idx, bool homogeneous) {
return [t, container_idx, element_idx, homogeneous](
const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
    DCHECK_GT(input_types.size(), container_idx);
    DCHECK_GT(input_types.size(), element_idx);
FullTypeDef ret_type;
ret_type.set_type_id(TFT_PRODUCT);
FullTypeDef* cont_t = ret_type.add_args();
cont_t->set_type_id(t);
const FullTypeDef& in_cont_t = input_types[container_idx].get();
const FullTypeDef& in_el_t = input_types[element_idx].get();
if (in_cont_t.type_id() != TFT_UNSET) {
if (in_cont_t.type_id() != t) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("expected container type ", t, " for input ",
container_idx, ", got ", in_cont_t.DebugString()));
}
*cont_t = in_cont_t;
}
VLOG(1) << "ContainerAddUnary: " << cont_t->DebugString() << ", "
<< in_el_t.DebugString() << ", " << container_idx << "; "
<< element_idx;
for (const auto& tmp : input_types) {
VLOG(1) << " input: " << tmp.get().DebugString();
}
if (in_el_t.type_id() == TFT_UNSET) {
return ret_type;
}
const FullTypeDef& el_t = GetArgDefaultUnset(*cont_t, 0);
if (el_t.type_id() == TFT_UNSET) {
cont_t->clear_args();
*(cont_t->add_args()) = in_el_t;
return ret_type;
}
if (IsSubtype(in_el_t, el_t)) {
return ret_type;
}
if (homogeneous) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected a subtype of ", el_t.DebugString(),
" for input ", element_idx,
" of a homogeneous container ", t, ", got ",
in_el_t.DebugString()));
} else {
return Status(
absl::StatusCode::kUnimplemented,
absl::StrCat("need union types for heterogeneous containers.\n"
"A homogeneous container would expect a subtype of ",
el_t.DebugString(), " for input ", element_idx,
", but got ", in_el_t.DebugString()));
}
};
}
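// Worked example (added commentary, mirroring the unit tests): with
// t = TFT_ARRAY, container input TFT_ARRAY and element input TFT_TENSOR, the
// result is TFT_PRODUCT[TFT_ARRAY[TFT_TENSOR]]. If the container already
// holds TFT_ANY and the element is a subtype of it, the container's element
// type is kept. Mismatches fail with "expected a subtype" when homogeneous
// and "need union types" otherwise.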
TypeInferenceFn MultiaryUnstack(
FullTypeId t, std::function<FullTypeDef(const FullTypeDef&)> unstack) {
return [t, unstack](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
FullTypeDef ret_type;
ret_type.set_type_id(TFT_PRODUCT);
FullTypeDef* cont_t = ret_type.add_args();
cont_t->set_type_id(t);
FullTypeDef* el_t = cont_t->add_args();
el_t->set_type_id(TFT_PRODUCT);
for (int element_idx = 0; element_idx < input_types.size(); ++element_idx) {
*(el_t->add_args()) = unstack(input_types[element_idx].get());
}
return ret_type;
};
}
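// Sketch of the result (added): MultiaryUnstack(TFT_DATASET, UnstackTensor)
// over inputs (RAGGED[STRING], TENSOR, RAGGED[INT64]) produces
// TFT_PRODUCT[TFT_DATASET[TFT_PRODUCT[RAGGED[STRING], TENSOR,
// RAGGED[INT64]]]], matching the MultiaryUnstack.Ternary test.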
FullTypeDef UnstackTensor(const FullTypeDef& t) {
DCHECK((t.type_id() == TFT_TENSOR) || (t.type_id() == TFT_RAGGED) ||
(t.type_id() == TFT_UNSET));
DCHECK_LE(t.args_size(), 1);
return t;
}
TypeInferenceFn ContainerMap(
FullTypeId t, int input_idx,
std::function<FullTypeDef(const FullTypeDef&)> map) {
return [t, input_idx, map](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
    DCHECK_GT(input_types.size(), input_idx);
const FullTypeDef& in_cont_t = input_types.at(input_idx).get();
FullTypeDef ret_type;
if (in_cont_t.type_id() == TFT_UNSET) {
return ret_type;
}
if (in_cont_t.type_id() != t) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected type ", t, " for input ", input_idx,
", got ", in_cont_t.DebugString()));
}
ret_type.set_type_id(TFT_PRODUCT);
FullTypeDef* out_cont_t = ret_type.add_args();
out_cont_t->set_type_id(t);
const FullTypeDef& in_el_t = GetArgDefaultUnset(in_cont_t, 0);
if (in_el_t.type_id() == TFT_UNSET) {
return ret_type;
}
if (in_el_t.type_id() != TFT_PRODUCT) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected PRODUCT element type for input ",
input_idx, ", got ", in_el_t.DebugString()));
}
FullTypeDef* out_el_t = out_cont_t->add_args();
out_el_t->set_type_id(TFT_PRODUCT);
for (int k = 0; k < in_el_t.args_size(); k++) {
*(out_el_t->add_args()) = map(in_el_t.args(k));
}
return ret_type;
};
}
TypeInferenceFn MapCovariant(FullTypeId t, FullTypeId u, int input_idx) {
return
[t, u, input_idx](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
        DCHECK_GT(input_types.size(), input_idx);
const FullTypeDef& in_t = input_types.at(input_idx).get();
FullTypeDef ret_type;
if (in_t.type_id() == TFT_UNSET) {
return ret_type;
}
if (in_t.type_id() != t) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("expected type ", t, " for input ",
input_idx, ", got ", in_t.DebugString()));
}
ret_type.set_type_id(TFT_PRODUCT);
        // Renamed from `t` to avoid shadowing the captured FullTypeId t.
        FullTypeDef* out_t = ret_type.add_args();
        out_t->set_type_id(u);
        *out_t->mutable_args() = in_t.args();
return ret_type;
};
}
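// Hedged example (added): MapCovariant(TFT_TENSOR, TFT_DATASET, 0) maps
// TFT_TENSOR[TFT_INT32] to TFT_PRODUCT[TFT_DATASET[TFT_INT32]]; an unset
// input stays unset and any other outer type is rejected, as the MapCovariant
// tests show.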
TypeInferenceFn FunctionCall(const string& func_attr_name) {
return [func_attr_name](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
return infer_function_rets(func_attr_name, input_types);
};
}
TypeInferenceFn Tuple(const std::vector<TypeInferenceFn>& func_list) {
return [func_list](const TypeRefVector& input_types,
const FunctionTypeInferrer& infer_function_rets)
-> absl::StatusOr<FullTypeDef> {
FullTypeDef ret_type;
ret_type.set_type_id(TFT_PRODUCT);
for (const auto& func : func_list) {
const auto& status_or_t = func(input_types, infer_function_rets);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
status_or_t.status(),
absl::StrCat("for Tuple type infernce function ",
ret_type.args_size()));
const FullTypeDef& t = status_or_t.value();
if (t.type_id() == TFT_UNSET) {
VLOG(1) << "For Tuple type inference function, function "
<< ret_type.args_size() << " is unset.";
FullTypeDef unset_type;
return unset_type;
}
if (t.type_id() != TFT_PRODUCT) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("for Tuple type inference function, expected result "
"of type inference function ",
ret_type.args_size(),
" to start with TFT_PRODUCT not ", t.DebugString()));
}
for (int i = 0; i < t.args_size(); i++) {
*(ret_type.add_args()) = t.args(i);
}
}
return ret_type;
};
}
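// Illustrative composition (added): Tuple({ReplicateInput(),
// Tensor(TFT_INT32)}) concatenates the PRODUCT results of its components, so
// a TFT_TENSOR[TFT_FLOAT] input yields
// TFT_PRODUCT[TFT_TENSOR[TFT_FLOAT], TFT_TENSOR[TFT_INT32]]; if any component
// returns TFT_UNSET, the whole tuple is unset (see the Tuple tests).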
FullTypeDef BatchTensor(const FullTypeDef& t) {
return t;
}
FullTypeDef ShardTensor(const FullTypeDef& t) {
return t;
}
}
} | #include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace full_type {
namespace {
TEST(ReplicateInput, Default) {
FullTypeDef t;
t.set_type_id(TFT_ARRAY);
const auto ret = ReplicateInput()({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
TEST(ReplicateInput, Duplicate) {
FullTypeDef t;
t.set_type_id(TFT_ARRAY);
const auto ret = ReplicateInput(0, 2)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 2);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(rt.args(1).type_id(), TFT_ARRAY);
}
TEST(ReplicateInput, FirstOfMultipleArgs) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
const auto ret = ReplicateInput(0, 2)({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 2);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(rt.args(1).type_id(), TFT_ARRAY);
}
TEST(ReplicateInput, SecondOfMultipleArgs) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
const auto ret = ReplicateInput(1, 2)({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 2);
EXPECT_EQ(rt.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(rt.args(1).type_id(), TFT_TENSOR);
}
TEST(ReplicateInput, Unset) {
FullTypeDef t;
const auto ret = ReplicateInput()({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
TEST(Merge, Single) {
FullTypeDef t;
t.set_type_id(TFT_ARRAY);
const auto ret = Merge()({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
TEST(Merge, Double) {
FullTypeDef t;
t.set_type_id(TFT_ARRAY);
const auto ret = Merge()({t, t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
}
TEST(Merge, Unset) {
FullTypeDef t;
t.set_type_id(TFT_UNSET);
const auto ret = Merge()({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
TEST(Merge, UnsetComponents) {
FullTypeDef t1;
FullTypeDef t2;
const auto ret = Merge()({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
}
void ExpectInferredArrayOfTensor(absl::StatusOr<FullTypeDef> ret) {
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(Merge, RejectsMismatched) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
const auto ret = Merge()({t1, t2}, {});
EXPECT_THAT(ret.status().message(),
::testing::HasSubstr("expected compatible input types"));
}
TEST(Merge, UsesPartialInfo) {
FullTypeDef t1;
FullTypeDef t2;
t2.set_type_id(TFT_ARRAY);
t2.add_args()->set_type_id(TFT_TENSOR);
ExpectInferredArrayOfTensor(Merge()({t1, t2}, {}));
ExpectInferredArrayOfTensor(Merge()({t2, t1}, {}));
}
TEST(Merge, SelectsMostSpecificOfSubtypes) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_ANY);
FullTypeDef t2;
t2.set_type_id(TFT_ARRAY);
t2.add_args()->set_type_id(TFT_TENSOR);
ExpectInferredArrayOfTensor(Merge()({t1, t2}, {}));
ExpectInferredArrayOfTensor(Merge()({t2, t1}, {}));
}
TEST(UnaryContainerCreate, Basic) {
FullTypeDef t1;
t1.set_type_id(TFT_ANY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
FullTypeDef t3;
t3.set_type_id(TFT_ANY);
  const auto ret =
      UnaryContainerCreate(TFT_ARRAY, /*element_idx=*/1)({t1, t2, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(UnaryContainerAdd, Basic) {
FullTypeDef t1;
t1.set_type_id(TFT_ANY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
FullTypeDef t3;
t3.set_type_id(TFT_ARRAY);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/2, /*element_idx=*/1,
                        /*homogeneous=*/false)({t1, t2, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(UnaryContainerAdd, RejectsMismatchedContainerType) {
FullTypeDef t1;
t1.set_type_id(TFT_TENSOR);
FullTypeDef t2;
t2.set_type_id(TFT_DATASET);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/1, /*element_idx=*/0,
                        /*homogeneous=*/false)({t1, t2}, {});
EXPECT_THAT(ret.status().message(),
::testing::HasSubstr("expected container type"));
}
TEST(UnaryContainerAdd, IgnoresUnsetContainerType) {
FullTypeDef t1;
t1.set_type_id(TFT_TENSOR);
FullTypeDef t2;
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/1, /*element_idx=*/0,
                        /*homogeneous=*/false)({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(UnaryContainerAdd, UnsetElementTypeRemainsUnset) {
FullTypeDef t1;
t1.set_type_id(TFT_ANY);
FullTypeDef t2;
FullTypeDef t3;
t3.set_type_id(TFT_ARRAY);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/2, /*element_idx=*/1,
                        /*homogeneous=*/false)({t1, t2, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 0);
}
TEST(UnaryContainerAdd, UnsetElementTypeKeepsOriginalElementType) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_TENSOR);
FullTypeDef t2;
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/false)({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(UnaryContainerAdd, KeepsContainerTypeIfElementIsSubtype) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_ANY);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/true)({t1, t2}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_ARRAY);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_ANY);
}
TEST(UnaryContainerAdd, RejectsMismatchedElementTypesHeterogenous) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_TENSOR);
FullTypeDef t2;
t2.set_type_id(TFT_DATASET);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/false)({t1, t2}, {});
EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("need union types"));
}
TEST(UnaryContainerAdd, RejectsMismatchedElementTypesHomogeneous) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_TENSOR);
FullTypeDef t2;
t2.set_type_id(TFT_DATASET);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/true)({t1, t2}, {});
EXPECT_THAT(ret.status().message(),
::testing::HasSubstr("expected a subtype"));
}
TEST(UnaryContainerAdd, RejectsSupertypeElementTypeHeterogeneous) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_TENSOR);
FullTypeDef t2;
t2.set_type_id(TFT_ANY);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/false)({t1, t2}, {});
EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("need union types"));
}
TEST(UnaryContainerAdd, RejectsSupertypeElementTypeHomogeneous) {
FullTypeDef t1;
t1.set_type_id(TFT_ARRAY);
t1.add_args()->set_type_id(TFT_TENSOR);
FullTypeDef t2;
t2.set_type_id(TFT_ANY);
const auto ret =
      UnaryContainerAdd(TFT_ARRAY, /*container_idx=*/0, /*element_idx=*/1,
                        /*homogeneous=*/true)({t1, t2}, {});
EXPECT_THAT(ret.status().message(),
::testing::HasSubstr("expected a subtype"));
}
TEST(MultiaryUnstack, Basic) {
FullTypeDef t1;
t1.set_type_id(TFT_TENSOR);
const auto ret = MultiaryUnstack(TFT_DATASET, UnstackTensor)({t1}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(MultiaryUnstack, Ternary) {
FullTypeDef t1;
t1.set_type_id(TFT_RAGGED);
t1.add_args()->set_type_id(TFT_STRING);
FullTypeDef t2;
t2.set_type_id(TFT_TENSOR);
FullTypeDef t3;
t3.set_type_id(TFT_RAGGED);
t3.add_args()->set_type_id(TFT_INT64);
const auto ret =
MultiaryUnstack(TFT_DATASET, UnstackTensor)({t1, t2, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 3);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).args(0).type_id(), TFT_STRING);
ASSERT_EQ(rt.args(0).args(0).args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args(0).args(2).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(2).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(2).args(0).type_id(), TFT_INT64);
}
TEST(MapContainer, Basic) {
FullTypeDef cont_t;
cont_t.set_type_id(TFT_DATASET);
FullTypeDef* el_t = cont_t.add_args();
el_t->set_type_id(TFT_PRODUCT);
(el_t->add_args())->set_type_id(TFT_TENSOR);
const auto ret = ContainerMap(TFT_DATASET, 0, BatchTensor)({cont_t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_TENSOR);
}
TEST(MapContainer, Ternary) {
FullTypeDef t1;
t1.set_type_id(TFT_ANY);
FullTypeDef cont_t;
cont_t.set_type_id(TFT_DATASET);
FullTypeDef* el_t = cont_t.add_args();
el_t->set_type_id(TFT_PRODUCT);
FullTypeDef* e1 = el_t->add_args();
e1->set_type_id(TFT_RAGGED);
e1->add_args()->set_type_id(TFT_STRING);
FullTypeDef* e2 = el_t->add_args();
e2->set_type_id(TFT_TENSOR);
FullTypeDef* e3 = el_t->add_args();
e3->set_type_id(TFT_RAGGED);
e3->add_args()->set_type_id(TFT_INT64);
FullTypeDef t3;
t3.set_type_id(TFT_ANY);
const auto ret =
ContainerMap(TFT_DATASET, 1, BatchTensor)({t1, cont_t, t3}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args(0).args(0).args_size(), 3);
ASSERT_EQ(rt.args(0).args(0).args(0).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(0).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(0).args(0).type_id(), TFT_STRING);
ASSERT_EQ(rt.args(0).args(0).args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args(0).args(2).type_id(), TFT_RAGGED);
ASSERT_EQ(rt.args(0).args(0).args(2).args_size(), 1);
ASSERT_EQ(rt.args(0).args(0).args(2).args(0).type_id(), TFT_INT64);
}
TEST(MapCovariant, Basic) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
ASSERT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 1);
EXPECT_EQ(rt.args(0).type_id(), TFT_DATASET);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(rt.args(0).args(0).args_size(), 0);
}
TEST(MapCovariant, IgnoresUnset) {
FullTypeDef t;
t.set_type_id(TFT_UNSET);
const auto ret = MapCovariant(TFT_TENSOR, TFT_DATASET, 0)({t}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
ASSERT_EQ(rt.args_size(), 0);
}
TEST(MapCovariant, RejectsMismatchedType) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
const auto ret = MapCovariant(TFT_ARRAY, TFT_DATASET, 0)({t}, {});
EXPECT_THAT(ret.status().message(), ::testing::HasSubstr("expected type"));
}
static TypeInferenceFn tuple_func() {
std::vector<TypeInferenceFn> func_list{ReplicateInput(), Tensor(TFT_INT32)};
return Tuple(func_list);
}
TEST(Tuple, Basic) {
const TypeInferenceFn ret_func = tuple_func();
FullTypeDef t_in;
t_in.set_type_id(TFT_TENSOR);
t_in.add_args()->set_type_id(TFT_FLOAT);
const auto ret = ret_func({t_in}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_PRODUCT);
ASSERT_EQ(rt.args_size(), 2);
EXPECT_EQ(rt.args(0).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(0).args_size(), 1);
EXPECT_EQ(rt.args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(rt.args(1).type_id(), TFT_TENSOR);
ASSERT_EQ(rt.args(1).args_size(), 1);
EXPECT_EQ(rt.args(1).args(0).type_id(), TFT_INT32);
}
TEST(Tuple, Unset) {
const TypeInferenceFn ret_func = tuple_func();
FullTypeDef t_in;
const auto ret = ret_func({t_in}, {});
TF_EXPECT_OK(ret.status());
const FullTypeDef& rt = ret.value();
EXPECT_EQ(rt.type_id(), TFT_UNSET);
ASSERT_EQ(rt.args_size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_inference_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_inference_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d12e3e48-452a-40e4-a3c4-d4b35d7d3659 | cpp | tensorflow/tensorflow | ops_util | tensorflow/core/framework/ops_util.cc | tensorflow/core/kernels/ops_util_test.cc | #include <algorithm>
#include <cmath>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/util/padding.h"
namespace tensorflow {
Eigen::PaddingType BrainPadding2EigenPadding(Padding padding) {
switch (padding) {
case Padding::VALID:
return Eigen::PADDING_VALID;
case Padding::SAME:
return Eigen::PADDING_SAME;
case Padding::EXPLICIT:
LOG(FATAL) << "Eigen does not have explicit padding enum "
"value";
}
return Eigen::PADDING_SAME;
}
Status GetBroadcastSize(const int index, const int in_size, const int ksize,
const int stride, const int pad_size, int* bindex,
int* bsize) {
if (index * stride > in_size) {
return errors::InvalidArgument(
"index * stride must be less than or equal to input size");
}
*bindex = index * stride;
*bsize = ksize;
if (*bindex < pad_size) {
*bsize = ksize + *bindex - pad_size;
*bindex = 0;
} else {
*bindex -= pad_size;
}
if (*bindex + ksize > in_size) {
*bsize = std::min((in_size - *bindex), ksize);
}
return absl::OkStatus();
}
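// Worked example (added commentary, values taken from the unit tests): with
// index=1, in_size=3, ksize=3, stride=1, pad_size=1 the window start is
// 1*1 - 1 = 0 and the clamped size is 3 (*bindex=0, *bsize=3); raising
// pad_size to 2 gives *bindex=0, *bsize=2.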
string SanitizeThreadSuffix(string suffix) {
string clean;
for (int i = 0; i < suffix.size(); ++i) {
const char ch = suffix[i];
if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') || ch == '_' || ch == '-') {
clean += ch;
} else {
clean += '_';
}
}
return clean;
}
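// Example (added, mirrored from the unit test): SanitizeThreadSuffix maps
// "/aBc123_- /" to "_aBc123_-___"; every character outside [a-zA-Z0-9_-]
// becomes '_'.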
} | #include "tensorflow/core/kernels/ops_util.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/kernel_shape_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class OpsUtilTest : public ::testing::Test {
protected:
OpsUtilTest() {}
~OpsUtilTest() override {}
struct padding_struct {
struct {
int in_height;
int in_width;
int filter_height;
int filter_width;
int row_stride;
int col_stride;
Padding padding;
} input;
struct {
int new_height;
int new_width;
int pad_top;
int pad_bottom;
int pad_left;
int pad_right;
} output;
};
struct bcast_struct {
struct {
int index;
int in_size;
int ksize;
int stride;
int pad_size;
} input;
struct {
int new_index;
int new_size;
} output;
};
static void VerifyGet2dOutputSizeBoundaries(padding_struct pad_struct,
error::Code code) {
int64_t new_height, new_width, pad_rows, pad_cols;
Status status = GetWindowedOutputSize(
pad_struct.input.in_height, pad_struct.input.filter_height,
        /*dilation_rate=*/1, pad_struct.input.row_stride,
pad_struct.input.padding, &new_height, &pad_rows);
EXPECT_EQ(status.code(), code) << status;
status = GetWindowedOutputSize(
pad_struct.input.in_width, pad_struct.input.filter_width,
        /*dilation_rate=*/1, pad_struct.input.col_stride,
pad_struct.input.padding, &new_width, &pad_cols);
EXPECT_EQ(status.code(), code) << status;
}
static void VerifyGet2dOutputSizeValues(padding_struct pad_struct,
error::Code code) {
int64_t new_height, new_width, pad_rows, pad_cols;
Status status = GetWindowedOutputSize(
pad_struct.input.in_height, pad_struct.input.filter_height,
        /*dilation_rate=*/1, pad_struct.input.row_stride,
pad_struct.input.padding, &new_height, &pad_rows);
EXPECT_EQ(status.code(), code) << status;
status = GetWindowedOutputSize(
pad_struct.input.in_width, pad_struct.input.filter_width,
        /*dilation_rate=*/1, pad_struct.input.col_stride,
pad_struct.input.padding, &new_width, &pad_cols);
EXPECT_EQ(status.code(), code) << status;
EXPECT_EQ(pad_struct.output.new_height, new_height);
EXPECT_EQ(pad_struct.output.new_width, new_width);
EXPECT_EQ(pad_struct.output.pad_top, pad_rows);
EXPECT_EQ(pad_struct.output.pad_left, pad_cols);
}
static void VerifyGet2dOutputVerboseSizeValues(padding_struct pad_struct,
error::Code code) {
int64_t new_height, new_width, pad_top, pad_bottom, pad_left, pad_right;
Status status = GetWindowedOutputSizeVerbose(
pad_struct.input.in_height, pad_struct.input.filter_height,
        /*dilation_rate=*/1, pad_struct.input.row_stride,
pad_struct.input.padding, &new_height, &pad_top, &pad_bottom);
EXPECT_EQ(status.code(), code) << status;
status = GetWindowedOutputSizeVerbose(
pad_struct.input.in_width, pad_struct.input.filter_width,
        /*dilation_rate=*/1, pad_struct.input.col_stride,
pad_struct.input.padding, &new_width, &pad_left, &pad_right);
EXPECT_EQ(status.code(), code) << status;
EXPECT_EQ(pad_struct.output.new_height, new_height);
EXPECT_EQ(pad_struct.output.new_width, new_width);
EXPECT_EQ(pad_struct.output.pad_top, pad_top);
EXPECT_EQ(pad_struct.output.pad_bottom, pad_bottom);
EXPECT_EQ(pad_struct.output.pad_left, pad_left);
EXPECT_EQ(pad_struct.output.pad_right, pad_right);
}
static void VerifyBoundaries(bcast_struct bcast, error::Code code) {
int new_index, new_size;
Status status = GetBroadcastSize(
bcast.input.index, bcast.input.in_size, bcast.input.ksize,
bcast.input.stride, bcast.input.pad_size, &new_index, &new_size);
EXPECT_EQ(status.code(), code) << status;
}
static void VerifyBcastValues(bcast_struct bcast) {
int new_index, new_size;
EXPECT_EQ(absl::OkStatus(),
GetBroadcastSize(bcast.input.index, bcast.input.in_size,
bcast.input.ksize, bcast.input.stride,
bcast.input.pad_size, &new_index, &new_size));
EXPECT_EQ(bcast.output.new_index, new_index);
EXPECT_EQ(bcast.output.new_size, new_size);
}
};
TEST_F(OpsUtilTest, Get2dOutputSizeNegativeSizeTest) {
padding_struct pad_struct = {{1, 1, 3, 3, 1, 1, VALID}, {-1, -1, 0, 0, 0, 0}};
VerifyGet2dOutputSizeBoundaries(pad_struct, error::INVALID_ARGUMENT);
}
TEST_F(OpsUtilTest, Get2dOutputSizeSquareFilterTest) {
padding_struct pad_struct1 = {{3, 3, 2, 2, 2, 2, SAME}, {2, 2, 0, 0, 0, 0}};
padding_struct pad_struct2 = {{3, 3, 2, 2, 2, 2, VALID}, {1, 1, 0, 0, 0, 0}};
VerifyGet2dOutputSizeValues(pad_struct1, error::OK);
VerifyGet2dOutputSizeValues(pad_struct2, error::OK);
}
TEST_F(OpsUtilTest, Get2dOutputSizeNonSquareFilterTest) {
padding_struct pad_struct1 = {{4, 5, 1, 2, 1, 1, SAME}, {4, 5, 0, 0, 0, 0}};
padding_struct pad_struct2 = {{4, 5, 1, 2, 1, 1, VALID}, {4, 4, 0, 0, 0, 0}};
VerifyGet2dOutputSizeValues(pad_struct1, error::OK);
VerifyGet2dOutputSizeValues(pad_struct2, error::OK);
}
TEST_F(OpsUtilTest, Get2dOutputSizeUnevenStrideTest) {
padding_struct pad_struct1 = {{4, 4, 2, 2, 1, 2, VALID}, {3, 2, 0, 0, 0, 0}};
padding_struct pad_struct2 = {{4, 4, 2, 2, 2, 1, VALID}, {2, 3, 0, 0, 0, 0}};
VerifyGet2dOutputSizeValues(pad_struct1, error::OK);
VerifyGet2dOutputSizeValues(pad_struct2, error::OK);
}
TEST_F(OpsUtilTest, Get2dOutputSizeVerbose) {
padding_struct pad_struct1 = {{3, 3, 2, 2, 2, 2, SAME}, {2, 2, 0, 1, 0, 1}};
padding_struct pad_struct2 = {{3, 3, 2, 2, 2, 2, VALID}, {1, 1, 0, 0, 0, 0}};
VerifyGet2dOutputVerboseSizeValues(pad_struct1, error::OK);
VerifyGet2dOutputVerboseSizeValues(pad_struct2, error::OK);
}
TEST_F(OpsUtilTest, GetBroadcastTestBadIndex) {
bcast_struct bcast = {{2, 3, 1, 2, 0}, {0, 3}};
VerifyBoundaries(bcast, error::INVALID_ARGUMENT);
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_1_0) {
bcast_struct bcast[] = {
{{0, 3, 3, 1, 0}, {0, 3}},
{{1, 3, 3, 1, 0}, {1, 2}},
{{2, 3, 3, 1, 0}, {2, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_1_1) {
bcast_struct bcast[] = {
{{0, 3, 3, 1, 1}, {0, 2}},
{{1, 3, 3, 1, 1}, {0, 3}},
{{2, 3, 3, 1, 1}, {1, 2}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_1_2) {
bcast_struct bcast[] = {
{{0, 3, 3, 1, 2}, {0, 1}},
{{1, 3, 3, 1, 2}, {0, 2}},
{{2, 3, 3, 1, 2}, {0, 3}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_2_0) {
bcast_struct bcast[] = {
{{0, 3, 3, 2, 0}, {0, 3}},
{{1, 3, 3, 2, 0}, {2, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_2_1) {
bcast_struct bcast[] = {
{{0, 3, 3, 2, 1}, {0, 2}},
{{1, 3, 3, 2, 1}, {1, 2}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_2_2) {
bcast_struct bcast[] = {
{{0, 3, 3, 2, 2}, {0, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_3_0) {
bcast_struct bcast[] = {
{{0, 3, 3, 3, 0}, {0, 3}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_3_1) {
bcast_struct bcast[] = {
{{0, 3, 3, 3, 1}, {0, 2}},
{{1, 3, 3, 3, 1}, {2, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_3_3_2) {
bcast_struct bcast[] = {
{{0, 3, 3, 3, 2}, {0, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_1_2_0) {
bcast_struct bcast[] = {
{{0, 3, 1, 2, 0}, {0, 1}},
{{1, 3, 1, 2, 0}, {2, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_2_3_0) {
bcast_struct bcast[] = {
{{0, 3, 2, 3, 0}, {0, 2}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, GetBroadcastTest3_2_3_1) {
bcast_struct bcast[] = {
{{0, 3, 2, 3, 1}, {0, 1}},
{{1, 3, 2, 3, 1}, {2, 1}},
};
for (size_t i = 0; i < sizeof(bcast) / sizeof(bcast[0]); ++i) {
VerifyBcastValues(bcast[i]);
}
}
TEST_F(OpsUtilTest, SanitizeThreadSuffix) {
EXPECT_EQ("_aBc123_-___", SanitizeThreadSuffix("/aBc123_- /"));
}
TEST_F(OpsUtilTest, Aligned1DSlice) {
#if EIGEN_MAX_ALIGN_BYTES == 0
Tensor t(DT_FLOAT, TensorShape({3}));
  int64_t start = 0;
  int64_t end = 1;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, true);
#else
Tensor t(DT_FLOAT, TensorShape({EIGEN_MAX_ALIGN_BYTES * 2}));
int64_t start = 0;
int64_t end = EIGEN_MAX_ALIGN_BYTES;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, true);
Tensor sliced;
CHECK(sliced.CopyFrom(t.Slice(start, end), TensorShape({end - start})));
EXPECT_EQ(sliced.IsAligned(), true);
#endif
}
#if EIGEN_MAX_ALIGN_BYTES > 0
TEST_F(OpsUtilTest, Misaligned1DSlice) {
Tensor t(DT_FLOAT, TensorShape({EIGEN_MAX_ALIGN_BYTES * 2}));
int64_t start = 1;
int64_t end = EIGEN_MAX_ALIGN_BYTES + 1;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, false);
Tensor sliced;
CHECK(sliced.CopyFrom(t.Slice(start, end), TensorShape({end - start})));
EXPECT_EQ(sliced.IsAligned(), false);
}
#endif
TEST_F(OpsUtilTest, Aligned2DSliceOfDim0) {
#if EIGEN_MAX_ALIGN_BYTES == 0
Tensor t(DT_FLOAT, TensorShape({3, 4}));
  int64_t start = 1;
  int64_t end = 2;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, true);
#else
int64_t inner_dim_size = EIGEN_MAX_ALIGN_BYTES;
Tensor t(DT_FLOAT, TensorShape({3, inner_dim_size}));
int64_t start = 1;
int64_t end = 2;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, true);
Tensor sliced;
CHECK(sliced.CopyFrom(t.Slice(start, end), TensorShape({1, inner_dim_size})));
EXPECT_EQ(sliced.IsAligned(), true);
#endif
}
#if EIGEN_MAX_ALIGN_BYTES > 0
TEST_F(OpsUtilTest, Misaligned2DSliceOfDim0) {
int64_t inner_dim_size = EIGEN_MAX_ALIGN_BYTES + 1;
Tensor t(DT_FLOAT, TensorShape({3, inner_dim_size}));
int64_t start = 1;
int64_t end = 2;
bool output = IsDim0SliceAligned<float>(t.shape(), start, end);
EXPECT_EQ(output, false);
Tensor sliced;
CHECK(sliced.CopyFrom(t.Slice(start, end), TensorShape({1, inner_dim_size})));
EXPECT_EQ(sliced.IsAligned(), false);
}
#endif
TEST_F(OpsUtilTest, MisalignedEmptyShape) {
TensorShape shape({});
int64_t start = 1;
int64_t end = 2;
bool output = IsDim0SliceAligned<float>(shape, start, end);
EXPECT_EQ(output, false);
}
TEST_F(OpsUtilTest, MisalignedEmptyDim0) {
TensorShape shape({0, 1, 2});
int64_t start = 0;
int64_t end = 1;
bool output = IsDim0SliceAligned<float>(shape, start, end);
EXPECT_EQ(output, false);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/ops_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ops_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
862e5cf8-a705-43d3-a4f0-b68033bd09fe | cpp | tensorflow/tensorflow | tensor_matcher | tensorflow/core/framework/tensor_matcher.cc | tensorflow/lite/experimental/shlo/tensor_matcher_test.cc | #include "tensorflow/core/framework/tensor_matcher.h"
#include <stdint.h>
#include <complex>
#include <ostream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace test {
namespace {
using tensorflow::Tensor;
template <typename T>
::testing::Matcher<absl::Span<const T>> MakePointwiseMatcher(
absl::Span<const T> target) {
return ::testing::MatcherCast<absl::Span<const T>>(
::testing::Pointwise(::testing::Eq(), target));
}
template <>
::testing::Matcher<absl::Span<const float>> MakePointwiseMatcher(
absl::Span<const float> target) {
return ::testing::MatcherCast<absl::Span<const float>>(
::testing::Pointwise(::testing::FloatEq(), target));
}
template <>
::testing::Matcher<absl::Span<const double>> MakePointwiseMatcher(
absl::Span<const double> target) {
return ::testing::MatcherCast<absl::Span<const double>>(
::testing::Pointwise(::testing::DoubleEq(), target));
}
template <typename T>
bool MatchAndExplainPointwise(absl::Span<const T> value,
absl::Span<const T> target,
::testing::MatchResultListener* listener) {
return MakePointwiseMatcher<T>(target).MatchAndExplain(value, listener);
}
class TensorEqMatcherImpl : public ::testing::MatcherInterface<const Tensor&> {
public:
explicit TensorEqMatcherImpl(const Tensor& target) : target_(target) {}
void DescribeTo(::std::ostream* os) const override {
*os << "data type is " << tensorflow::DataTypeString(target_.dtype())
<< ", and shape is " << target_.shape();
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
*os << ", and tensor data "; \
absl::Span<const T> data(target_.unaligned_flat<T>()); \
MakePointwiseMatcher<T>(data).DescribeTo(os); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
}
}
}
void DescribeNegationTo(::std::ostream* os) const override {
*os << "data type is not " << tensorflow::DataTypeString(target_.dtype())
<< ", or shape is not " << target_.shape();
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
*os << ", or tensor data "; \
absl::Span<const T> data(target_.unaligned_flat<T>()); \
MakePointwiseMatcher<T>(data).DescribeNegationTo(os); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
}
}
}
bool MatchAndExplain(
const Tensor& value,
::testing::MatchResultListener* listener) const override {
const bool dtype_compare = value.dtype() == target_.dtype();
*listener << "whose data type " << tensorflow::DataTypeString(value.dtype())
<< (dtype_compare ? " matches " : " doesn't match ")
<< tensorflow::DataTypeString(target_.dtype());
const bool shape_compare = value.shape() == target_.shape();
*listener << ", whose shape " << value.shape()
<< (shape_compare ? " matches " : " doesn't match ")
<< target_.shape();
if (!dtype_compare || !shape_compare) {
return false;
}
bool result;
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
result = MatchAndExplainPointwise<T>( \
value.unaligned_flat<T>(), target_.unaligned_flat<T>(), listener); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
TF_CALL_QUANTIZED_TYPES(CASE_TYPE);
TF_CALL_int4(CASE_TYPE);
TF_CALL_uint4(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
result = false;
}
}
return result;
}
private:
const Tensor target_;
};
}
TensorEq::operator ::testing::Matcher<const Tensor&>() const {
return ::testing::MakeMatcher(new TensorEqMatcherImpl(target_));
}
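// Typical usage (added, illustrative): EXPECT_THAT(actual,
// tensorflow::test::TensorEq(expected)); on a mismatch the MatchAndExplain
// output above reports whether the dtype, the shape, or the element data
// differed.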
}
} | #include "tensorflow/lite/experimental/shlo/tensor_matcher.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor_with_data.h"
namespace shlo_ref {
namespace {
using ::shlo_ref::testing::TensorEq;
using ::testing::Not;
TEST(TensorMatcherTest, Eq) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), TensorEq(rhs.tensor()));
}
TEST(TensorMatcherTest, NotEqQuantized) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8, DataType::kF32>(
Shape{{1, 3}}, {.5f, 1.0f, 1.5f}, 0.1, 0);
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqType) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI32>(Shape{{1, 3}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqShape) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{3, 1}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqData) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 11, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_matcher.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/tensor_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241482d4-5593-45b6-9f17-7c5376ad9853 | cpp | tensorflow/tensorflow | tensor_shape | tensorflow/core/framework/tensor_shape.cc | tensorflow/core/framework/tensor_shape_test.cc | #include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
static_assert(sizeof(TensorShapeRep) == sizeof(TensorShape),
"TensorShape must have no fields beyond TensorShapeRep");
static_assert(sizeof(TensorShapeRep) == sizeof(PartialTensorShape),
"PartialTensorShape must have no fields beyond TensorShapeRep");
template <class Shape>
static void AppendTo(const TensorShapeBase<Shape>& s,
                     absl::InlinedVector<int64_t, 8UL>* vals) {
for (auto dim : s) {
vals->push_back(dim.size);
}
}
void TensorShape::CheckDimsEqual(int NDIMS) const {
CHECK_EQ(NDIMS, dims()) << "Asking for tensor of " << NDIMS << " dimensions"
<< " from a tensor of " << dims() << " dimensions";
}
void TensorShape::CheckDimsAtMost(int NDIMS) const {
CHECK_GE(NDIMS, dims()) << "Asking for tensor of at most " << NDIMS
<< " dimensions from a tensor of " << dims()
<< " dimensions";
}
template <class Shape>
bool TensorShapeBase<Shape>::IsValid() {
if (kIsPartial && unknown_rank()) return dims() == 0;
int64_t num_elements = 1;
if (dims() > MaxDimensions()) return false;
for (auto d : dim_sizes()) {
if (d < (kIsPartial ? -1 : 0)) return false;
if (d == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d);
if (num_elements < 0) return false;
}
}
return true;
}
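// For example (illustrative): with kIsPartial, {2, -1, 3} is valid and the
// running element count is simply treated as unknown, while {2, -2} is
// rejected; for fully defined shapes any negative dimension is invalid.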
template <class Shape>
bool TensorShapeBase<Shape>::IsValid(const TensorShapeProto& proto) {
if (kIsPartial && proto.unknown_rank()) return proto.dim_size() == 0;
int64_t num_elements = 1;
if (proto.dim().size() > MaxDimensions()) return false;
for (const auto& d : proto.dim()) {
if (d.size() < (kIsPartial ? -1 : 0)) return false;
if (d.size() == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d.size());
if (num_elements < 0) return false;
}
}
return true;
}
template <class Shape>
Status TensorShapeBase<Shape>::IsValidShape(const TensorShapeProto& proto) {
if (kIsPartial && proto.unknown_rank()) {
if (proto.dim_size() > 0) {
return errors::InvalidArgument(
"An unknown shape must not have any dimensions set.");
}
return absl::OkStatus();
}
int64_t num_elements = 1;
if (proto.dim().size() > MaxDimensions()) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" has too many dimensions");
}
for (const auto& d : proto.dim()) {
if (d.size() < (kIsPartial ? -1 : 0)) {
if (kIsPartial) {
return errors::InvalidArgument(
"Shape ", DebugString(proto),
" has dimensions with values below -1 (where -1 means unknown)");
} else {
return errors::InvalidArgument("Shape ", DebugString(proto),
" is not fully defined");
}
}
if (d.size() == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d.size());
if (num_elements < 0) {
return errors::InvalidArgument(
"Shape ", DebugString(proto),
" is too large (more than 2**63 - 1 entries)");
}
}
}
return absl::OkStatus();
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase(const TensorShapeProto& proto) {
set_tag(REP16);
set_data_type(DT_INVALID);
if (kIsPartial && proto.unknown_rank()) {
set_ndims_byte(kUnknownRank);
set_num_elements(-1);
} else {
set_ndims_byte(0);
set_num_elements(1);
for (const auto& d : proto.dim()) {
AddDim(d.size());
}
}
}
template <class Shape>
Status TensorShapeBase<Shape>::BuildTensorShapeBase(
const TensorShapeProto& proto, TensorShapeBase* out) {
out->set_tag(REP16);
out->set_data_type(DT_INVALID);
if (kIsPartial && proto.unknown_rank()) {
out->set_ndims_byte(kUnknownRank);
out->set_num_elements(-1);
} else {
out->set_ndims_byte(0);
out->set_num_elements(1);
int64_t num_elements_excluding_zero_dims = 1;
Status s = absl::OkStatus();
for (const auto& d : proto.dim()) {
s = out->AddDimWithStatus(d.size());
if (!s.ok()) {
return s;
}
if (d.size() > 0) {
num_elements_excluding_zero_dims =
MultiplyWithoutOverflow(num_elements_excluding_zero_dims, d.size());
if (TF_PREDICT_FALSE(num_elements_excluding_zero_dims < 0)) {
return errors::InvalidArgument(
"Encountered overflow when multiplying shape dimensions");
}
}
}
}
return absl::OkStatus();
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase(absl::Span<const int64_t> dim_sizes) {
set_tag(REP16);
set_data_type(DT_INVALID);
TF_CHECK_OK(InitDims(dim_sizes));
}
template <class Shape>
Status TensorShapeBase<Shape>::BuildTensorShapeBase(
absl::Span<const int64_t> dim_sizes, TensorShapeBase* out) {
out->set_tag(REP16);
out->set_data_type(DT_INVALID);
return out->InitDims(dim_sizes);
}
static inline bool Set16(bool partial, uint16* dst, int dim, int64_t val) {
if (partial) {
if (val < 0) {
dst[dim] = std::numeric_limits<uint16>::max();
return true;
}
}
dst[dim] = val;
return false;
}
template <class Shape>
Status TensorShapeBase<Shape>::InitDims(absl::Span<const int64_t> dim_sizes) {
DCHECK_EQ(tag(), REP16);
static const int64_t kMaxSmall = 0xd744;
static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max,
"bad overflow check");
bool large_size = false;
for (auto s : dim_sizes) {
if (s > kMaxSmall) {
large_size = true;
break;
}
}
if (!kIsPartial && !large_size) {
for (auto s : dim_sizes) {
if (TF_PREDICT_FALSE(s < 0)) {
return errors::InvalidArgument(
"Expected shape dimensions to be non-negative, got ", s);
}
}
}
if (!large_size) {
uint16* dst = as16()->dims_;
switch (dim_sizes.size()) {
case 1: {
set_ndims_byte(1);
const int64_t size = dim_sizes[0];
const bool neg = Set16(kIsPartial, dst, 0, size);
set_num_elements(neg ? -1 : size);
return absl::OkStatus();
}
case 2: {
set_ndims_byte(2);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
set_num_elements(neg ? -1 : (size0 * size1));
return absl::OkStatus();
}
case 3: {
set_ndims_byte(3);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
const int64_t size2 = dim_sizes[2];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
neg |= Set16(kIsPartial, dst, 2, size2);
set_num_elements(neg ? -1 : (size0 * size1 * size2));
return absl::OkStatus();
}
case 4: {
set_ndims_byte(4);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
const int64_t size2 = dim_sizes[2];
const int64_t size3 = dim_sizes[3];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
neg |= Set16(kIsPartial, dst, 2, size2);
neg |= Set16(kIsPartial, dst, 3, size3);
set_num_elements(neg ? -1 : (size0 * size1 * size2 * size3));
return absl::OkStatus();
}
}
}
set_ndims_byte(0);
set_num_elements(1);
Status status = absl::OkStatus();
for (int64_t s : dim_sizes) {
status.Update(AddDimWithStatus(internal::SubtleMustCopy(s)));
if (!status.ok()) {
return status;
}
}
return status;
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase() {
set_tag(REP16);
set_data_type(DT_INVALID);
if (kIsPartial) {
set_ndims_byte(kUnknownRank);
set_num_elements(-1);
} else {
set_ndims_byte(0);
set_num_elements(1);
}
}
void TensorShapeRep::DestructorOutOfLine() {
DCHECK(tag() == REP_OUT_OF_LINE);
delete as64()->dims_;
}
void TensorShapeRep::SlowCopyFrom(const TensorShapeRep& b) {
if (b.tag() != REP_OUT_OF_LINE) {
if (tag() == REP_OUT_OF_LINE) {
delete as64()->dims_;
}
memcpy(buf(), b.buf(), sizeof(u_.buf));
} else {
set_ndims_byte(b.ndims_byte());
set_data_type(b.data_type());
if (tag() == REP_OUT_OF_LINE) {
*(as64()->dims_) = *(b.as64()->dims_);
} else {
set_tag(REP_OUT_OF_LINE);
as64()->dims_ = new absl::InlinedVector<int64_t, 4UL>(*(b.as64()->dims_));
}
}
}
template <class Shape>
int64_t TensorShapeBase<Shape>::dim_size(int d) const {
if (unknown_rank()) return -1;
CHECK_GE(d, 0);
  // Note: the upper bound is only checked when d > 0; callers are still
  // expected to pass d < dims().
  if (d > 0) CHECK_LT(d, dims());
if (tag() == REP16) {
uint16 dim = as16()->dims_[d];
if (kIsPartial && dim == kUnknownRep16) return -1;
return dim;
} else if (tag() == REP32) {
uint32 dim = as32()->dims_[d];
if (kIsPartial && dim == kUnknownRep32) return -1;
return dim;
} else {
return (*as64()->dims_)[d];
}
}
void TensorShapeRep::Clear() {
ClearAllButDataType();
set_data_type(DT_INVALID);
}
void TensorShapeRep::ClearAllButDataType() {
if (tag() == REP_OUT_OF_LINE) {
delete as64()->dims_;
}
set_tag(REP16);
set_ndims_byte(0);
set_num_elements(1);
}
template <class Shape>
Status TensorShapeBase<Shape>::RecomputeNumElements() {
if (unknown_rank()) {
set_num_elements(-1);
return absl::OkStatus();
}
int64_t n = 1;
for (auto dim : *this) {
if (kIsPartial && dim.size < 0) {
n = -1;
break;
}
n = MultiplyWithoutOverflow(n, dim.size);
if (TF_PREDICT_FALSE(n < 0)) {
return errors::InvalidArgument(
"Shape ", this->DebugString(),
" results in overflow when computing number of elements");
}
}
set_num_elements(n);
return absl::OkStatus();
}
template <class Shape>
void TensorShapeBase<Shape>::AddDim(int64_t size) {
if (!kIsPartial) CHECK_GE(size, 0);
if (unknown_rank()) return;
CHECK_LT(ndims_byte(), MaxDimensions()) << "Too many dimensions in tensor";
int64_t new_num_elements;
if (kIsPartial && (num_elements() < 0 || size < 0)) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(num_elements(), size);
CHECK_LE(0, new_num_elements);
}
UnsafeAddDim(size, new_num_elements);
}
template <class Shape>
Status TensorShapeBase<Shape>::AddDimWithStatus(int64_t size) {
if (!kIsPartial) {
if (TF_PREDICT_FALSE(size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ",
size);
}
}
if (unknown_rank()) {
return absl::OkStatus();
}
if (TF_PREDICT_FALSE(ndims_byte() >= MaxDimensions())) {
return errors::InvalidArgument("Too many dimensions in tensor");
}
int64_t new_num_elements;
if (kIsPartial && (num_elements() < 0 || size < 0)) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(num_elements(), size);
if (TF_PREDICT_FALSE(new_num_elements < 0)) {
return errors::InvalidArgument("Encountered overflow when multiplying ",
num_elements(), " with ", size,
", result: ", new_num_elements);
}
}
UnsafeAddDim(size, new_num_elements);
return absl::OkStatus();
}
template <class Shape>
void TensorShapeBase<Shape>::UnsafeAddDim(int64_t size,
int64_t new_num_elements) {
const int nd = ndims_byte();
if (tag() == REP16 && nd < 6 && size < kMaxRep16) {
as16()->dims_[nd] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && nd < 3 && size < kMaxRep32) {
as32()->dims_[nd] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
as64()->dims_->push_back(size);
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.push_back(size);
bool can_be_rep32 = (vals.size() <= 3);
if (can_be_rep32) {
for (size_t i = 0; i < vals.size(); i++) {
if (vals[i] >= kMaxRep32) {
can_be_rep32 = false;
break;
}
}
}
if (can_be_rep32) {
set_tag(REP32);
for (size_t d = 0; d < vals.size(); d++) {
as32()->dims_[d] = kIsPartial && vals[d] < 0
? kUnknownRep32
: static_cast<uint32>(vals[d]);
}
} else {
set_tag(REP_OUT_OF_LINE);
as64()->dims_ =
new absl::InlinedVector<int64_t, 4UL>(vals.begin(), vals.end());
}
}
set_ndims_byte(nd + 1);
set_num_elements(new_num_elements);
}
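// Representation summary, as exercised above: REP16 holds up to 6 dimensions
// whose sizes fit in 16 bits, REP32 holds up to 3 dimensions fitting in
// 32 bits, and everything else spills to a heap-allocated
// InlinedVector<int64_t> (REP_OUT_OF_LINE).  In the inline forms the maximum
// unsigned value is reserved as the "unknown dimension" sentinel.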
template <class Shape>
void TensorShapeBase<Shape>::AppendShape(const TensorShapeBase& shape) {
for (auto d : shape) AddDim(d.size);
}
template <class Shape>
Status TensorShapeBase<Shape>::AppendShapeWithStatus(
const TensorShapeBase& shape) {
Status s = absl::OkStatus();
for (auto d : shape) {
s.Update(AddDimWithStatus(d.size));
if (!s.ok()) {
return s;
}
}
return s;
}
template <class Shape>
void TensorShapeBase<Shape>::InsertDim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LE(d, dims());
if (!kIsPartial) CHECK_GE(size, 0);
CHECK_LT(dims(), MaxDimensions());
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.insert(vals.begin() + d, size);
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
}
template <class Shape>
Status TensorShapeBase<Shape>::InsertDimWithStatus(int d, int64_t size) {
if (!kIsPartial) {
if (TF_PREDICT_FALSE(size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ",
size);
}
}
if (TF_PREDICT_FALSE(d < 0)) {
return errors::Internal("The insertion index must be non-negative, got ",
d);
}
if (TF_PREDICT_FALSE(d > dims())) {
return errors::Internal("The insertion index must be at most ", dims(),
" got ", d);
}
if (TF_PREDICT_FALSE(dims() >= MaxDimensions())) {
return errors::Internal("Shape has ", dims(),
" dimensions which is the maximum allowed");
}
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.insert(vals.begin() + d, size);
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
return s;
}
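// Usage sketch (illustrative):
//   TensorShape s({10, 20});
//   TF_RETURN_IF_ERROR(s.InsertDimWithStatus(1, 5));  // s is now [10, 5, 20]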
template <class Shape>
absl::InlinedVector<int64_t, 4UL> TensorShapeBase<Shape>::dim_sizes() const {
absl::InlinedVector<int64_t, 4UL> result;
for (auto dim : *this) {
result.push_back(dim.size);
}
return result;
}
template <class Shape>
void TensorShapeBase<Shape>::set_dim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
if (!kIsPartial) {
CHECK_GE(size, 0);
}
if (tag() == REP16 && size < kMaxRep16) {
as16()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && size < kMaxRep32) {
as32()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
(*as64()->dims_)[d] = size;
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals[d] = size;
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
}
TF_CHECK_OK(RecomputeNumElements());
}
template <class Shape>
Status TensorShapeBase<Shape>::SetDimWithStatus(int d, int64_t size) {
if (TF_PREDICT_FALSE(d < 0)) {
return errors::InvalidArgument("Index must be non-negative, got ", d);
}
if (TF_PREDICT_FALSE(d >= dims())) {
return errors::InvalidArgument("Index must be less than ", dims(), ", got ",
d);
}
if (TF_PREDICT_FALSE(!kIsPartial && size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ", size);
}
if (tag() == REP16 && size < kMaxRep16) {
as16()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && size < kMaxRep32) {
as32()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
(*as64()->dims_)[d] = size;
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals[d] = size;
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
}
return RecomputeNumElements();
}
template <class Shape>
void TensorShapeBase<Shape>::RemoveDimRange(int begin, int end) {
if (unknown_rank()) return;
begin = begin < 0 ? dims() + begin + 1 : begin;
end = end < 0 ? dims() + end + 1 : end;
CHECK_GE(begin, 0);
CHECK_LE(begin, dims());
CHECK_GE(end, 0);
CHECK_LE(end, dims());
if (begin >= end) return;
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.erase(vals.begin() + begin, vals.begin() + end);
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
TF_CHECK_OK(RecomputeNumElements());
}
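// Negative begin/end indices count from one past the last dimension (an
// index of -k maps to dims() - k + 1), so on a rank-4 shape
// RemoveDimRange(0, -2) keeps only the final dimension, and any interval
// that is empty after normalization (begin >= end) is a no-op.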
template <class Shape>
Status TensorShapeBase<Shape>::RemoveDimRangeWithStatus(int begin, int end) {
if (unknown_rank()) {
return absl::OkStatus();
}
begin = begin < 0 ? dims() + begin + 1 : begin;
end = end < 0 ? dims() + end + 1 : end;
if (TF_PREDICT_FALSE(begin < 0)) {
return errors::Internal("Start index must be non-negative, got ", begin);
}
if (TF_PREDICT_FALSE(begin > dims())) {
return errors::Internal("Start index must be less than ", dims(), ", got ",
begin);
}
if (TF_PREDICT_FALSE(end < 0)) {
return errors::Internal("End index must be non-negative, got ", end);
}
if (TF_PREDICT_FALSE(end > dims())) {
return errors::Internal("End index must be less than ", dims(), ", got ",
end);
}
if (begin >= end) {
return absl::OkStatus();
}
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.erase(vals.begin() + begin, vals.begin() + end);
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
return RecomputeNumElements();
}
bool TensorShape::IsSameSize(const TensorShape& b) const {
if (b.dims() != dims()) return false;
for (int d = 0; d < dims(); d++) {
if (dim_size(d) != b.dim_size(d)) return false;
}
return true;
}
template <class Shape>
void TensorShapeBase<Shape>::AsProto(TensorShapeProto* proto) const {
proto->Clear();
if (unknown_rank()) {
proto->set_unknown_rank(true);
} else {
for (int i = 0; i < dims(); i++) {
proto->add_dim()->set_size(dim_size(i));
}
}
}
template <class Shape>
TensorShapeProto TensorShapeBase<Shape>::AsProto() const {
TensorShapeProto out;
AsProto(&out);
return out;
}
template <class Shape>
TensorShapeIter<Shape> TensorShapeBase<Shape>::begin() const {
return TensorShapeIter<Shape>(static_cast<const Shape*>(this), 0);
}
template <class Shape>
TensorShapeIter<Shape> TensorShapeBase<Shape>::end() const {
const int max_dim = unknown_rank() ? -1 : dims();
return TensorShapeIter<Shape>(static_cast<const Shape*>(this), max_dim);
}
string TensorShapeRep::DebugString() const {
const auto& shape = *static_cast<const PartialTensorShape*>(this);
if (shape.unknown_rank()) return "<unknown>";
string s = "[";
for (int i = 0; i < shape.dims(); i++) {
if (i > 0) strings::StrAppend(&s, ",");
int64_t dim = shape.dim_size(i);
if (dim < 0) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, dim);
}
}
strings::StrAppend(&s, "]");
return s;
}
string TensorShapeRep::DebugString(const TensorShapeProto& proto) {
string s;
if (proto.unknown_rank()) {
strings::StrAppend(&s, "<unknown>");
if (proto.dim_size() == 0) return s;
}
strings::StrAppend(&s, "[");
bool first = true;
for (const auto& d : proto.dim()) {
if (!first) strings::StrAppend(&s, ",");
if (d.size() == -1) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, d.size());
}
first = false;
}
strings::StrAppend(&s, "]");
return s;
}
bool TensorShapeUtils::StartsWith(const TensorShape& shape,
const TensorShape& prefix) {
if (shape.dims() < prefix.dims()) return false;
for (int i = 0; i < prefix.dims(); ++i) {
if (shape.dim_size(i) != prefix.dim_size(i)) return false;
}
return true;
}
bool TensorShapeUtils::EndsWith(const TensorShape& shape,
const TensorShape& suffix) {
const int suffix_size = suffix.dims();
if (shape.dims() < suffix_size) return false;
for (int i = 0; i < suffix_size; ++i) {
if (shape.dim_size(shape.dims() - suffix_size + i) != suffix.dim_size(i)) {
return false;
}
}
return true;
}
template <typename T, class Shape>
Status MakeShapeHelper(const T* dims, int64_t n, Shape* out) {
out->Clear();
if (n > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Too many dimensions");
}
if (n < 0) {
return errors::InvalidArgument("Negative number of dimensions ", n);
}
for (int64_t i = 0; i < n; ++i) {
T dim = internal::SubtleMustCopy(dims[i]);
int64_t new_num_elements;
if (dim < 0) {
if (!out->kIsPartial) {
return errors::InvalidArgument("Dimension ", dim, " must be >= 0");
}
if (dim < -1) {
return errors::InvalidArgument("Dimension ", dim, " must be >= -1");
}
dim = -1;
new_num_elements = -1;
} else if (out->num_elements() < 0) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(out->num_elements(), dim);
if (TF_PREDICT_FALSE(new_num_elements < 0)) {
TensorShapeProto proto;
for (int64_t j = 0; j < n; ++j) {
proto.add_dim()->set_size(internal::SubtleMustCopy(dims[j]));
}
return errors::InvalidArgument(
"Shape ", TensorShape::DebugString(proto),
" would have more than 2**63 - 1 elements");
}
}
out->UnsafeAddDim(dim, new_num_elements);
}
return absl::OkStatus();
}
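// MakeShapeHelper is the shared implementation behind the MakeShape
// overloads stamped out below: it validates each dimension (allowing -1 only
// for partial shapes) and tracks the running element count with
// MultiplyWithoutOverflow so that shapes whose product would exceed
// 2**63 - 1 are rejected instead of silently wrapping.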
#define MAKE_SHAPE(T, Shape) \
Status TensorShapeUtils::MakeShape(const T* dims, int64_t n, Shape* out) { \
return MakeShapeHelper(dims, n, out); \
} \
Status TensorShapeUtils::MakeShape(gtl::ArraySlice<T> shape, Shape* out) { \
return MakeShapeHelper(shape.data(), shape.size(), out); \
}
MAKE_SHAPE(int32, TensorShape)
MAKE_SHAPE(int64_t, TensorShape)
MAKE_SHAPE(int32, PartialTensorShape)
MAKE_SHAPE(int64_t, PartialTensorShape)
#undef MAKE_SHAPE
string TensorShapeUtils::ShapeListString(
const absl::Span<const TensorShape>& shapes) {
string result = "[";
bool first = true;
for (const TensorShape& shape : shapes) {
strings::StrAppend(&result, (first ? "" : ", "), shape.DebugString());
first = false;
}
strings::StrAppend(&result, "]");
return result;
}
PartialTensorShape PartialTensorShape::Concatenate(int64_t size) const {
PartialTensorShape out = *this;
out.AddDim(size);
return out;
}
Status PartialTensorShape::ConcatenateWithStatus(
int64_t size, PartialTensorShape* out) const {
*out = *this;
return out->AddDimWithStatus(size);
}
PartialTensorShape PartialTensorShape::Concatenate(
const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) {
return PartialTensorShape();
}
PartialTensorShape out = *this;
for (auto dim : shape) out.AddDim(dim.size);
return out;
}
Status PartialTensorShape::ConcatenateWithStatus(
const PartialTensorShape& shape, PartialTensorShape* out) const {
if (unknown_rank() || shape.unknown_rank()) {
*out = PartialTensorShape();
return absl::OkStatus();
}
*out = *this;
for (auto dim : shape) {
Status s = out->AddDimWithStatus(dim.size);
if (!s.ok()) return s;
}
return absl::OkStatus();
}
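// Example (illustrative): [2,?] concatenated with [3] yields [2,?,3]; if
// either operand has unknown rank, the result is the unknown-rank shape.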
Status PartialTensorShape::MergeWith(const PartialTensorShape& shape,
PartialTensorShape* result) const {
if (unknown_rank()) {
*result = shape;
return absl::OkStatus();
}
if (shape.unknown_rank()) {
*result = *this;
return absl::OkStatus();
}
const int dims_ = dims();
if (dims_ != shape.dims()) {
return errors::InvalidArgument(
"PartialTensorShape: Incompatible ranks during merge: ", dims_, " vs. ",
shape.dims());
}
if (result == this) {
return errors::Internal(
"PartialTensorShape::MergeWith: Cannot output result to itself");
}
result->Clear();
Status s = absl::OkStatus();
for (int i = 0; i < dims_; ++i) {
const int64_t dim0 = dim_size(i);
const int64_t dim1 = shape.dim_size(i);
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) {
return errors::InvalidArgument(
"PartialTensorShape: Incompatible shapes during merge: ",
DebugString(), " vs. ", shape.DebugString());
}
s.Update(result->AddDimWithStatus(dim0 >= 0 ? dim0 : dim1));
if (!s.ok()) {
return s;
}
}
return absl::OkStatus();
}
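// Merge example (illustrative): [2,?] merged with [?,3] yields [2,3].  Ranks
// must match and every pair of known sizes must agree, otherwise an
// InvalidArgument status is returned; an unknown-rank operand simply adopts
// the other shape.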
bool PartialTensorShape::AsTensorShape(TensorShape* shape) const {
if (IsFullyDefined()) {
const TensorShapeRep* rep = this;
*shape = *static_cast<const TensorShape*>(rep);
return true;
}
return false;
}
bool PartialTensorShape::IsIdenticalTo(const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) {
return unknown_rank() == shape.unknown_rank();
}
if (dims() != shape.dims()) return false;
for (int i = 0; i < dims(); i++) {
if (dim_size(i) != shape.dim_size(i)) return false;
}
return true;
}
bool PartialTensorShape::IsCompatibleWith(
const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) return true;
if (dims() != shape.dims()) return false;
for (int i = 0; i < dims(); i++) {
const int64_t dim0 = dim_size(i);
const int64_t dim1 = shape.dim_size(i);
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) return false;
}
return true;
}
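// Compatibility example (illustrative): [2,?] is compatible with [2,3] and
// with a shape of unknown rank, but not with [3,3] (known sizes differ) or
// with [2] (ranks differ).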
string PartialTensorShapeUtils::PartialShapeListString(
const absl::Span<const PartialTensorShape>& shapes) {
string result = "[";
bool first = true;
for (const PartialTensorShape& shape : shapes) {
strings::StrAppend(&result, (first ? "" : ", "), shape.DebugString());
first = false;
}
strings::StrAppend(&result, "]");
return result;
}
bool PartialTensorShapeUtils::AreCompatible(
const absl::Span<const PartialTensorShape>& shapes0,
const absl::Span<const PartialTensorShape>& shapes1) {
if (shapes0.size() == shapes1.size()) {
for (size_t i = 0; i < shapes0.size(); ++i) {
if (!shapes0[i].IsCompatibleWith(shapes1[i])) {
return false;
}
}
return true;
} else {
return false;
}
}
bool PartialTensorShapeUtils::AreIdentical(
const absl::Span<const PartialTensorShape>& shapes0,
const absl::Span<const PartialTensorShape>& shapes1) {
if (shapes0.size() == shapes1.size()) {
for (size_t i = 0; i < shapes0.size(); ++i) {
if (!shapes0[i].IsIdenticalTo(shapes1[i])) {
return false;
}
}
return true;
} else {
return false;
}
}
Status TensorShapeUtils::NumElements(absl::Span<const int64_t> shape,
int64_t* num_elements) {
int64_t n = 1;
for (auto dim : shape) {
n = MultiplyWithoutOverflow(n, dim);
if (n < 0) {
return errors::InvalidArgument("Can't compute total size of shape [",
absl::StrJoin(shape, ","),
"]; product would overflow int64");
}
}
*num_elements = n;
return absl::OkStatus();
}
template class TensorShapeBase<TensorShape>;
template class TensorShapeBase<PartialTensorShape>;
} | #include "tensorflow/core/framework/tensor_shape.h"
#include <cstdint>
#include <limits>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
class TensorShapeTestHelper {
public:
static void set_data_type(TensorShape* s, DataType t) { s->set_data_type(t); }
static uint8 data_type(const TensorShape* s) { return s->data_type(); }
};
namespace {
TEST(TensorShapeTest, Default) {
TensorShape s;
EXPECT_EQ(s.dims(), 0);
EXPECT_EQ(s.num_elements(), 1);
}
TEST(TensorShapeTest, set_dim) {
TensorShape s({10, 5});
s.set_dim(0, 20);
ASSERT_EQ(2, s.dims());
EXPECT_EQ(20, s.dim_size(0));
EXPECT_EQ(100, s.num_elements());
s.set_dim(1, 2);
ASSERT_EQ(2, s.dims());
EXPECT_EQ(2, s.dim_size(1));
EXPECT_EQ(40, s.num_elements());
}
TEST(TensorShapeTest, RemoveDim) {
TensorShape s({10, 5});
s.RemoveDim(0);
EXPECT_EQ(5, s.num_elements());
ASSERT_EQ(1, s.dims());
}
TEST(TensorShapeTest, RemoveDimWithStatus) {
TensorShape s({10, 5});
TF_EXPECT_OK(s.RemoveDimWithStatus(0));
EXPECT_EQ(s.num_elements(), 5);
EXPECT_EQ(s.dims(), 1);
EXPECT_THAT(
s.RemoveDimWithStatus(-1),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Expected dimension index to be non-negative")));
}
TEST(TensorShapeTest, RemoveAndAddDim) {
TensorShape s({10, 5, 20});
s.RemoveDim(1);
s.AddDim(100);
EXPECT_EQ(20000, s.num_elements());
ASSERT_EQ(3, s.dims());
}
TEST(TensorShapeTest, RemoveLastDims) {
TensorShape s({2, 3, 5, 7});
s.RemoveLastDims(1);
ASSERT_EQ(3, s.dims());
EXPECT_EQ(30, s.num_elements());
s.RemoveLastDims(2);
ASSERT_EQ(1, s.dims());
EXPECT_EQ(2, s.dim_size(0));
}
TEST(TensorShapeTest, RemoveLastDimsWithStatus) {
TensorShape s({2, 3, 5, 7});
TF_EXPECT_OK(s.RemoveLastDimsWithStatus(1));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.num_elements(), 30);
EXPECT_THAT(
s.RemoveLastDimsWithStatus(4),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Expected dimension index to be at most 3")));
}
TEST(TensorShapeTest, RemoveDimRange) {
TensorShape s0({2, 3, 5, 7});
for (int i = -4; i <= 4; ++i) {
s0.RemoveDimRange(i, i);
ASSERT_EQ(4, s0.dims());
ASSERT_EQ(210, s0.num_elements());
}
s0.RemoveDimRange(3, 1);
ASSERT_EQ(4, s0.dims());
ASSERT_EQ(210, s0.num_elements());
s0.RemoveDimRange(0, 3);
ASSERT_EQ(1, s0.dims());
EXPECT_EQ(7, s0.dim_size(0));
TensorShape s1({2, 3, 5, 7});
s1.RemoveDimRange(2, 3);
ASSERT_EQ(3, s1.dims());
ASSERT_EQ(42, s1.num_elements());
TensorShape s2({2, 3, 5, 7});
s2.RemoveDimRange(-2, -3);
ASSERT_EQ(4, s2.dims());
ASSERT_EQ(210, s2.num_elements());
s2.RemoveDimRange(0, -2);
ASSERT_EQ(1, s2.dims());
ASSERT_EQ(7, s2.dim_size(0));
TensorShape s3({2, 3, 5, 7});
s3.RemoveDimRange(-3, -2);
ASSERT_EQ(3, s3.dims());
ASSERT_EQ(42, s3.num_elements());
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithEmptyInterval) {
TensorShape s0({2, 3, 5, 7});
for (int i = -4; i <= 4; ++i) {
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(i, i));
EXPECT_EQ(s0.dims(), 4);
EXPECT_EQ(s0.num_elements(), 210);
}
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithPositiveBeginEnd) {
TensorShape s0({2, 3, 5, 7});
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(3, 1));
EXPECT_EQ(s0.dims(), 4);
EXPECT_EQ(s0.num_elements(), 210);
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(0, 3));
EXPECT_EQ(s0.dims(), 1);
EXPECT_EQ(s0.dim_size(0), 7);
TensorShape s1({2, 3, 5, 7});
TF_EXPECT_OK(s1.RemoveDimRangeWithStatus(2, 3));
EXPECT_EQ(s1.dims(), 3);
EXPECT_EQ(s1.num_elements(), 42);
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithNegativeBeginEnd) {
TensorShape s2({2, 3, 5, 7});
TF_EXPECT_OK(s2.RemoveDimRangeWithStatus(-2, -3));
EXPECT_EQ(s2.dims(), 4);
EXPECT_EQ(s2.num_elements(), 210);
TF_EXPECT_OK(s2.RemoveDimRangeWithStatus(0, -2));
EXPECT_EQ(s2.dims(), 1);
EXPECT_EQ(s2.dim_size(0), 7);
TensorShape s3({2, 3, 5, 7});
TF_EXPECT_OK(s3.RemoveDimRangeWithStatus(-3, -2));
EXPECT_EQ(s3.dims(), 3);
EXPECT_EQ(s3.num_elements(), 42);
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithInvalidBeginEnd) {
TensorShape s3({2, 5, 7});
EXPECT_THAT(s3.RemoveDimRangeWithStatus(-5, 0),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Start index must be non-negative")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(5, 0),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("Start index must be less than 3")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(0, -5),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("End index must be non-negative")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(0, 5),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("End index must be less than 3")));
}
TEST(TensorShapeTest, InsertDimWithStatus) {
TensorShape s({10, 20});
TF_EXPECT_OK(s.InsertDimWithStatus(1, 5));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.dim_size(1), 5);
TF_EXPECT_OK(s.InsertDimWithStatus(1, 30));
EXPECT_EQ(s.dims(), 4);
EXPECT_EQ(s.dim_size(1), 30);
EXPECT_EQ(s.dim_size(2), 5);
}
TEST(TensorShapeTest, InsertDimWithStatusWithInvalidData) {
TensorShape s({10, 5, 20});
EXPECT_THAT(s.InsertDimWithStatus(1, -5),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected a non-negative size")));
EXPECT_THAT(
s.InsertDimWithStatus(-1, 5),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"The insertion index must be non-negative")));
EXPECT_THAT(s.InsertDimWithStatus(4, 5),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"The insertion index must be at most 3")));
}
TEST(TensorShapeTest, InsertDimWithStatusWithTooManyDims) {
TensorShape s({10, 20});
int max_dims_to_add = TensorShape::MaxDimensions() - s.dims();
for (int i = 0; i < max_dims_to_add; ++i) {
TF_EXPECT_OK(s.InsertDimWithStatus(1, 1));
}
EXPECT_THAT(s.InsertDimWithStatus(1, 1),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"Shape has.*dimensions which is the maximum allowed")));
}
TEST(TensorShapeTest, TensorShapeAssignment) {
TensorShape s({std::numeric_limits<int64_t>::max()});
TensorShape s2({1, std::numeric_limits<int64_t>::max()});
TensorShape s3({10});
s = s2;
EXPECT_EQ(s.dims(), 2);
EXPECT_EQ(s.dim_size(0), 1);
EXPECT_EQ(s.dim_size(1), std::numeric_limits<int64_t>::max());
s = s3;
EXPECT_EQ(s.dims(), 1);
EXPECT_EQ(s.dim_size(0), 10);
}
TEST(TensorShapeTest, InvalidShapeProto) {
TensorShapeProto proto;
EXPECT_TRUE(TensorShape::IsValid(proto));
proto.add_dim()->set_size(357);
proto.add_dim()->set_size(982);
EXPECT_TRUE(TensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(-357);
proto.add_dim()->set_size(-982);
EXPECT_FALSE(TensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(1LL << 35);
proto.add_dim()->set_size((1LL << 35) + 1);
EXPECT_FALSE(TensorShape::IsValid(proto));
}
TEST(TensorShapeTest, TooManyDimsProto) {
TensorShapeProto proto;
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
for (int i = 0; i < TensorShape::MaxDimensions(); i++) {
proto.add_dim()->set_size(1);
}
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
proto.add_dim()->set_size(1);
EXPECT_FALSE(TensorShape::IsValid(proto));
EXPECT_FALSE(TensorShape::IsValidShape(proto).ok());
}
TEST(TensorShapeTest, SetDimForEmptyTensor) {
TensorShape s({10, 5, 20});
EXPECT_EQ(1000, s.num_elements());
s.set_dim(1, 0);
EXPECT_EQ(0, s.num_elements());
s.set_dim(1, 7);
EXPECT_EQ(1400, s.num_elements());
}
TEST(TensorShapeTest, SetDimWithLargeSizeFormat) {
TensorShape s({std::numeric_limits<uint32_t>::max() - 2});
s.set_dim(0, 5);
EXPECT_EQ(s.dim_size(0), 5);
TensorShape s2({std::numeric_limits<int64_t>::max()});
s2.set_dim(0, 10);
EXPECT_EQ(s2.dim_size(0), 10);
s.set_dim(0, std::numeric_limits<int64_t>::max());
EXPECT_EQ(s.dim_size(0), std::numeric_limits<int64_t>::max());
}
TEST(TensorShapeTest, SetDimWithStatus) {
TensorShape s({10, 5, 20});
TF_EXPECT_OK(s.SetDimWithStatus(1, 2));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.dim_size(1), 2);
EXPECT_THAT(s.SetDimWithStatus(-1, 2),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Index must be non-negative")));
EXPECT_THAT(
s.SetDimWithStatus(4, 2),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Index must be less than 3")));
EXPECT_THAT(s.SetDimWithStatus(0, -2),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected a non-negative size")));
}
TEST(TensorShapeTest, SetDimWithStatusWithLargeSizeFormat) {
TensorShape s({std::numeric_limits<uint32_t>::max() - 2});
TF_EXPECT_OK(s.SetDimWithStatus(0, 2));
s.set_dim(0, 5);
EXPECT_EQ(s.dim_size(0), 5);
TensorShape s2({std::numeric_limits<int64_t>::max()});
TF_EXPECT_OK(s2.SetDimWithStatus(0, 10));
EXPECT_EQ(s2.dim_size(0), 10);
TF_EXPECT_OK(s.SetDimWithStatus(0, std::numeric_limits<int64_t>::max()));
EXPECT_EQ(s.dim_size(0), std::numeric_limits<int64_t>::max());
}
TEST(TensorShapeTest, AppendShape64BitIndices) {
TensorShape s({10, 2147483648});
EXPECT_EQ(10, s.dim_size(0));
EXPECT_EQ(2147483648, s.dim_size(1));
TensorShape s2;
s2.AppendShape(s);
EXPECT_EQ(10, s2.dim_size(0));
EXPECT_EQ(2147483648, s2.dim_size(1));
}
TEST(TensorShapeTest, DataType) {
TensorShape s({});
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INVALID);
TensorShapeTestHelper::set_data_type(&s, DT_INT32);
s.AddDim(1);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INT32);
s.AddDim(100000);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INT32);
TensorShapeTestHelper::set_data_type(&s, DT_UINT16_REF);
s.AddDim(2);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
s.AddDim(4);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
s.AddDim(3);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
TensorShape s2 = s;
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_UINT16_REF);
s2.RemoveDim(2);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_UINT16_REF);
TensorShapeTestHelper::set_data_type(&s2, DT_FLOAT);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_FLOAT);
s2.Clear();
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_INVALID);
}
TEST(TensorShapeTest, ostream) {
TensorShape s({10, 5, 4});
std::stringstream ss;
ss << s;
EXPECT_EQ(ss.str(), "[10,5,4]");
}
TEST(TensorShapeTest, AddDimWithStatus) {
TensorShape s({10, 5, 20});
Status status = s.AddDimWithStatus(400);
EXPECT_TRUE(status.ok());
EXPECT_EQ(400000, s.num_elements());
ASSERT_EQ(4, s.dims());
status = s.AddDimWithStatus(-1);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
TensorShape s2({std::numeric_limits<int64_t>::max()});
EXPECT_THAT(s2.AddDimWithStatus(2),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying")));
}
TEST(TensorShapeTest, AppendShapeWithStatus) {
TensorShape s({10, 5, 20});
TensorShape s2({400});
TF_EXPECT_OK(s.AppendShapeWithStatus(s2));
EXPECT_EQ(s.num_elements(), 400000);
EXPECT_EQ(s.dims(), 4);
TensorShape s3({std::numeric_limits<int64_t>::max()});
EXPECT_THAT(s2.AppendShapeWithStatus(s3),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying")));
}
TEST(TensorShapeTest, Factory) {
TensorShape s;
Status status = TensorShape::BuildTensorShapeBase({10, 5, 20}, &s);
EXPECT_TRUE(status.ok());
EXPECT_EQ(1000, s.num_elements());
ASSERT_EQ(3, s.dims());
status = TensorShape::BuildTensorShapeBase({-10, 5, 20}, &s);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
TEST(TensorShapeTest, AsEigenDSizes) {
TensorShape s({10, 5, 20});
Eigen::DSizes<Eigen::DenseIndex, 3> dsizes_expected{10, 5, 20};
EXPECT_EQ(s.AsEigenDSizes<3>(), dsizes_expected);
EXPECT_DEATH(s.AsEigenDSizes<2>(),
"tensor of 2 dimensions from a tensor of 3 dimensions");
Eigen::DSizes<Eigen::DenseIndex, 3> dsizes_out;
TF_EXPECT_OK(s.AsEigenDSizesWithStatus<3>(&dsizes_out));
EXPECT_EQ(dsizes_out, dsizes_expected);
Eigen::DSizes<Eigen::DenseIndex, 2> dsizes_out2;
EXPECT_THAT(s.AsEigenDSizesWithStatus<2>(&dsizes_out2),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"tensor of 2 dimensions from a tensor of 3 dimensions")));
}
TEST(TensorShapeTest, AsEigenDSizesWithPadding) {
TensorShape s({10, 5, 20});
Eigen::DSizes<Eigen::DenseIndex, 5> dsizes_expected{10, 5, 20, 1, 1};
EXPECT_EQ(s.AsEigenDSizesWithPadding<5>(), dsizes_expected);
EXPECT_DEATH(s.AsEigenDSizesWithPadding<2>(),
"at most 2 dimensions from a tensor of 3 dimensions");
Eigen::DSizes<Eigen::DenseIndex, 5> dsizes_out;
TF_EXPECT_OK(s.AsEigenDSizesWithPaddingWithStatus<5>(&dsizes_out));
EXPECT_EQ(dsizes_out, dsizes_expected);
Eigen::DSizes<Eigen::DenseIndex, 2> dsizes_out2;
EXPECT_THAT(s.AsEigenDSizesWithPaddingWithStatus<2>(&dsizes_out2),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"at most 2 dimensions from a tensor of 3 dimensions")));
}
TEST(TensorShapeTest, AsProto) {
TensorShape s({10, 5});
TensorShapeProto sp;
s.AsProto(&sp);
EXPECT_EQ(sp.dim_size(), 2);
EXPECT_EQ(sp.dim(0).size(), 10);
EXPECT_EQ(sp.dim(1).size(), 5);
TensorShapeProto sp2 = s.AsProto();
EXPECT_EQ(sp.DebugString(), sp2.DebugString());
}
class TensorShapeIterOld;
class TensorShapeOld {
public:
explicit TensorShapeOld(absl::Span<const int64_t> dim_sizes);
TensorShapeOld(std::initializer_list<int64_t> dim_sizes)
: TensorShapeOld(absl::Span<const int64_t>(dim_sizes)) {}
explicit TensorShapeOld(const TensorShapeProto& proto);
TensorShapeOld();
static bool IsValid(const TensorShapeProto& proto);
static Status IsValidShape(const TensorShapeProto& proto);
void Clear();
void AddDim(int64_t size);
void AppendShape(const TensorShapeOld& shape);
void InsertDim(int d, int64_t size);
void set_dim(int d, int64_t size);
void RemoveDim(int d);
int dims() const { return dim_sizes_.size(); }
int64_t dim_size(int d) const {
DCHECK_GE(d, 0);
DCHECK_LT(d, dims());
return dim_sizes_[d];
}
absl::Span<const int64_t> dim_sizes() const { return dim_sizes_; }
int64_t num_elements() const { return num_elements_; }
bool IsSameSize(const TensorShapeOld& b) const;
bool operator==(const TensorShapeOld& b) const { return IsSameSize(b); }
void AsProto(TensorShapeProto* proto) const;
template <int NDIMS>
Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizes() const;
template <int NDIMS>
Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizesWithPadding() const;
TensorShapeIterOld begin() const;
TensorShapeIterOld end() const;
string DebugString() const;
static string DebugString(const TensorShapeProto& proto);
private:
void recompute_dims();
absl::InlinedVector<int64_t, 4UL> dim_sizes_;
int64_t num_elements_;
};
struct TensorShapeDimOld {
explicit TensorShapeDimOld(int64_t s) : size(s) {}
int64_t size;
};
class TensorShapeIterOld {
public:
TensorShapeIterOld(const TensorShapeOld* shape, int d)
: shape_(shape), d_(d) {}
bool operator==(const TensorShapeIterOld& rhs) {
DCHECK(shape_ == rhs.shape_);
return d_ == rhs.d_;
}
bool operator!=(const TensorShapeIterOld& rhs) {
DCHECK(shape_ == rhs.shape_);
return d_ != rhs.d_;
}
void operator++() { ++d_; }
TensorShapeDimOld operator*() {
return TensorShapeDimOld(shape_->dim_size(d_));
}
private:
const TensorShapeOld* shape_;
int d_;
};
static const int64_t kMaxElements = (1LL << 40);
bool TensorShapeOld::IsValid(const TensorShapeProto& proto) {
int64_t num_elements = 1;
for (const auto& d : proto.dim()) {
if (d.size() < 0) return false;
num_elements *= d.size();
if (num_elements > kMaxElements) return false;
}
return true;
}
Status TensorShapeOld::IsValidShape(const TensorShapeProto& proto) {
int64_t num_elements = 1;
for (const auto& d : proto.dim()) {
if (d.size() < 0) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" has negative dimensions; ",
"perhaps an un-fed placeholder?");
}
num_elements *= d.size();
if (num_elements > kMaxElements) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" is too large (more than ", kMaxElements,
" entries)");
}
}
return absl::OkStatus();
}
TensorShapeOld::TensorShapeOld(const TensorShapeProto& proto) {
dim_sizes_.reserve(proto.dim_size());
num_elements_ = 1;
for (const auto& d : proto.dim()) {
AddDim(d.size());
}
}
TensorShapeOld::TensorShapeOld(absl::Span<const int64_t> dim_sizes) {
dim_sizes_.reserve(dim_sizes.size());
num_elements_ = 1;
for (auto s : dim_sizes) {
AddDim(s);
}
}
TensorShapeOld::TensorShapeOld() : num_elements_(1) {}
void TensorShapeOld::Clear() {
dim_sizes_.clear();
num_elements_ = 1;
}
void TensorShapeOld::AddDim(int64_t size) {
CHECK_GE(size, 0);
dim_sizes_.push_back(size);
num_elements_ *= size;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
void TensorShapeOld::AppendShape(const TensorShapeOld& shape) {
for (auto d : shape) AddDim(d.size);
}
void TensorShapeOld::InsertDim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LE(d, dims());
CHECK_GE(size, 0);
dim_sizes_.insert(dim_sizes_.begin() + d, size);
num_elements_ *= size;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
void TensorShapeOld::set_dim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
CHECK_GE(size, 0);
dim_sizes_[d] = size;
recompute_dims();
}
void TensorShapeOld::RemoveDim(int d) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
dim_sizes_.erase(dim_sizes_.begin() + d);
recompute_dims();
}
void TensorShapeOld::recompute_dims() {
num_elements_ = 1;
for (auto s : dim_sizes_) {
num_elements_ *= s;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
}
bool TensorShapeOld::IsSameSize(const TensorShapeOld& b) const {
if (b.dims() != dims()) return false;
for (int d = 0; d < dims(); d++) {
if (dim_size(d) != b.dim_size(d)) return false;
}
return true;
}
void TensorShapeOld::AsProto(TensorShapeProto* proto) const {
proto->Clear();
for (size_t d = 0; d < dim_sizes_.size(); ++d) {
auto* dim = proto->add_dim();
dim->set_size(dim_sizes_[d]);
}
}
TensorShapeIterOld TensorShapeOld::begin() const {
return TensorShapeIterOld(this, 0);
}
TensorShapeIterOld TensorShapeOld::end() const {
return TensorShapeIterOld(this, dims());
}
string TensorShapeOld::DebugString() const {
return strings::StrCat(
"[", absl::StrJoin(absl::Span<const int64_t>(dim_sizes_), ","), "]");
}
string TensorShapeOld::DebugString(const TensorShapeProto& proto) {
string s = "[";
bool first = true;
for (const auto& d : proto.dim()) {
strings::StrAppend(&s, first ? "" : ",", d.size());
first = false;
}
strings::StrAppend(&s, "]");
return s;
}
static int64_t SkewedSize(random::SimplePhilox* gen, int64_t current_elements) {
int64_t result = 0;
do {
if (current_elements < 100) {
result = gen->Uniform(100000);
} else {
result = gen->Uniform(2);
}
} while ((result * current_elements >= 1LL << 34) ||
(result * current_elements < 0));
return result;
}
TEST(TensorShapeTest, Randomized) {
random::PhiloxRandom philox(7, 7);
random::SimplePhilox gen(&philox);
TensorShape s;
TensorShapeOld sold;
TensorShapeProto sp;
TensorShapeProto spold;
LOG(INFO) << "Sizes: " << sizeof(TensorShape) << " vs "
<< sizeof(TensorShapeOld);
for (int i = 0; i < 100000; i++) {
s.AsProto(&sp);
sold.AsProto(&spold);
EXPECT_EQ(sp.DebugString(), spold.DebugString());
if ((i % 1000) == 0) {
fprintf(stderr, "ITERATION %d: %s\n", i, sp.DebugString().c_str());
}
EXPECT_EQ(s.num_elements(), sold.num_elements());
TensorShape copy = s;
TensorShape moved(std::move(copy));
EXPECT_EQ(s, moved);
copy = s;
moved = std::move(copy);
EXPECT_EQ(s, moved);
int64_t ne = sold.num_elements();
int r = gen.Uniform(100);
if (r < 10) {
int64_t sz = SkewedSize(&gen, sold.num_elements());
s.AddDim(sz);
sold.AddDim(sz);
} else if (r < 15) {
s.Clear();
sold.Clear();
} else if (r < 35 && s.dims() > 0 && ne > 0 && ne < 100000000) {
int dim = gen.Uniform(s.dims());
s.RemoveDim(dim);
sold.RemoveDim(dim);
} else if (r < 50 && ne > 0 && ne < 100000000) {
int dim = gen.Uniform(s.dims() + 1);
int64_t sz = SkewedSize(&gen, sold.num_elements());
s.InsertDim(dim, sz);
sold.InsertDim(dim, sz);
} else {
std::vector<int64_t> sizes;
const int N = (gen.Uniform(4) == 0) ? gen.Uniform(10) : gen.Uniform(3);
int64_t num_elements = 1;
      for (int j = 0; j < N; j++) {
int64_t sz = SkewedSize(&gen, num_elements);
sizes.push_back(sz);
num_elements *= std::max<int64_t>(1, sz);
}
s = TensorShape(sizes);
sold = TensorShapeOld(sizes);
}
}
}
TEST(TensorShapeTest, Large) {
int64_t one = 1;
int64_t max = std::numeric_limits<int64_t>::max();
EXPECT_EQ(TensorShape({max}).num_elements(), max);
EXPECT_EQ(TensorShape({1, max}).num_elements(), max);
EXPECT_EQ(TensorShape({max, 1}).num_elements(), max);
EXPECT_EQ(TensorShape({one << 62}).num_elements(), one << 62);
EXPECT_EQ(TensorShape({one << 20, one << 41}).num_elements(), one << 61);
EXPECT_EQ(TensorShape({1000, 1000, 1000, 1000, 1000, 1000}).num_elements(),
1e18);
}
TEST(TensorShapeTest, Overflow) {
int64_t one = 1;
std::vector<std::vector<int64_t>> overflows = {
{1 << 30, 1 << 30, 1 << 30},
{1 << 5, (one << 60) + 1},
};
for (const auto& overflow : overflows) {
TensorShapeProto proto;
for (auto dim : overflow) {
proto.add_dim()->set_size(dim);
}
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
TensorShape::IsValidShape(proto).code());
TensorShape shape;
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
TensorShapeUtils::MakeShape(overflow, &shape).code());
}
}
TEST(TensorShapeTest, UnknownRank) {
TensorShapeProto proto;
proto.set_unknown_rank(true);
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
EXPECT_EQ(TensorShape(), TensorShape(proto));
proto.add_dim()->set_size(7);
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
EXPECT_EQ(TensorShape({7}), TensorShape(proto));
}
TEST(TensorShapeUtilsTest, StartsWith) {
EXPECT_TRUE(TensorShapeUtils::StartsWith(TensorShape({}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2, 3})));
EXPECT_TRUE(TensorShapeUtils::StartsWith(TensorShape({2, 3, 4}),
TensorShape({2, 3})));
EXPECT_FALSE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({3})));
EXPECT_FALSE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2, 4})));
EXPECT_FALSE(TensorShapeUtils::StartsWith(TensorShape({2, 3}),
TensorShape({2, 3, 4})));
EXPECT_FALSE(TensorShapeUtils::StartsWith(TensorShape({2, 3, 4}),
TensorShape({3, 4})));
}
TEST(TensorShapeUtilsTest, EndsWith) {
EXPECT_TRUE(TensorShapeUtils::EndsWith(TensorShape({}), TensorShape({})));
EXPECT_TRUE(TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({3})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 3})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3, 4}), TensorShape({3, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 3, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3, 4}), TensorShape({2, 3})));
}
TEST(TensorShapeUtilsTest, ShapeListString) {
EXPECT_EQ(
TensorShapeUtils::ShapeListString({TensorShape({}), TensorShape({})}),
"[[], []]");
EXPECT_EQ(TensorShapeUtils::ShapeListString(
{TensorShape({2, 3}), TensorShape({4, 5, 6})}),
"[[2,3], [4,5,6]]");
}
TEST(TensorShapeUtilsTest, NumElements) {
int64_t num_elements = 0;
TF_EXPECT_OK(TensorShapeUtils::NumElements({}, &num_elements));
EXPECT_EQ(num_elements, 1);
TF_EXPECT_OK(TensorShapeUtils::NumElements({1}, &num_elements));
EXPECT_EQ(num_elements, 1);
TF_EXPECT_OK(TensorShapeUtils::NumElements({2, 3, 4}, &num_elements));
EXPECT_EQ(num_elements, 24);
int64_t int64_max_val = std::numeric_limits<int64_t>::max();
EXPECT_THAT(
TensorShapeUtils::NumElements({int64_max_val, int64_max_val},
&num_elements),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Can't compute total size of shape.*product would overflow")));
}
static std::vector<int64_t> MakeSizes(int arg) {
std::vector<int64_t> sizes;
switch (arg) {
case 0:
sizes = {100};
break;
case 1:
sizes = {100, 1000};
break;
case 2:
sizes = {100, 1000000};
break;
case 3:
sizes = {100, 256, 192, 3};
break;
case 4:
sizes = {1, 2, 1ll << 34, 1, 1, 1};
break;
}
return sizes;
}
void BM_TensorShape_Init(::testing::benchmark::State& state) {
const int arg = state.range(0);
auto sizes = MakeSizes(arg);
for (auto s : state) {
TensorShape shape(sizes);
tensorflow::testing::DoNotOptimize(shape.num_elements());
}
}
BENCHMARK(BM_TensorShape_Init)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
void BM_TensorShape_Assign(::testing::benchmark::State& state) {
const int arg = state.range(0);
TensorShape shape(MakeSizes(arg));
for (auto s : state) {
const TensorShape s2 = shape;
tensorflow::testing::DoNotOptimize(s2);
}
}
BENCHMARK(BM_TensorShape_Assign)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
void BM_TensorShape_SetDim(::testing::benchmark::State& state) {
const int arg = state.range(0);
TensorShape shape(MakeSizes(arg));
tensorflow::testing::DoNotOptimize(shape);
for (auto s : state) {
shape.set_dim(0, 8);
}
}
BENCHMARK(BM_TensorShape_SetDim)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
81a66a6c-8535-493f-811b-5d2d96798c77 | cpp | tensorflow/tensorflow | bfloat16 | tensorflow/core/framework/bfloat16.cc | tensorflow/core/framework/bfloat16_test.cc | #include "tensorflow/core/framework/bfloat16.h"
#include "Eigen/Core"
namespace tensorflow {
void RoundFloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
Eigen::Map<const Eigen::ArrayXf> src_eigen(src, size);
Eigen::Map<Eigen::Array<bfloat16, Eigen::Dynamic, 1>> dst_eigen(dst, size);
dst_eigen = src_eigen.cast<bfloat16>();
}
void FloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
for (; size != 0; src++, dst++, size--) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
memcpy(dst, src, sizeof(bfloat16));
#else
memcpy(
dst,
reinterpret_cast<const char*>(src) + sizeof(float) - sizeof(bfloat16),
sizeof(bfloat16));
#endif
}
}
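// Unlike RoundFloatToBFloat16 above, which rounds to nearest-even via
// Eigen's cast, this variant truncates: it copies the high-order two bytes
// of each float (adjusting for byte order), so a result can differ from the
// rounded value by one unit in the last place.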
void BFloat16ToFloat(const bfloat16* src, float* dst, int64_t size) {
Eigen::Map<const Eigen::Array<bfloat16, Eigen::Dynamic, 1>> src_eigen(src,
size);
Eigen::Map<Eigen::ArrayXf> dst_eigen(dst, size);
dst_eigen = src_eigen.cast<float>();
}
} | #include "tensorflow/core/framework/bfloat16.h"
#include "absl/base/casts.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
TEST(Bfloat16Test, Conversion) {
float a[100];
for (int i = 0; i < 100; ++i) {
a[i] = i + 1.25;
}
bfloat16 b[100];
float c[100];
FloatToBFloat16(a, b, 100);
BFloat16ToFloat(b, c, 100);
for (int i = 0; i < 100; ++i) {
EXPECT_LE(fabs(c[i] - a[i]) / a[i], 1.0 / 128);
}
}
void BM_FloatToBFloat16(::testing::benchmark::State& state) {
static const int N = 32 << 20;
float* inp = new float[N];
bfloat16* out = new bfloat16[N];
for (auto s : state) {
FloatToBFloat16(inp, out, N);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_FloatToBFloat16);
void BM_RoundFloatToBFloat16(::testing::benchmark::State& state) {
static const int N = 32 << 20;
float* inp = new float[N];
bfloat16* out = new bfloat16[N];
for (auto s : state) {
RoundFloatToBFloat16(inp, out, N);
tensorflow::testing::DoNotOptimize(inp);
tensorflow::testing::DoNotOptimize(out);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_RoundFloatToBFloat16);
void BM_BFloat16ToFloat(::testing::benchmark::State& state) {
static const int N = 32 << 20;
bfloat16* inp = new bfloat16[N];
float* out = new float[N];
for (auto s : state) {
BFloat16ToFloat(inp, out, N);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_BFloat16ToFloat);
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/bfloat16.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/bfloat16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5e0a49a-1f40-435f-80d7-28442c2c513e | cpp | tensorflow/tensorflow | shape_inference_testutil | tensorflow/core/framework/shape_inference_testutil.cc | tensorflow/core/framework/shape_inference_testutil_test.cc | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace shape_inference {
using errors::Unknown;
Status ShapeInferenceTestutil::InferShapes(ShapeInferenceTestOp op,
const string& ins,
const string& expected_outs) {
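  // "ins" is a ';'-separated list of input shape specs, e.g. "[1,2];[?,3];?".
  // "expected_outs" uses the same shape syntax plus a few extras checked
  // below: "e" means shape inference must return an error, "inK" means
  // output K must be the same ShapeHandle as input K, and a dimension spec
  // "dK_J" means the output dimension must alias input K's dimension J
  // ('|' separates acceptable alternatives).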
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
std::vector<string> ins_v = str_util::Split(ins, ';');
InferenceContext::ShapeManager manager;
std::vector<ShapeHandle> in_shapes;
for (const string& spec : ins_v) {
ShapeHandle shape;
TF_RETURN_IF_ERROR(MakeShapeFromString(&manager, spec, &shape));
in_shapes.push_back(shape);
}
std::vector<std::unique_ptr<std::vector<shape_inference::ShapeAndType>>>
input_resource_handle_shapes_and_types;
for (const auto p : op.input_resource_handle_shapes_and_types) {
if (p == nullptr) {
input_resource_handle_shapes_and_types.push_back(nullptr);
} else {
std::unique_ptr<std::vector<ShapeAndType>> v(
new std::vector<ShapeAndType>());
for (const auto& shape_and_type : *p) {
ShapeHandle shape;
TF_RETURN_IF_ERROR(
MakeShapeFromString(&manager, shape_and_type.first, &shape));
v->emplace_back(shape, shape_and_type.second);
}
input_resource_handle_shapes_and_types.emplace_back(v.release());
}
}
shape_inference::InferenceContext c(
op.graph_def_version, op.node_def, op_reg_data->op_def, in_shapes,
op.input_tensors, {}, std::move(input_resource_handle_shapes_and_types));
TF_RETURN_IF_ERROR(c.construction_status());
if (op_reg_data->shape_inference_fn == nullptr) {
return errors::InvalidArgument(
"No shape inference function exists for op '", op.name,
"', did you forget to define it?");
}
TF_RETURN_IF_ERROR(c.Run(op_reg_data->shape_inference_fn));
const int num_outputs = c.num_outputs();
if (expected_outs == "e") {
return Unknown("Shape inference should have returned error");
}
std::vector<string> expected_outs_v = str_util::Split(expected_outs, ';');
if (num_outputs != expected_outs_v.size()) {
return Unknown("The expected output string lists the wrong number of ",
"outputs. It lists ", expected_outs_v.size(),
" but should list ", num_outputs);
}
for (int i = 0; i < num_outputs; ++i) {
StringPiece expected(expected_outs_v[i]);
shape_inference::ShapeHandle out = c.output(i);
string err_prefix = strings::StrCat("Output ", i);
string err_suffix =
strings::StrCat(". Output shape was ", c.DebugString(out));
int in_index = -1;
for (int k = 0; k < c.num_inputs(); ++k) {
if (c.input(k).SameHandle(out)) {
in_index = k;
}
}
if (absl::StartsWith(expected, "in")) {
if (in_index == -1) {
return Unknown(err_prefix,
" should have matched an input shape by "
"handle, but matched no input shape. This means the ",
"shape function was expected to pass an input "
"ShapeHandle through for this output, but did not",
err_suffix);
}
auto v = str_util::Split(expected, '|');
if (std::find(v.begin(), v.end(), strings::StrCat("in", in_index)) ==
v.end()) {
return Unknown(
err_prefix, " matched input ", in_index,
" by handle, but should have matched one of (", expected,
") instead. This means the shape function passed the ShapeHandle ",
"for input ", in_index,
" to the output, but should have passed a different input ",
"ShapeHandle through", err_suffix);
}
continue;
}
if (in_index != -1) {
return Unknown(err_prefix, " matched input ", in_index,
" by ShapeHandle, but was expected to not match an input ",
"shape by handle", err_suffix);
}
if (expected == "?") {
if (c.RankKnown(out)) {
return Unknown(err_prefix, " expected to be unknown", err_suffix);
}
continue;
}
CHECK(absl::StartsWith(expected, "[") && absl::EndsWith(expected, "]"))
<< expected;
expected.remove_prefix(1);
expected.remove_suffix(1);
auto expected_dims = str_util::Split(expected, ',');
if (!c.RankKnown(out)) {
return Unknown(err_prefix, " expected rank ", expected_dims.size(),
" but was ?", err_suffix);
}
if (c.Rank(out) != expected_dims.size()) {
return Unknown(err_prefix, " expected rank ", expected_dims.size(),
" but was ", c.Rank(out), err_suffix);
}
for (int j = 0; j < expected_dims.size(); ++j) {
err_prefix = strings::StrCat("Output dim ", i, ",", j);
StringPiece expected_dim(expected_dims[j]);
DimensionHandle out_dim = c.Dim(out, j);
std::pair<int, int> in_dim_idx(-1, -1);
for (int k = 0; k < c.num_inputs(); ++k) {
auto in = c.input(k);
for (int d = 0; d < c.Rank(in); ++d) {
if (c.Dim(in, d).SameHandle(out_dim)) {
in_dim_idx = std::make_pair(k, d);
}
}
}
if (expected_dim == "?") {
if (in_dim_idx.first != -1) {
return Unknown(err_prefix,
" expected to be an unknown but matched input d",
in_dim_idx.first, "_", in_dim_idx.second,
". The shape function passed through ",
"a DimensionHandle from an input instead of making ",
"a new unknown dimension", err_suffix);
} else if (c.ValueKnown(out_dim)) {
return Unknown(err_prefix, " expected to be unknown but was ",
c.Value(out_dim), err_suffix);
}
} else if (absl::StartsWith(expected_dim, "d")) {
auto v = str_util::Split(expected_dim, '|');
if (in_dim_idx.first == -1) {
return Unknown(
err_prefix, " was expected to match the dimension of an input, ",
"but did not match any input dimension. The shape ",
"function was expected to pass through a ",
"DimensionHandle for an input, but did not", err_suffix);
}
if (std::find(v.begin(), v.end(),
strings::StrCat("d", in_dim_idx.first, "_",
in_dim_idx.second)) == v.end()) {
return Unknown(err_prefix, " matched input d", in_dim_idx.first, "_",
in_dim_idx.second,
", but should have matched one of (", expected_dim,
"). The shape function passed through "
"the DimensionHandle for an input, but ",
"was expected to pass a different one", err_suffix);
}
} else {
int64_t value = -1;
if (!strings::safe_strto64(expected_dim, &value)) {
return Unknown(err_prefix, ": the expected dimension value '",
expected_dim, "' failed to parse as int64",
err_suffix);
}
if (in_dim_idx.first != -1) {
return Unknown(
err_prefix, " expected to be ", value, " but matched input d",
in_dim_idx.first, "_", in_dim_idx.second,
". The shape function was not expected to pass a DimensionHandle "
"from the input to the output, but did. Note that even if the "
"passed through output has the same dimension value as the "
"expected value, this is considered a failure for the test; "
"switch to using d#_# syntax if passing through the "
"DimensionHandle should be the expected behavior",
err_suffix);
} else if (value != c.Value(out_dim)) {
return Unknown(err_prefix, " expected to be ", value, " but was ",
c.DebugString(out_dim), err_suffix);
}
}
}
}
return absl::OkStatus();
}
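// Parses a shape spec such as "?", "[2,3]", or "[?,4]" into a ShapeHandle
// owned by `manager`.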
Status ShapeInferenceTestutil::MakeShapeFromString(
InferenceContext::ShapeManager* manager, const string& spec,
ShapeHandle* output) {
if (spec == "?") {
*output = manager->UnknownShape();
return absl::OkStatus();
}
std::vector<DimensionHandle> dims;
strings::Scanner scanner(spec);
scanner.OneLiteral("[");
while (scanner.Peek() != ']') {
if (scanner.Peek() == '?') {
scanner.OneLiteral("?");
dims.push_back(manager->MakeDim(InferenceContext::kUnknownDim));
} else {
scanner.RestartCapture().Many(strings::Scanner::DIGIT);
StringPiece match;
int64_t dim_size = 0;
if (!scanner.GetResult(nullptr, &match) ||
!strings::safe_strto64(match, &dim_size)) {
return errors::InvalidArgument("Could not parse number in ", spec);
}
dims.push_back(manager->MakeDim(dim_size));
}
if (scanner.Peek() == ',') {
scanner.OneLiteral(",");
} else if (scanner.Peek() != ']') {
return errors::InvalidArgument(
"Invalid input spec (] not found in dim shape): ", spec);
}
}
if (!scanner.OneLiteral("]").Eos().GetResult()) {
return errors::InvalidArgument("Malformed shape spec: did not end in ']'.");
}
*output = manager->MakeShape(dims);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include <string>
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace shape_inference {
namespace {
#define EXPECT_CONTAINS(str, substr) \
do { \
string s = (str); \
EXPECT_TRUE(absl::StrContains(s, substr)) << "String: " << s; \
} while (false)
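// The test ops below dispatch to whatever shape function the currently
// running test installs through this pointer.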
static OpShapeInferenceFn* global_fn_ptr = nullptr;
REGISTER_OP("OpOneOut")
.Input("inputs: N * T")
.Output("o1: T")
.Attr("N: int >= 1")
.Attr("T: numbertype")
.SetShapeFn([](InferenceContext* c) { return (*global_fn_ptr)(c); });
REGISTER_OP("OpTwoOut")
.Input("inputs: N * T")
.Output("o1: T")
.Output("o2: T")
.Attr("N: int >= 1")
.Attr("T: numbertype")
.SetShapeFn([](InferenceContext* c) { return (*global_fn_ptr)(c); });
string RunInferShapes(const string& op_name, const string& ins,
const string& expected_outs, OpShapeInferenceFn fn) {
ShapeInferenceTestOp op(op_name);
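// `ins` is a ';'-separated list of input shape specs, so the op has one
// more input than there are separators.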
const int num_inputs = 1 + std::count(ins.begin(), ins.end(), ';');
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("dummy", op_name)
.Input(src_list)
.Attr("N", num_inputs)
.Finalize(&op.node_def));
global_fn_ptr = &fn;
return std::string(
ShapeInferenceTestutil::InferShapes(op, ins, expected_outs).message());
}
}
TEST(ShapeInferenceTestutilTest, Failures) {
auto fn_copy_input_0 = [](InferenceContext* c) {
c->set_output(0, c->input(0));
return absl::OkStatus();
};
auto fn_copy_input_2 = [](InferenceContext* c) {
c->set_output(0, c->input(2));
return absl::OkStatus();
};
auto fn_output_unknown_shapes = [](InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->UnknownShape());
}
return absl::OkStatus();
};
auto fn_output_1_2 = [](InferenceContext* c) {
c->set_output(0, c->Matrix(1, 2));
return absl::OkStatus();
};
auto fn_output_u_2 = [](InferenceContext* c) {
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 2));
return absl::OkStatus();
};
const string& op = "OpOneOut";
EXPECT_EQ("Shape inference should have returned error",
RunInferShapes(op, "[1];[2];[1]", "e", fn_copy_input_0));
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "[1];[2]", fn_copy_input_0),
"wrong number of outputs");
auto s = ShapeInferenceTestutil::InferShapes(ShapeInferenceTestOp("NoSuchOp"),
"", "");
EXPECT_TRUE(
absl::StartsWith(s.message(), "Op type not registered 'NoSuchOp'"));
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "?", fn_copy_input_0),
"expected to not match");
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in2", fn_copy_input_0),
"should have matched one of (in2)");
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in1|in2", fn_copy_input_0),
"should have matched one of (in1|in2)");
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "[1]", fn_copy_input_2),
"but was expected to not match");
EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in0|in1", fn_output_1_2),
"Output 0 should have matched an input shape");
EXPECT_EQ("Output 0 expected to be unknown. Output shape was [1,2]",
RunInferShapes(op, "[1];[2];[1]", "?", fn_output_1_2));
EXPECT_EQ("Output 0 expected rank 3 but was 2. Output shape was [1,2]",
RunInferShapes(op, "[1];[2];[1]", "[1,2,3]", fn_output_1_2));
EXPECT_EQ(
"Output 0 expected rank 2 but was ?. Output shape was ?",
RunInferShapes(op, "[1];[2];[1]", "[1,2]", fn_output_unknown_shapes));
EXPECT_EQ("Output 1 expected rank 3 but was ?. Output shape was ?",
RunInferShapes("OpTwoOut", "[1];[2];[1]", "?;[1,2,3]",
fn_output_unknown_shapes));
EXPECT_EQ("Output dim 0,1 expected to be 3 but was 2. Output shape was [1,2]",
RunInferShapes(op, "[1];[2];[1]", "[1,3]", fn_output_1_2));
EXPECT_EQ("Output dim 0,0 expected to be 2 but was 1. Output shape was [1,2]",
RunInferShapes(op, "[1];[2];[1]", "[2,2]", fn_output_1_2));
EXPECT_EQ(
"Output dim 0,0 expected to be unknown but was 1. Output shape was [1,2]",
RunInferShapes(op, "[1];[2];[1]", "[?,2]", fn_output_1_2));
EXPECT_EQ("Output dim 0,1 expected to be 1 but was 2. Output shape was [?,2]",
RunInferShapes(op, "[1];[2];[1]", "[?,1]", fn_output_u_2));
EXPECT_EQ("Output dim 0,0 expected to be 1 but was ?. Output shape was [?,2]",
RunInferShapes(op, "[0,1,?];[2];[1]", "[1,2]", fn_output_u_2));
auto fn = [](InferenceContext* c) {
c->set_output(0, c->MakeShape({c->Dim(c->input(0), 1), c->MakeDim(2),
c->UnknownDim(), c->Dim(c->input(2), 0)}));
return absl::OkStatus();
};
const string ins = "[0,1,?];[2];[1]";
EXPECT_CONTAINS(RunInferShapes(op, ins, "[?,2,?,d2_0]", fn),
"Output dim 0,0 expected to be an unknown");
EXPECT_CONTAINS(RunInferShapes(op, ins, "[0,2,?,d2_0]", fn),
"Output dim 0,0 expected to be 0 but matched input d0_1.");
EXPECT_CONTAINS(
RunInferShapes(op, ins, "[d0_0,2,?,d2_0]", fn),
"dim 0,0 matched input d0_1, but should have matched one of (d0_0).");
EXPECT_CONTAINS(RunInferShapes(op, ins, "[x,2,?,d2_0]", fn),
"Output dim 0,0: the expected dimension value 'x' failed to "
"parse as int64.");
EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_0|d0_2,2,?,d2_0]", fn),
"dim 0,0 matched input d0_1, but should have matched one of "
"(d0_0|d0_2).");
EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_1,?,?,d0_0|d2_0]", fn),
("Output dim 0,1 expected to be unknown but was 2. "
"Output shape was [1,2,?,1]"));
EXPECT_EQ(
"Output dim 0,2 expected to be 8 but was ?. Output shape was [1,2,?,1]",
RunInferShapes(op, ins, "[d0_1,2,8,d0_0|d2_0]", fn));
EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_1,2,d0_1|d2_0,d0_0|d2_0]", fn),
"expected to match");
EXPECT_EQ("",
RunInferShapes(op, ins, "[d0_1,2,?,d0_0|d2_0]", fn));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/shape_inference_testutil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/shape_inference_testutil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3c22fae-00a0-46f8-a12c-8b501d62205a | cpp | tensorflow/tensorflow | tensor_util | tensorflow/core/runtime_fallback/kernel/tensor_util.cc | tensorflow/core/runtime_fallback/util/tensor_util_test.cc | #include "tensorflow/core/runtime_fallback/kernel/tensor_util.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h"
#include "tfrt/host_context/device.h"
#include "tfrt/support/string_util.h"
namespace tensorflow {
namespace tfd {
tfrt::AsyncValueRef<KernelFallbackTensor> TransferTensorToDevice(
const tfrt::ExecutionContext& exec_ctx, const KernelFallbackTensor& tensor,
const tfrt::Device& src_device, const tfrt::Device& dst_device) {
const tensorflow::Tensor& src = *tensor.GetTensor();
auto expected_src = GetTfDevice(exec_ctx, src_device);
if (!expected_src) {
return tfrt::MakeErrorAsyncValueRef(tfrt::StrCat(expected_src.takeError()));
}
auto expected_dst = GetTfDevice(exec_ctx, dst_device);
if (!expected_dst) {
return tfrt::MakeErrorAsyncValueRef(tfrt::StrCat(expected_dst.takeError()));
}
tensorflow::Device* srcd = expected_src.get();
tensorflow::Device* dstd = expected_dst.get();
return TransferTensorToDevice<KernelFallbackTensor>(exec_ctx, src, srcd,
dstd);
}
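// Resolves a TFRT device to the matching tensorflow::Device by name, via
// the EagerContext held in the execution context's resource context.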
llvm::Expected<Device*> GetTfDevice(const tfrt::ExecutionContext& exec_ctx,
const tfrt::Device& device) {
auto eager_context_expected =
exec_ctx.resource_context()
->GetOrCreateResource<tfd::EagerContextResource>(
tfd::kEagerContextResourceName)
->GetTFEagerContext();
if (!eager_context_expected) {
return eager_context_expected.takeError();
}
Device* tf_device;
Status s = eager_context_expected.get()->FindDeviceFromName(
device.name().data(), &tf_device);
if (!s.ok()) {
return tfrt::MakeStringError(s.message());
}
return tf_device;
}
}
} | #include "tensorflow/core/runtime_fallback/util/tensor_util.h"
#include <cstddef>
#include <memory>
#include <gmock/gmock.h>
#include "Eigen/Core"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/dtype/dtype.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_buffer.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/tensor/dense_host_tensor.h"
#include "tfrt/tensor/string_host_tensor.h"
#include "tfrt/tensor/tensor_metadata.h"
#include "tfrt/tensor/tensor_shape.h"
namespace tensorflow {
namespace tfd {
namespace {
using ::tensorflow::protobuf::TextFormat;
std::unique_ptr<tfrt::HostContext> CreateTestHostContext() {
return std::make_unique<tfrt::HostContext>(
[](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(),
tfrt::CreateSingleThreadedWorkQueue());
}
TEST(TensorUtilTest, MoveHostBufferToTfTensorOk) {
ssize_t size = 4 * 4;
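// Four int32 elements of four bytes each.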
auto host_context = CreateTestHostContext();
tfrt::TensorMetadata metadata(tfrt::DType(tfrt::DType::I32),
llvm::ArrayRef(4));
auto* ptr =
host_context->AllocateBytes(size, EIGEN_MAX_ALIGN_BYTES);
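// Wrap the raw allocation in a HostBuffer whose deleter returns the bytes
// to the host context when the buffer is destroyed.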
tfrt::DenseHostTensor dht(
metadata, tfrt::HostBuffer::CreateFromExternal(
ptr, size, [&host_context](void* data, size_t num_bytes) {
host_context->DeallocateBytes(data, num_bytes);
}));
tensorflow::Tensor tensor =
MoveHostBufferToTfTensor(dht.ReleaseBuffer(), dht.dtype(), dht.shape());
EXPECT_EQ(tensor.dtype(), DT_INT32);
EXPECT_EQ(tensor.NumElements(), 4);
}
TEST(TensorUtilTest, CopyShtToTfTensorOk) {
auto host_context = CreateTestHostContext();
tfrt::TensorShape shape(1);
auto sht =
tfrt::StringHostTensor::CreateUninitialized(shape, host_context.get());
sht->strings()[0] = "Tensorflow runtime";
tensorflow::Tensor tensor = CopyShtToTfTensor(*sht);
EXPECT_THAT(reinterpret_cast<char*>(tensor.data()),
::testing::HasSubstr("Tensorflow runtime"));
}
TEST(TensorUtilTest, GetTfShapeOk) {
tfrt::TensorShape shape{{2, 3}};
tensorflow::TensorShape tf_shape = GetTfShape(shape);
EXPECT_EQ(tf_shape.dims(), 2);
EXPECT_EQ(tf_shape.dim_size(0), 2);
EXPECT_EQ(tf_shape.dim_size(1), 3);
}
TEST(TensorUtilTest, GetTensorMetadataOk) {
tensorflow::TensorProto tensor_pb;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
dtype: DT_BOOL
tensor_shape {
dim:
[ { size: 3 }
, { size: 2 }]
}
bool_val: [ false, false, false, false, false, false ]
)pb",
&tensor_pb));
tensorflow::Tensor tensor;
ASSERT_TRUE(tensor.FromProto(tensor_pb));
tfrt::TensorMetadata metadata = GetTensorMetadata(tensor);
EXPECT_EQ(metadata.dtype, tfrt::DType::I1);
EXPECT_EQ(metadata.shape, tfrt::TensorShape({3, 2}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/tensor_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/util/tensor_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db2ed32f-50df-41fd-a494-8b379eaecd49 | cpp | tensorflow/tensorflow | kernel_def_util | tensorflow/core/framework/kernel_def_util.cc | tensorflow/core/framework/kernel_def_util_test.cc | #include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
namespace {
bool InTypeList(DataType dt, const AttrValue& type_list) {
for (int in_list : type_list.list().type()) {
if (dt == in_list) return true;
}
return false;
}
}
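// Sets *match = true only if every constraint in `kernel_def` is satisfied
// by `attrs`. An unsatisfied constraint returns OkStatus with *match left
// false; a malformed constraint or missing attr returns an error.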
Status KernelAttrsMatch(const KernelDef& kernel_def, AttrSlice attrs,
bool* match) {
*match = false;
for (const auto& constraint : kernel_def.constraint()) {
auto constraint_value_case = AttrValue::VALUE_NOT_SET;
int value_type_num = 0;
if (constraint.allowed_values().list().type_size() > 0) {
constraint_value_case = AttrValue::kType;
value_type_num++;
}
if (constraint.allowed_values().list().s_size() > 0) {
constraint_value_case = AttrValue::kS;
value_type_num++;
}
if (constraint.allowed_values().list().i_size() > 0) {
constraint_value_case = AttrValue::kI;
value_type_num++;
}
if (constraint.allowed_values().list().b_size() > 0) {
constraint_value_case = AttrValue::kB;
value_type_num++;
}
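// Exactly one allowed-value list may be populated: none means an
// unsupported constraint type, more than one is ambiguous.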
if (value_type_num == 0) {
return errors::Unimplemented(
"KernelDef '", kernel_def.ShortDebugString(),
" has constraint on attr '", constraint.name(),
"' with unsupported type: ",
SummarizeAttrValue(constraint.allowed_values()));
}
if (value_type_num > 1) {
return errors::InvalidArgument(
"KernelDef '", kernel_def.ShortDebugString(),
" has constraint on attr '", constraint.name(),
"' with more than one value type: ",
SummarizeAttrValue(constraint.allowed_values()));
}
const AttrValue* attr_value = attrs.Find(constraint.name());
if (attr_value == nullptr) {
return errors::InvalidArgument(
"OpKernel '", kernel_def.op(), "' has constraint on attr '",
constraint.name(), "' not in NodeDef '", attrs.SummarizeNode(),
"', KernelDef: '", kernel_def.ShortDebugString(), "'");
}
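// For string/int/bool constraints, check the attr's type and membership in
// the allowed list; a non-member value returns OkStatus with *match still
// false.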
#define RETURN_IF_ATTR_NOT_FOUND(n, oneof_case, type_str) \
do { \
if (constraint_value_case == AttrValue::oneof_case) { \
Status s = AttrValueHasType(*attr_value, type_str); \
if (!s.ok()) { \
return errors::InvalidArgument( \
"KernelDef '", kernel_def.ShortDebugString(), \
"' has constraint on attr '", constraint.name(), \
"' that has value '", SummarizeAttrValue(*attr_value), \
"' that does not have the same type in NodeDef " \
"'", \
attrs.SummarizeNode(), "'"); \
} \
bool found = false; \
for (auto& value : constraint.allowed_values().list().n()) { \
if (value == attr_value->n()) { \
found = true; \
break; \
} \
} \
if (!found) { \
return absl::OkStatus(); \
} \
} \
} while (false)
RETURN_IF_ATTR_NOT_FOUND(s, kS, "string");
RETURN_IF_ATTR_NOT_FOUND(i, kI, "int");
RETURN_IF_ATTR_NOT_FOUND(b, kB, "bool");
#undef RETURN_IF_ATTR_NOT_FOUND
if (constraint_value_case != AttrValue::kType) {
continue;
}
if (attr_value->type() != DT_INVALID) {
if (!InTypeList(attr_value->type(), constraint.allowed_values())) {
return absl::OkStatus();
}
} else {
if (!AttrValueHasType(*attr_value, "list(type)").ok()) {
return errors::InvalidArgument(
"KernelDef '", kernel_def.ShortDebugString(),
"' has constraint on attr '", constraint.name(),
"' that has value '", SummarizeAttrValue(*attr_value),
"' that does not have type 'type' or 'list(type)' in NodeDef "
"'",
attrs.SummarizeNode(), "'");
}
for (int t : attr_value->list().type()) {
if (!InTypeList(static_cast<DataType>(t),
constraint.allowed_values())) {
return absl::OkStatus();
}
}
}
}
*match = true;
return absl::OkStatus();
}
} | #include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
NodeDef NodeDefFromText(const string& text) {
NodeDef node_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
return node_def;
}
KernelDef KernelDefFromText(const string& text) {
KernelDef kernel_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &kernel_def));
return kernel_def;
}
class AttrsMatchTest : public ::testing::Test {
protected:
void ExpectStatus(const string& node_def_str, const string& kernel_def_str,
error::Code code) {
bool match;
auto status = KernelAttrsMatch(KernelDefFromText(kernel_def_str),
NodeDefFromText(node_def_str), &match);
LOG(INFO) << "status: " << status;
EXPECT_EQ(code, status.code());
if (!status.ok()) {
EXPECT_FALSE(match)
<< "Expect no match between the given NodeDef and KernelDef";
}
}
};
TEST_F(AttrsMatchTest, ValidConstraint) {
string node_def_str = R"(
name: "ValidConstraint-op"
op: "ValidConstraint"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "ValidConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
type: DT_FLOAT
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::OK);
}
TEST_F(AttrsMatchTest, BadConstraint) {
string node_def_str = R"(
name: "BadConstraint-op"
op: "BadConstraint"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "BadConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
type: DT_FLOAT
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::INVALID_ARGUMENT);
}
TEST_F(AttrsMatchTest, Unimplemented) {
string node_def_str = R"(
name: "BadConstraint-op"
op: "BadConstraint"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
)";
string kernel_def_str = R"(
op: "BadConstraint"
device_type: "CPU"
constraint {
name: "T"
allowed_values {
list {
}
}
}
)";
ExpectStatus(node_def_str, kernel_def_str, error::UNIMPLEMENTED);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7de02f3-1f9b-4d79-9383-5b28d3fecda7 | cpp | tensorflow/tensorflow | op_def_util | tensorflow/python/framework/op_def_util.cc | tensorflow/core/framework/op_def_util_test.cc | #include "tensorflow/python/framework/op_def_util.h"
#include <map>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/python/lib/core/safe_pyobject_ptr.h"
#include "tensorflow/python/util/util.h"
using ::tensorflow::swig::GetRegisteredPyObject;
#if PY_MAJOR_VERSION < 3
#define PY_STRING_CHECK(x) (PyString_Check(x) || PyUnicode_Check(x))
#define PY_STRING_FROMSTRING(x) (PyString_FromString(x))
#define PY_INT_CHECK(x) (PyInt_Check(x))
#define PY_INT_TYPE PyInt_Type
#define PY_INT_FROM_LONG(x) (PyInt_FromLong(x))
#else
#define PY_STRING_CHECK(x) (PyBytes_Check(x) || PyUnicode_Check(x))
#define PY_STRING_FROMSTRING(x) (PyUnicode_FromString(x))
#define PY_INT_CHECK(x) (PyLong_Check(x))
#define PY_INT_TYPE PyLong_Type
#define PY_INT_FROM_LONG(x) (PyLong_FromLong(x))
#endif
namespace tensorflow {
namespace {
const std::map<std::string, AttributeType>* AttributeTypeNameMap() {
static auto* type_map = new std::map<std::string, AttributeType>(
{{"any", AttributeType::ANY},
{"float", AttributeType::FLOAT},
{"int", AttributeType::INT},
{"string", AttributeType::STRING},
{"bool", AttributeType::BOOL},
{"shape", AttributeType::SHAPE},
{"type", AttributeType::DTYPE},
{"tensor", AttributeType::TENSOR},
{"list(any)", AttributeType::LIST_ANY},
{"list(float)", AttributeType::LIST_FLOAT},
{"list(int)", AttributeType::LIST_INT},
{"list(string)", AttributeType::LIST_STRING},
{"list(bool)", AttributeType::LIST_BOOL},
{"list(type)", AttributeType::LIST_DTYPE},
{"list(shape)", AttributeType::LIST_SHAPE},
{"list(tensor)", AttributeType::LIST_TENSOR}});
return type_map;
}
struct ConvertAnyFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Py_INCREF(value);
return Safe_PyObjectPtr(value);
}
};
struct ConvertFloatFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PyFloat_Check(value)) {
Py_INCREF(value);
result.reset(value);
} else if (!PY_STRING_CHECK(value)) {
result.reset(PyObject_CallFunctionObjArgs(
reinterpret_cast<PyObject*>(&PyFloat_Type), value, nullptr));
}
return result;
}
};
struct ConvertIntFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PY_INT_CHECK(value)) {
Py_INCREF(value);
result.reset(value);
} else if (!PY_STRING_CHECK(value)) {
result.reset(PyObject_CallFunctionObjArgs(
reinterpret_cast<PyObject*>(&PY_INT_TYPE), value, nullptr));
}
return result;
}
};
struct ConvertStringFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PY_STRING_CHECK(value)) {
Py_INCREF(value);
result.reset(value);
}
return result;
}
};
struct ConvertBoolFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
if (PyBool_Check(value)) {
Py_INCREF(value);
result.reset(value);
}
return result;
}
};
struct ConvertDTypeFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* dtype = GetRegisteredPyObject("tf.dtypes.DType");
static PyObject* as_dtype = GetRegisteredPyObject("tf.dtypes.as_dtype");
if (reinterpret_cast<PyObject*>(value->ob_type) == dtype) {
Py_INCREF(value);
result.reset(value);
} else {
result.reset(PyObject_CallFunctionObjArgs(as_dtype, value, nullptr));
}
return result;
}
};
struct ConvertTensorShapeFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* shape = GetRegisteredPyObject("tf.TensorShape");
static PyObject* as_shape = GetRegisteredPyObject("tf.as_shape");
if (reinterpret_cast<PyObject*>(value->ob_type) == shape) {
Py_INCREF(value);
result.reset(value);
} else {
result.reset(PyObject_CallFunctionObjArgs(as_shape, value, nullptr));
}
return result;
}
};
struct ConvertTensorProtoFunctor {
Safe_PyObjectPtr operator()(PyObject* value) {
Safe_PyObjectPtr result;
static PyObject* tensor_proto = GetRegisteredPyObject("tf.TensorProto");
static PyObject* text_format_parse =
GetRegisteredPyObject("text_format.Parse");
if (reinterpret_cast<PyObject*>(value->ob_type) == tensor_proto) {
Py_INCREF(value);
result.reset(value);
} else if (PY_STRING_CHECK(value)) {
result.reset(PyObject_CallObject(tensor_proto, nullptr));
if (result) {
if (!PyObject_CallFunctionObjArgs(text_format_parse, value,
result.get(), nullptr)) {
return nullptr;
}
}
}
return result;
}
};
template <typename T>
Safe_PyObjectPtr ConvertListAttr(PyObject* value, T convert_functor) {
Safe_PyObjectPtr result(PySequence_List(value));
if (!result) return nullptr;
Py_ssize_t len = PySequence_Fast_GET_SIZE(result.get());
PyObject** items = PySequence_Fast_ITEMS(result.get());
for (Py_ssize_t i = 0; i < len; ++i) {
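// Note: this guard tests the sequence itself rather than items[i]; a
// sequence is never a PyFloat, so every item goes through the functor.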
if (!PyFloat_Check(value)) {
Safe_PyObjectPtr item = convert_functor(items[i]);
if (!item) return nullptr;
PySequence_SetItem(result.get(), i, item.get());
}
}
return result;
}
Safe_PyObjectPtr ConvertAttrOrNull(PyObject* value, AttributeType attr_type) {
switch (attr_type) {
case AttributeType::ANY:
return ConvertAnyFunctor()(value);
case AttributeType::FLOAT:
return ConvertFloatFunctor()(value);
case AttributeType::INT:
return ConvertIntFunctor()(value);
case AttributeType::STRING:
return ConvertStringFunctor()(value);
case AttributeType::BOOL:
return ConvertBoolFunctor()(value);
case AttributeType::DTYPE:
return ConvertDTypeFunctor()(value);
case AttributeType::SHAPE:
return ConvertTensorShapeFunctor()(value);
case AttributeType::TENSOR:
return ConvertTensorProtoFunctor()(value);
case AttributeType::LIST_ANY:
return ConvertListAttr(value, ConvertAnyFunctor());
case AttributeType::LIST_FLOAT:
return ConvertListAttr(value, ConvertFloatFunctor());
case AttributeType::LIST_INT:
return ConvertListAttr(value, ConvertIntFunctor());
case AttributeType::LIST_STRING:
return ConvertListAttr(value, ConvertStringFunctor());
case AttributeType::LIST_BOOL:
return ConvertListAttr(value, ConvertBoolFunctor());
case AttributeType::LIST_DTYPE:
return ConvertListAttr(value, ConvertDTypeFunctor());
case AttributeType::LIST_SHAPE:
return ConvertListAttr(value, ConvertTensorShapeFunctor());
case AttributeType::LIST_TENSOR:
return ConvertListAttr(value, ConvertTensorProtoFunctor());
default:
return nullptr;
}
}
PyObject* PyBool_FromBool(bool b) {
PyObject* result = b ? Py_True : Py_False;
Py_INCREF(result);
return result;
}
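// Converts an AttrValue list to a new Python list. At most one repeated
// field is expected to be populated; tensor and func lists are unsupported.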
Safe_PyObjectPtr AttrValueListToPyObject(const AttrValue::ListValue& list) {
if (list.s_size()) {
Safe_PyObjectPtr result(PyList_New(list.s_size()));
for (int i = 0; i < list.s_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PY_STRING_FROMSTRING(list.s(i).c_str()));
}
return result;
} else if (list.i_size()) {
Safe_PyObjectPtr result(PyList_New(list.i_size()));
for (int i = 0; i < list.i_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PY_INT_FROM_LONG(list.i(i)));
}
return result;
} else if (list.f_size()) {
Safe_PyObjectPtr result(PyList_New(list.f_size()));
for (int i = 0; i < list.f_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PyFloat_FromDouble(list.f(i)));
}
return result;
} else if (list.b_size()) {
Safe_PyObjectPtr result(PyList_New(list.b_size()));
for (int i = 0; i < list.b_size(); ++i) {
PyList_SET_ITEM(result.get(), i, PyBool_FromBool(list.b(i)));
}
return result;
} else if (list.type_size()) {
Safe_PyObjectPtr result(PyList_New(list.type_size()));
for (int i = 0; i < list.type_size(); ++i) {
Safe_PyObjectPtr item(DataTypeToPyObject(list.type(i)));
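// PyList_SET_ITEM steals a reference, so take an extra one before the
// Safe_PyObjectPtr releases its own at end of scope.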
Py_INCREF(item.get());
PyList_SET_ITEM(result.get(), i, item.get());
}
return result;
} else if (list.shape_size()) {
Safe_PyObjectPtr result(PyList_New(list.shape_size()));
for (int i = 0; i < list.shape_size(); ++i) {
Safe_PyObjectPtr item(TensorShapeProtoToPyObject(list.shape(i)));
Py_INCREF(item.get());
PyList_SET_ITEM(result.get(), i, item.get());
}
return result;
} else if (list.tensor_size() || list.func_size()) {
PyErr_SetString(PyExc_TypeError, "Unsupported AttrValue type");
return nullptr;
} else {
return Safe_PyObjectPtr(PyList_New(0));
}
}
}
AttributeType AttributeTypeFromName(const std::string& type_name) {
const auto* type_map = AttributeTypeNameMap();
auto it = type_map->find(type_name);
return it != type_map->end() ? it->second : AttributeType::UNKNOWN;
}
std::string AttributeTypeToName(AttributeType attr_type) {
for (const auto& pair : *AttributeTypeNameMap()) {
if (pair.second == attr_type) {
return pair.first;
}
}
return "<unknown>";
}
Safe_PyObjectPtr ConvertPyObjectToAttributeType(PyObject* value,
AttributeType type) {
Safe_PyObjectPtr result = ConvertAttrOrNull(value, type);
if (!result) {
auto err = absl::StrCat("Failed to convert value of type '",
value->ob_type->tp_name, "' to type '",
AttributeTypeToName(type), "'.");
PyErr_SetString(PyExc_TypeError, err.c_str());
}
return result;
}
Safe_PyObjectPtr AttrValueToPyObject(const AttrValue& attr_value) {
switch (attr_value.value_case()) {
case tensorflow::AttrValue::kS:
return Safe_PyObjectPtr(PY_STRING_FROMSTRING(attr_value.s().c_str()));
case tensorflow::AttrValue::kI:
return Safe_PyObjectPtr(PY_INT_FROM_LONG(attr_value.i()));
case tensorflow::AttrValue::kF:
return Safe_PyObjectPtr(PyFloat_FromDouble(attr_value.f()));
case tensorflow::AttrValue::kB:
return Safe_PyObjectPtr(PyBool_FromBool(attr_value.b()));
case tensorflow::AttrValue::kType:
return DataTypeToPyObject(attr_value.type());
case tensorflow::AttrValue::kShape:
return TensorShapeProtoToPyObject(attr_value.shape());
case tensorflow::AttrValue::kList:
return AttrValueListToPyObject(attr_value.list());
default:
PyErr_SetString(PyExc_ValueError, "Unsupported AttrValue type");
return nullptr;
}
}
Safe_PyObjectPtr DataTypeToPyObject(const DataType& data_type) {
Safe_PyObjectPtr enum_value(PY_INT_FROM_LONG(data_type));
return ConvertDTypeFunctor()(enum_value.get());
}
Safe_PyObjectPtr TensorShapeProtoToPyObject(
const TensorShapeProto& tensor_shape) {
if (tensor_shape.unknown_rank()) {
return ConvertTensorShapeFunctor()(Py_None);
} else {
Safe_PyObjectPtr dims(PyTuple_New(tensor_shape.dim_size()));
for (int i = 0; i < tensor_shape.dim_size(); ++i) {
PyTuple_SET_ITEM(dims.get(), i,
PY_INT_FROM_LONG(tensor_shape.dim(i).size()));
}
return ConvertTensorShapeFunctor()(dims.get());
}
}
} | #include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
OpDef FromText(const string& text) {
OpDef op_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &op_def));
return op_def;
}
OpDef::AttrDef ADef(const string& text) {
OpDef::AttrDef attr_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &attr_def));
return attr_def;
}
class ValidateOpDefTest : public ::testing::Test {
protected:
Status TestProto(const string& text) { return ValidateOpDef(FromText(text)); }
Status TestBuilder(const OpDefBuilder& builder) {
OpRegistrationData op_reg_data;
Status status = builder.Finalize(&op_reg_data);
TF_EXPECT_OK(status);
if (!status.ok()) {
return status;
} else {
return ValidateOpDef(op_reg_data.op_def);
}
}
};
namespace {
void ExpectFailure(const Status& status, const string& message) {
EXPECT_FALSE(status.ok()) << "Did not see error with: " << message;
if (!status.ok()) {
LOG(INFO) << "message: " << status;
EXPECT_TRUE(absl::StrContains(status.ToString(), message))
<< "Actual: " << status << "\nExpected to contain: " << message;
}
}
}
TEST_F(ValidateOpDefTest, OpDefValid) {
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Input("a: int32")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Output("a: bool")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("t: type").Input("a: t")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5 = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: numbertype")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Uppercase")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X").Attr("a: int")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X>Y").Attr("a: int")));
}
TEST_F(ValidateOpDefTest, InvalidName) {
ExpectFailure(TestBuilder(OpDefBuilder("lower").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("BadSuffix 7%")), "Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder(">OpName").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>b").Attr("a: int")),
"Invalid name");
ExpectFailure(TestBuilder(OpDefBuilder("OpName>A>>B").Attr("a: int")),
"Invalid name");
}
TEST_F(ValidateOpDefTest, DuplicateName) {
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Input("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(
OpDefBuilder("DupeName").Input("a: int32").Output("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(
OpDefBuilder("DupeName").Output("a: int32").Output("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Attr("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Output("a: int32").Attr("a: float")),
"Duplicate name: a");
ExpectFailure(
TestBuilder(OpDefBuilder("DupeName").Attr("a: int").Attr("a: float")),
"Duplicate name: a");
}
TEST_F(ValidateOpDefTest, BadAttrName) {
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("int32: int")),
"Attr can't have name int32 that matches a data type");
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("float: string")),
"Attr can't have name float that matches a data type");
}
TEST_F(ValidateOpDefTest, BadAttrType) {
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'illegal' }"),
"Unrecognized type");
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'list(illegal)' }"),
"Unrecognized type");
ExpectFailure(
TestProto("name: 'BadAttrType' attr { name: 'a' type: 'int extra' }"),
"Extra ' extra' at the end");
ExpectFailure(
TestProto(
"name: 'BadAttrType' attr { name: 'a' type: 'list(int extra)' }"),
"'list(' is missing ')' in attr");
ExpectFailure(
TestProto(
"name: 'BadAttrType' attr { name: 'a' type: 'list(int) extra' }"),
"Extra ' extra' at the end");
}
TEST_F(ValidateOpDefTest, BadAttrDefault) {
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'int' default_value { s: 'x' } }"),
"AttrValue had value with type 'string' when 'int' expected\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'int' default_value { f: 0.5 } }"),
"AttrValue had value with type 'float' when 'int' expected\n"
"\t for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'int' "
"default_value { i: 5 list { i: [2] } } }"),
"AttrValue had value with type 'list(int)' when 'int' expected\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { f: 0.5 } }"),
"AttrValue had value with type 'float' when 'list(int)' expected\n\t "
"for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(
TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'list(int)' "
"default_value { list { i: [5] f: [0.5] } } }"),
"AttrValue had value with type 'list(float)' when 'list(int)' "
"expected\n\t for attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'type' default_value { } }"),
"AttrValue missing value with expected type 'type'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'shape' default_value { } }"),
"AttrValue missing value with expected type 'shape'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'tensor' default_value { } }"),
"AttrValue missing value with expected type 'tensor'\n\t for "
"attr 'a'\n\t in Op 'BadAttrDef'");
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { } }"));
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { list { } } }"));
TF_EXPECT_OK(
TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(int) = []")));
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
"type: 'list(int)' has_minimum: true minimum: 2 "
"default_value { list { } } }"),
"Length for attr 'a' of 0 must be at least minimum 2\n\t in Op "
"'BadAttrDef'");
ExpectFailure(
TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(bool) >=2 = []")),
"Length for attr 'a' of 0 must be at least minimum 2\n\t in Op "
"'GoodAttrDef'");
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' type: "
"'list(string)' has_minimum: true minimum: 2 "
"default_value { list { s: ['foo'] } } }"),
"Length for attr 'a' of 1 must be at least minimum 2\n\t in Op "
"'BadAttrDef'");
ExpectFailure(
TestBuilder(
OpDefBuilder("GoodAttrDef").Attr("a: list(type) >=2 = [DT_STRING]")),
"Length for attr 'a' of 1 must be at least minimum 2\n\t in Op "
"'GoodAttrDef'");
}
TEST_F(ValidateOpDefTest, NoRefTypes) {
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrDef").Input("i: float_ref")),
"Illegal use of ref type 'float_ref'. "
"Use 'Ref(type)' instead for input 'i'");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrDef").Attr("T: type = DT_INT32_REF")),
"AttrValue must not have reference type value of int32_ref");
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrDef").Attr("T: list(type) = [DT_STRING_REF]")),
"AttrValue must not have reference type value of string_ref");
}
TEST_F(ValidateOpDefTest, BadAttrMin) {
ExpectFailure(TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'string' "
"has_minimum: true minimum: 0 }"),
"minimum for unsupported type string");
ExpectFailure(
TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'int' default_value "
"{ i: 2 } has_minimum: true minimum: 7 }"),
"Value for attr 'a' of 2 must be at least minimum 7\n\t in Op "
"'BadAttrMin'");
ExpectFailure(
TestProto("name: 'BadAttrMin' attr { name: 'a' "
"type: 'list(string)' has_minimum: true minimum: -5 }"),
"list type must have a non-negative minimum, not -5");
TF_EXPECT_OK(
TestProto("name: 'GoodAttrMin' attr { name: 'a' type: 'list(string)' "
"has_minimum: true minimum: 1 }"));
ExpectFailure(TestProto("name: 'NoHasMin' attr { name: 'a' "
"type: 'list(string)' minimum: 3 }"),
"Attr 'a' with has_minimum = false but minimum 3 not equal to "
"default of 0");
}
TEST_F(ValidateOpDefTest, BadAttrAllowed) {
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: numbertype = DT_INT32")));
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrtude").Attr("x: numbertype = DT_STRING")),
"attr 'x' of string is not in the list of allowed values");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
"attr 'x' of complex64 is not in the list of allowed values");
ExpectFailure(
TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list(realnumbertype) = [DT_COMPLEX128]")),
"attr 'x' of complex128 is not in the list of allowed values");
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
ExpectFailure(
TestBuilder(
OpDefBuilder("BadAttrtude").Attr("x: {'foo', 'bar'} = 'baz'")),
"attr 'x' of \"baz\" is not in the list of allowed values");
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude")
.Attr("x: list({'foo', 'bar'}) = ['baz']")),
"attr 'x' of \"baz\" is not in the list of allowed values");
ExpectFailure(TestProto("name: 'BadAttrtude' attr { name: 'a' "
"type: 'string' allowed_values { s: 'not list' } }"),
"with type 'string' when 'list(string)' expected");
ExpectFailure(
TestProto("name: 'BadAttrtude' attr { name: 'a' "
"type: 'string' allowed_values { list { i: [6] } } }"),
"with type 'list(int)' when 'list(string)' expected");
}
TEST_F(ValidateOpDefTest, BadArgType) {
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type: DT_INT32 } input_arg { name: 'b' }"),
"Missing type for input 'b'");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type: DT_INT32 } output_arg { name: 'b' }"),
"Missing type for output 'b'");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' type: "
"DT_INT32 type_attr: 'x' } attr { name: 'x' type: 'type' }"),
"Exactly one of type, type_attr, type_list_attr must be set for input "
"'a'");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' } attr { name: 'x' type: 'int' }"),
"Attr 'x' used as type_attr for input 'a' has type int");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' } attr { name: 'x' type: 'list(type)' }"),
"Attr 'x' used as type_attr for input 'a' has type list(type)");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_list_attr: 'x' } attr { name: 'x' type: 'int' }"),
"Attr 'x' used as type_list_attr for input 'a' has type int");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_list_attr: 'x' } attr { name: 'x' type: 'type' }"),
"Attr 'x' used as type_list_attr for input 'a' has type type");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' "
"type_attr: 'x' }"),
"No attr with name 'x' for input 'a'");
ExpectFailure(
TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: 'n' "
"type_attr: 'x' } attr { name: 'x' type: 'list(type)' } "
"attr { name: 'n' type: 'int' has_minimum: true minimum: 1 }"),
"Attr 'x' used as type_attr for input 'a' has type list(type)");
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type_list_attr: 'x' } "
"attr { name: 'x' type: 'list(type)' } attr { name: 'n' type: 'int' "
"has_minimum: true minimum: 1 }"));
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: 'n' } "
"attr { name: 'n' type: 'int' has_minimum: true minimum: 0 }"));
ExpectFailure(TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 "
"number_attr: 'n' }"),
"No attr with name 'n'");
ExpectFailure(
TestProto(
"name: 'Arg' input_arg { name: 'a' type: "
"DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'string' }"),
"Attr 'n' used as length for input 'a' has type string");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' type: "
"DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'int' }"),
"Attr 'n' used as length for input 'a' must have minimum;");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: "
"'n' } attr { name: 'n' type: 'int' has_minimum: true minimum: "
"-5 }"),
"Attr 'n' used as length for input 'a' must have minimum >= 0;");
ExpectFailure(
TestProto("name: 'Arg' input_arg { name: 'a' number_attr: 'n' } attr { "
"name: 'n' type: 'int' has_minimum: true minimum: 2 }"),
"Missing type for input 'a'; in OpDef:");
ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: "
"'n' type_list_attr: 'x' } attr { name: 'n' type: "
"'int' has_minimum: true minimum: 1 } attr { name: "
"'x' type: 'list(type)' }"),
"Can't have both number_attr and type_list_attr for input 'a'");
}
void ExpectDifferent(const OpDef::AttrDef& a1, const OpDef::AttrDef& a2) {
EXPECT_FALSE(AttrDefEqual(a1, a2));
EXPECT_FALSE(AttrDefEqual(a2, a1));
EXPECT_NE(AttrDefHash(a1), AttrDefHash(a2));
}
TEST(AttrDefUtilTest, EqualAndHash) {
OpDef::AttrDef a = ADef(
"name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
EXPECT_TRUE(AttrDefEqual(a, a));
EXPECT_EQ(AttrDefHash(a), AttrDefHash(a));
ExpectDifferent(
a,
ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 3 } allowed_values { i: 5 }"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 6 }"));
a = ADef(
"name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2");
EXPECT_TRUE(AttrDefEqual(a, a));
EXPECT_EQ(AttrDefHash(a), AttrDefHash(a));
ExpectDifferent(
a,
ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false "
"minimum: 2"));
ExpectDifferent(
a,
ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3"));
}
protobuf::RepeatedPtrField<OpDef::AttrDef> Rep(
const std::vector<OpDef::AttrDef>& defs) {
protobuf::RepeatedPtrField<OpDef::AttrDef> rep;
for (const OpDef::AttrDef& def : defs) {
rep.Add()->MergeFrom(def);
}
return rep;
}
void ExpectEqual(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
EXPECT_TRUE(RepeatedAttrDefEqual(a1, a2));
EXPECT_TRUE(RepeatedAttrDefEqual(a2, a1));
EXPECT_EQ(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2));
}
void ExpectDifferent(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1,
const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) {
EXPECT_FALSE(RepeatedAttrDefEqual(a1, a2));
EXPECT_FALSE(RepeatedAttrDefEqual(a2, a1));
EXPECT_NE(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2));
}
TEST(AttrDefUtilTest, EqualAndHash_Repeated) {
OpDef::AttrDef a1 = ADef(
"name: 'foo1' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a2 = ADef(
"name: 'foo2' type: 'string' description: 'cool' has_minimum: true "
"minimum: 2 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a3 = ADef(
"name: 'foo1' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }");
OpDef::AttrDef a4 = ADef(
"name: 'foo3' type: 'string' description: 'cool' has_minimum: true "
"minimum: 3 default_value { i: 2 } allowed_values { i: 5 }");
ExpectEqual(Rep({}), Rep({}));
ExpectEqual(Rep({a1}), Rep({a1}));
ExpectEqual(Rep({a1, a2}), Rep({a1, a2}));
ExpectEqual(Rep({a1, a2}), Rep({a2, a1}));
ExpectEqual(Rep({a1, a4}), Rep({a4, a1}));
ExpectDifferent(Rep({a1}), Rep({}));
ExpectDifferent(Rep({a1}), Rep({a2}));
ExpectDifferent(Rep({a1}), Rep({a3}));
ExpectDifferent(Rep({a1}), Rep({a4}));
ExpectDifferent(Rep({a1}), Rep({a1, a2}));
ExpectDifferent(Rep({a1, a2}), Rep({a1, a4}));
ExpectDifferent(Rep({a1, a2}), Rep({a1, a2, a4}));
}
void ExpectEqual(const OpDef& o1, const OpDef& o2) {
EXPECT_TRUE(OpDefEqual(o1, o2));
EXPECT_TRUE(OpDefEqual(o2, o1));
EXPECT_EQ(OpDefHash(o1), OpDefHash(o2));
}
void ExpectDifferent(const OpDef& o1, const OpDef& o2) {
EXPECT_FALSE(OpDefEqual(o1, o2));
EXPECT_FALSE(OpDefEqual(o2, o1));
EXPECT_NE(OpDefHash(o1), OpDefHash(o2));
}
TEST(OpDefEqualityTest, EqualAndHash) {
string a1 = "attr { name: 'a' type: 'string' } ";
string a2 = "attr { name: 'b' type: 'string' } ";
string a3 = "attr { name: 'c' type: 'int32' } ";
OpDef o1 = FromText(strings::StrCat("name: 'MatMul' ", a1));
OpDef o2 = FromText(strings::StrCat("name: 'MatMul' ", a2));
OpDef o3 = FromText(strings::StrCat("name: 'MatMul' ", a1, a2));
OpDef o4 = FromText(strings::StrCat("name: 'MatMul' ", a2, a1));
ExpectEqual(o1, o1);
ExpectEqual(o3, o4);
ExpectDifferent(o1, o2);
ExpectDifferent(o1, o3);
}
TEST(OpDefAttrDefaultsUnchangedTest, Foo) {
const auto& op1 = FromText("name: 'op1' attr { name: 'n' type: 'string'}");
const auto& op2 = FromText(
"name: 'op2' attr { name: 'n' type: 'string' default_value: {s: 'x'}}");
const auto& op3 = FromText(
"name: 'op3' attr { name: 'n' type: 'string' default_value: {s: 'y'}}");
TF_EXPECT_OK(OpDefAttrDefaultsUnchanged(op1, op2));
Status changed_attr = OpDefAttrDefaultsUnchanged(op2, op3);
ExpectFailure(changed_attr,
"Attr 'n' has changed it's default value; from \"x\" to \"y\"");
Status removed_attr = OpDefAttrDefaultsUnchanged(op2, op1);
ExpectFailure(removed_attr,
"Attr 'n' has removed it's default; from \"x\" to no default");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/op_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff8c6da2-f799-4128-be68-719d72d110fb | cpp | tensorflow/tensorflow | op_segment | tensorflow/core/framework/op_segment.cc | tensorflow/core/framework/op_segment_test.cc | #include "tensorflow/core/framework/op_segment.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
OpSegment::Item::~Item() {
for (const auto& kv : name_kernel) delete kv.second;
}
OpSegment::OpSegment() {}
OpSegment::~OpSegment() {
for (const auto& kv : sessions_) delete kv.second;
}
Status OpSegment::FindOrCreate(const string& session_handle,
const string& node_name, OpKernel** kernel,
CreateKernelFn create_fn) {
{
mutex_lock l(mu_);
auto item = gtl::FindPtrOrNull(sessions_, session_handle);
if (item == nullptr) {
return errors::NotFound("Session ", session_handle, " is not found.");
}
*kernel = gtl::FindPtrOrNull(item->name_kernel, node_name);
if (*kernel != nullptr) {
return absl::OkStatus();
}
}
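// Not found: run the (possibly expensive) creation outside the lock, then
// re-check under the lock in case another thread created the kernel first.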
Status s = create_fn(kernel);
if (!s.ok()) {
LOG(ERROR) << "Create kernel failed: " << s;
return s;
}
{
mutex_lock l(mu_);
auto item = gtl::FindPtrOrNull(sessions_, session_handle);
if (item == nullptr) {
return errors::NotFound("Session ", session_handle, " is not found.");
}
OpKernel** p_kernel = &(item->name_kernel[node_name]);
if (*p_kernel == nullptr) {
*p_kernel = *kernel;
} else {
delete *kernel;
*kernel = *p_kernel;
}
}
return absl::OkStatus();
}
void OpSegment::AddHold(const string& session_handle) {
mutex_lock l(mu_);
Item** item = &sessions_[session_handle];
if (*item == nullptr) {
*item = new Item;
} else {
++((*item)->num_holds);
}
}
void OpSegment::RemoveHold(const string& session_handle) {
Item* item = nullptr;
{
mutex_lock l(mu_);
auto siter = sessions_.find(session_handle);
if (siter == sessions_.end()) {
VLOG(1) << "Session " << session_handle << " is not found.";
return;
}
item = siter->second;
if (--(item->num_holds) > 0) {
return;
} else {
sessions_.erase(siter);
}
}
delete item;
}
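// Only stateful, non-function kernels are cached in the segment; function
// and partitioned-call kernels are owned elsewhere.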
bool OpSegment::ShouldOwnKernel(FunctionLibraryRuntime* lib,
const string& node_op) {
return lib->IsStateful(node_op) &&
lib->GetFunctionLibraryDefinition()->Find(node_op) == nullptr &&
node_op != "PartitionedCall" && node_op != "StatefulPartitionedCall";
}
} | #include "tensorflow/core/framework/op_segment.h"
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class OpSegmentTest : public ::testing::Test {
protected:
DeviceBase device_;
std::vector<NodeDef> int32_nodedefs_;
std::vector<NodeDef> float_nodedefs_;
OpSegmentTest() : device_(Env::Default()) {
for (int i = 0; i < 10; ++i) {
NodeDef def;
TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
.Input("x", 0, DT_INT32)
.Input("y", 0, DT_INT32)
.Finalize(&def));
int32_nodedefs_.push_back(def);
TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
.Input("x", 0, DT_FLOAT)
.Input("y", 0, DT_FLOAT)
.Finalize(&def));
float_nodedefs_.push_back(def);
}
}
void ValidateOpAndTypes(OpKernel* op, const NodeDef& expected, DataType dt) {
ASSERT_NE(op, nullptr);
EXPECT_EQ(expected.DebugString(), op->def().DebugString());
EXPECT_EQ(2, op->num_inputs());
EXPECT_EQ(dt, op->input_type(0));
EXPECT_EQ(dt, op->input_type(1));
EXPECT_EQ(1, op->num_outputs());
EXPECT_EQ(dt, op->output_type(0));
}
OpSegment::CreateKernelFn GetFn(const NodeDef* ndef) {
return [this, ndef](OpKernel** kernel) {
Status s;
auto created = CreateOpKernel(DEVICE_CPU, &device_, cpu_allocator(),
*ndef, TF_GRAPH_DEF_VERSION, &s);
if (s.ok()) {
*kernel = created.release();
}
return s;
};
}
};
TEST_F(OpSegmentTest, Basic) {
OpSegment opseg;
OpKernel* op;
opseg.AddHold("A");
opseg.AddHold("B");
for (int i = 0; i < 10; ++i) {
auto* ndef = &float_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_FLOAT);
ndef = &int32_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_INT32);
}
auto reterr = [](OpKernel** kernel) {
return errors::Internal("Should not be called");
};
for (int i = 0; i < 10; ++i) {
TF_EXPECT_OK(
opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, float_nodedefs_[i], DT_FLOAT);
TF_EXPECT_OK(
opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, int32_nodedefs_[i], DT_INT32);
}
opseg.RemoveHold("A");
opseg.RemoveHold("B");
}
TEST_F(OpSegmentTest, SessionNotFound) {
OpSegment opseg;
OpKernel* op;
NodeDef def = float_nodedefs_[0];
Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
EXPECT_TRUE(errors::IsNotFound(s)) << s;
}
TEST_F(OpSegmentTest, CreateFailure) {
OpSegment opseg;
OpKernel* op;
NodeDef def = float_nodedefs_[0];
def.set_op("nonexistop");
opseg.AddHold("A");
Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
EXPECT_TRUE(errors::IsNotFound(s)) << s;
opseg.RemoveHold("A");
}
TEST_F(OpSegmentTest, AddRemoveHolds) {
OpSegment opseg;
OpKernel* op;
const auto& ndef = int32_nodedefs_[0];
opseg.RemoveHold("null");
opseg.AddHold("foo");
TF_EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
opseg.AddHold("foo");
opseg.RemoveHold("foo");
ValidateOpAndTypes(op, ndef, DT_INT32);
opseg.RemoveHold("foo");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3e95488-5275-405c-a295-dbce3315969e | cpp | tensorflow/tensorflow | graph_to_functiondef | tensorflow/core/framework/graph_to_functiondef.cc | tensorflow/core/framework/graph_to_functiondef_test.cc | #include "tensorflow/core/framework/graph_to_functiondef.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/base64.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace {
class NodeNameMapping {
public:
NodeNameMapping() = default;
string GetInputName(const string& name);
string GetOutputName(const string& name);
string Uniquify(const string& name);
Status UseOutputName(const string& name);
string Lookup(const string& name) const;
private:
string UniquifyHelper(const string& name);
static string Normalize(string name);
absl::flat_hash_map<string, uint64> used_names_;
absl::flat_hash_map<string, string> name_mapping_;
};
string NodeNameMapping::Normalize(string name) {
if (name.empty()) return "unknown";
const int n = name.size();
for (int i = 0; i < n; ++i) {
char c = name[i];
if (isalnum(c)) {
if (isupper(c)) {
name[i] = tolower(c);
}
} else {
name[i] = '_';
}
}
int i = 0;
for (; i < n; ++i) {
if (isalpha(name[i])) break;
}
return i == n ? "unknown" : name.substr(i);
}
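// For example, by the rules above: "MatMul" -> "matmul",
// "scope/Add_1" -> "scope_add_1", "2ndOp" -> "ndop", and a name containing no
// alphabetic character (e.g. "__42") normalizes to "unknown".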
string NodeNameMapping::UniquifyHelper(const string& name) {
auto it = used_names_.emplace(name, 0);
if (it.second) return name;
while (true) {
const string candidate = strings::StrCat(name, "_", it.first->second);
it.first->second++;
if (used_names_.emplace(candidate, 0).second) return candidate;
}
}
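// For example, the first request for "add" returns "add" itself; later
// requests return "add_0", "add_1", ..., skipping any candidate that has
// already been handed out.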
string NodeNameMapping::GetInputName(const string& name) {
const string& input_name = UniquifyHelper(Normalize(name));
name_mapping_[name] = input_name;
return input_name;
}
string NodeNameMapping::GetOutputName(const string& name) {
const string& input_name = UniquifyHelper(Normalize(name));
return input_name;
}
string NodeNameMapping::Uniquify(const string& name) {
const string uniqued = UniquifyHelper(name);
name_mapping_[name] = uniqued;
return uniqued;
}
Status NodeNameMapping::UseOutputName(const string& name) {
const auto& iter = used_names_.find(name);
if (iter != used_names_.end()) {
return errors::InvalidArgument(
"Cannot have duplicate output names. Name '", name,
"' appears more than once in 'output_names' array.");
}
used_names_.emplace(name, 0);
return absl::OkStatus();
}
string NodeNameMapping::Lookup(const string& name) const {
const auto iter = name_mapping_.find(name);
if (iter == name_mapping_.end()) return string();
return iter->second;
}
Status FillFunctionBody(
const string& fn_name, const NodeNameMapping& node_names,
const std::vector<const Node*>& body_nodes,
const absl::flat_hash_map<string, string>& tensor_renaming,
bool set_stateful_from_nodes, bool copy_placeholder_attrs_from_nodes,
bool allow_destructive_reads, FunctionDef* fdef) {
absl::flat_hash_set<string> func_attr_names;
for (const auto& func_attr : fdef->signature().attr()) {
func_attr_names.insert(func_attr.name());
}
std::vector<const Edge*> in_edges;
std::vector<const Edge*> control_edges;
for (const Node* node : body_nodes) {
NodeDef* node_def = fdef->add_node_def();
NodeDebugInfo debug_info(node->def());
if (allow_destructive_reads) {
Node* mutable_node = const_cast<Node*>(node);
      *node_def->mutable_op() = node->def().op();
*node_def->mutable_attr() =
std::move(*mutable_node->mutable_def()->mutable_attr());
if (node->def().has_experimental_debug_info()) {
*node_def->mutable_experimental_debug_info() = std::move(
*mutable_node->mutable_def()->mutable_experimental_debug_info());
}
if (node->def().has_experimental_type()) {
*node_def->mutable_experimental_type() = std::move(
*mutable_node->mutable_def()->mutable_experimental_type());
}
} else {
*node_def = node->def();
MergeDebugInfo(NodeDebugInfo(node->def()), node_def);
node_def->clear_input();
}
if (!node->assigned_device_name().empty()) {
node_def->set_device(node->assigned_device_name());
}
node_def->set_name(node_names.Lookup(node->name()));
in_edges.clear();
in_edges.resize(node->num_inputs(), nullptr);
control_edges.clear();
for (const Edge* edge : node->in_edges()) {
if (edge->src()->IsSource()) continue;
if (edge->IsControlEdge()) {
control_edges.push_back(edge);
} else {
in_edges[edge->dst_input()] = edge;
}
}
std::sort(control_edges.begin(), control_edges.end(),
[](const Edge* a, const Edge* b) {
return a->src()->name() < b->src()->name();
});
for (size_t i = 0; i < in_edges.size(); ++i) {
const Edge* edge = in_edges[i];
std::string original_input_name;
if (edge == nullptr) {
if (i >= node->requested_inputs().size()) {
return errors::InvalidArgument(
"Graph to be converted to function appears to be malformed. ",
"Node ", node->name(), " is missing input edge ", i);
}
original_input_name =
ParseTensorName(node->requested_inputs()[i]).ToString();
} else {
original_input_name =
strings::StrCat(edge->src()->name(), ":", edge->src_output());
}
const auto iter = tensor_renaming.find(original_input_name);
if (iter == tensor_renaming.end()) {
return errors::InvalidArgument(
"Input ", i, ", '", original_input_name, "', of node '",
node->name(), "' in function '", fn_name,
"' is not available. You might need to include it in inputs "
"or include its source node in the body");
}
node_def->add_input(iter->second);
}
for (const Edge* edge : control_edges) {
const string normalized = node_names.Lookup(edge->src()->name());
if (normalized.empty()) {
return errors::InvalidArgument(
"The source of control edge ", edge->DebugString(),
" is not in the body. Encountered while creating function '",
fn_name, "'");
}
node_def->add_input(strings::StrCat("^", normalized));
}
if (set_stateful_from_nodes && node->op_def().is_stateful()) {
fdef->mutable_signature()->set_is_stateful(true);
}
if (!copy_placeholder_attrs_from_nodes) {
continue;
}
for (const auto& iter : node_def->attr()) {
if (iter.second.placeholder().empty()) {
continue;
}
const std::string& func_attr_name = iter.second.placeholder();
if (func_attr_names.find(func_attr_name) != func_attr_names.end()) {
continue;
}
const std::string& node_attr_name = iter.first;
const OpDef::AttrDef* node_attr_def = nullptr;
for (const auto& node_attr : node->op_def().attr()) {
if (node_attr.name() == node_attr_name) {
node_attr_def = &node_attr;
}
}
if (!node_attr_def) {
return errors::Unimplemented(
"Placeholder value is not supported for attributes not in OpDef. "
"Attribute: ",
node_attr_name, ", OpDef: ", node->op_def().DebugString());
}
OpDef::AttrDef* attr_def = fdef->mutable_signature()->add_attr();
attr_def->set_name(func_attr_name);
attr_def->set_type(node_attr_def->type());
func_attr_names.insert(func_attr_name);
}
}
return absl::OkStatus();
}
Status GraphToFunctionDefHelper(
const Graph& fn_body, const string& fn_name, bool append_hash_to_fn_name,
bool set_stateful_from_nodes, bool copy_placeholder_attrs_from_nodes,
const std::vector<const Node*>& body_nodes,
const std::vector<OutputTensor>& inputs,
const std::vector<OutputTensor>& outputs,
const std::vector<string>& output_names,
const std::vector<const Node*>& control_outputs,
const std::vector<string>& control_output_names, const char* description,
bool allow_destructive_reads, FunctionDef* fdef) {
if (!output_names.empty()) {
DCHECK_EQ(output_names.size(), outputs.size());
}
if (description != nullptr) {
fdef->mutable_signature()->set_description(description);
}
NodeNameMapping node_names;
absl::flat_hash_map<string, string> tensor_renaming;
for (size_t i = 0; i < outputs.size(); ++i) {
const Node* node = outputs[i].node;
int idx = outputs[i].index;
OpDef::ArgDef* argdef = fdef->mutable_signature()->add_output_arg();
if (node->IsRetval()) {
argdef->set_type(node->input_type(idx));
} else {
argdef->set_type(node->output_type(idx));
}
if (!output_names.empty()) {
TF_RETURN_IF_ERROR(node_names.UseOutputName(output_names[i]));
argdef->set_name(output_names[i]);
} else {
argdef->set_name(node_names.GetOutputName(node->name()));
}
}
for (size_t i = 0; i < inputs.size(); ++i) {
const Node* node = inputs[i].node;
int idx = inputs[i].index;
OpDef::ArgDef* argdef = fdef->mutable_signature()->add_input_arg();
argdef->set_type(node->output_type(idx));
const string& input_name = node_names.GetInputName(node->name());
argdef->set_name(input_name);
FunctionDef::ArgAttrs arg_attrs;
int64_t resource_arg_unique_id = -1;
for (const auto& attr : node->attrs()) {
if (absl::StartsWith(attr.first, "_")) {
arg_attrs.mutable_attr()->insert(attr);
} else if (attr.first == "shape" && argdef->type() != DT_RESOURCE) {
AttrValue value;
*(value.mutable_list()->add_shape()) = attr.second.shape();
arg_attrs.mutable_attr()->insert({"_output_shapes", value});
} else if (attr.first == "value" && node->type_string() == "Const") {
AttrValue value;
*(value.mutable_list()->add_shape()) =
attr.second.tensor().tensor_shape();
arg_attrs.mutable_attr()->insert({"_output_shapes", value});
}
if (attr.first == "_resource_arg_unique_id") {
resource_arg_unique_id = attr.second.i();
}
}
if (arg_attrs.attr_size() > 0) {
(*fdef->mutable_arg_attr())[i] = std::move(arg_attrs);
}
if (resource_arg_unique_id >= 0) {
(*fdef->mutable_resource_arg_unique_id())[idx] = resource_arg_unique_id;
}
tensor_renaming[strings::StrCat(node->name(), ":", idx)] = input_name;
}
for (const Node* node : body_nodes) {
const string& node_name = node_names.Uniquify(node->name());
NameRangeMap output_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(*node, node->op_def(), nullptr, &output_ranges));
for (const auto& output : output_ranges) {
const StringPiece& output_name = output.first;
int index_start = output.second.first;
int index_end = output.second.second;
for (int i = index_start; i < index_end; ++i) {
const string& original_name = strings::StrCat(node->name(), ":", i);
const string& new_name =
strings::StrCat(node_name, ":", output_name, ":", i - index_start);
if (tensor_renaming.find(original_name) == tensor_renaming.end()) {
tensor_renaming[original_name] = new_name;
}
}
}
}
TF_RETURN_IF_ERROR(FillFunctionBody(
fn_name, node_names, body_nodes, tensor_renaming, set_stateful_from_nodes,
copy_placeholder_attrs_from_nodes, allow_destructive_reads, fdef));
for (int r = 0; r < fdef->signature().output_arg_size(); ++r) {
const string& ret_name = fdef->signature().output_arg(r).name();
string return_value;
if (outputs[r].node->IsRetval()) {
Edge const* edge;
TF_RETURN_IF_ERROR(outputs[r].node->input_edge(0, &edge));
return_value =
strings::StrCat(edge->src()->name(), ":", edge->src_output());
} else {
return_value =
strings::StrCat(outputs[r].node->name(), ":", outputs[r].index);
}
const auto iter = tensor_renaming.find(return_value);
if (iter == tensor_renaming.end()) {
return errors::InvalidArgument(
"TF_Output ", return_value, " is neither in the function body ",
"nor among function inputs. Encountered while creating function '",
fn_name, "'");
}
(*fdef->mutable_ret())[ret_name] = iter->second;
}
if (append_hash_to_fn_name) {
const uint64 hash = FunctionDefHash(*fdef);
string encoded;
TF_RETURN_IF_ERROR(Base64Encode(
StringPiece(reinterpret_cast<const char*>(&hash), sizeof(hash)),
&encoded));
std::replace(encoded.begin(), encoded.end(), '-', 'a');
std::replace(encoded.begin(), encoded.end(), '_', 'A');
fdef->mutable_signature()->set_name(strings::StrCat(fn_name, "_", encoded));
} else {
fdef->mutable_signature()->set_name(fn_name);
}
if (!control_output_names.empty() &&
(control_outputs.size() != control_output_names.size())) {
return errors::InvalidArgument(
"Expected number of control outputs (", control_outputs.size(),
") and the number of control output names (",
control_output_names.size(), ") to match but they do not.");
}
std::set<string> control_output_names_set;
for (int i = 0; i < control_outputs.size(); ++i) {
string signature_name;
if (!control_output_names.empty()) {
signature_name = control_output_names[i];
} else {
signature_name = control_outputs[i]->name();
}
if (signature_name.empty()) {
return errors::InvalidArgument("Control output name must be not empty");
}
if (!control_output_names_set.insert(signature_name).second) {
return errors::InvalidArgument("Repeated control output name: ",
signature_name);
}
const string control_output_node =
node_names.Lookup(control_outputs[i]->name());
if (control_output_node.empty()) {
return errors::InvalidArgument(
"Control output node name must be not empty");
}
(*fdef->mutable_control_ret())[signature_name] = control_output_node;
}
for (const string& control_output : control_output_names_set) {
fdef->mutable_signature()->add_control_output(control_output);
}
return absl::OkStatus();
}
Status GraphToFunctionDefHelper(
const Graph& graph, const string& name,
const std::function<absl::optional<string>(const Node*)>& control_ret,
const std::vector<string>& output_names, bool allow_destructive_reads,
FunctionDef* fdef) {
auto add_arg_or_retval = [](Node* node,
std::vector<OutputTensor>* args_or_retvals) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &index));
if (index >= args_or_retvals->size()) {
args_or_retvals->resize(index + 1);
}
if ((*args_or_retvals)[index].node == nullptr) {
(*args_or_retvals)[index].node = node;
} else {
return errors::InvalidArgument(
"Multiple '", node->type_string(), "' nodes found with index ", index,
"; originally we already have:\n",
(*args_or_retvals)[index].node->DebugString(), "\nNow we have:\n",
node->DebugString());
}
return absl::OkStatus();
};
std::vector<const Node*> body_nodes;
std::vector<OutputTensor> inputs;
std::vector<OutputTensor> outputs;
std::vector<const Node*> control_outputs;
std::vector<string> control_output_names;
for (Node* node : graph.op_nodes()) {
if (node->IsArg()) {
TF_RETURN_IF_ERROR(add_arg_or_retval(node, &inputs));
continue;
}
if (node->IsRetval()) {
TF_RETURN_IF_ERROR(add_arg_or_retval(node, &outputs));
continue;
}
if (control_ret) {
auto control_ret_name = control_ret(node);
if (control_ret_name.has_value()) {
control_outputs.push_back(node);
control_output_names.push_back(control_ret_name.value());
}
}
body_nodes.push_back(node);
}
auto validate_args_retvals =
[](const std::vector<OutputTensor>& args_or_retvals,
const string& op_type) {
for (int i = 0, e = args_or_retvals.size(); i < e; ++i) {
if (args_or_retvals[i].node == nullptr) {
return errors::InvalidArgument("Missing '", op_type,
"' node at index ", i);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(validate_args_retvals(inputs, "_Arg"));
TF_RETURN_IF_ERROR(validate_args_retvals(outputs, "_Retval"));
return GraphToFunctionDefHelper(
      graph, name, /*append_hash_to_fn_name=*/false,
      /*set_stateful_from_nodes=*/false,
      /*copy_placeholder_attrs_from_nodes=*/false, body_nodes, inputs, outputs,
      output_names, control_outputs, control_output_names,
      /*description=*/nullptr, allow_destructive_reads, fdef);
}
}
Status GraphToFunctionDef(const Graph& fn_body, const string& fn_name,
bool append_hash_to_fn_name,
bool set_stateful_from_nodes,
bool copy_placeholder_attrs_from_nodes,
const std::vector<const Node*>& body_nodes,
const std::vector<OutputTensor>& inputs,
const std::vector<OutputTensor>& outputs,
const std::vector<string>& output_names,
const std::vector<const Node*>& control_outputs,
const std::vector<string>& control_output_names,
const char* description, FunctionDef* fdef) {
return GraphToFunctionDefHelper(
fn_body, fn_name, append_hash_to_fn_name, set_stateful_from_nodes,
copy_placeholder_attrs_from_nodes, body_nodes, inputs, outputs,
output_names, control_outputs, control_output_names, description,
      /*allow_destructive_reads=*/false, fdef);
}
Status GraphToFunctionDef(
const Graph& graph, const string& name,
const std::function<absl::optional<string>(const Node*)>& control_ret,
FunctionDef* fdef) {
return GraphToFunctionDefHelper(graph, name, control_ret,
                                  /*output_names=*/{},
                                  /*allow_destructive_reads=*/false, fdef);
}
Status GraphToFunctionDef(const Graph& graph, const string& name,
FunctionDef* fdef) {
return GraphToFunctionDef(graph, name, nullptr, fdef);
}
Status GraphToFunctionDef(const Graph& graph, const string& name,
const std::vector<std::string>& output_names,
FunctionDef* fdef) {
  return GraphToFunctionDefHelper(graph, name, /*control_ret=*/nullptr,
                                  output_names,
                                  /*allow_destructive_reads=*/false, fdef);
}
Status GraphToFunctionDef(
std::unique_ptr<Graph> graph, const string& name,
const std::function<std::optional<string>(const Node*)>& control_ret,
FunctionDef* fdef) {
return GraphToFunctionDefHelper(*graph, name, control_ret,
                                  /*output_names=*/{},
                                  /*allow_destructive_reads=*/true, fdef);
}
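// Illustrative end-to-end sketch (mirrors this file's unit tests): build a
// graph whose inputs/outputs are _Arg/_Retval nodes, then convert it. `root`
// is assumed to be a tensorflow::Scope that already holds the ops.
//
//   GraphDef graph_def;
//   TF_RETURN_IF_ERROR(root.ToGraphDef(&graph_def));
//   Graph graph(OpRegistry::Global());
//   TF_RETURN_IF_ERROR(
//       ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def, &graph));
//   FunctionDef fdef;
//   TF_RETURN_IF_ERROR(GraphToFunctionDef(graph, "my_fn", &fdef));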
} | #include "tensorflow/core/framework/graph_to_functiondef.h"
#include <utility>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/base64.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
FunctionDef RemoveDebugInfo(const FunctionDef& def) {
FunctionDef copy = def;
for (auto& node_def : *copy.mutable_node_def()) {
node_def.clear_experimental_debug_info();
}
return copy;
}
bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
string* diff) {
if (a.DebugString() != b.DebugString()) {
if (diff) {
*diff = strings::StrCat("Definition mismatch for function ",
a.signature().name(), ":\n", a.DebugString(),
"\n ---- vs. ----\n", b.DebugString());
}
return false;
}
return true;
}
TEST(GraphToFunctionDefTest, Basics) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Arg(root.WithOpName("B"), DT_FLOAT, 1);
auto c = ops::_Arg(root.WithOpName("C"), DT_FLOAT, 2);
auto d = ops::Add(root.WithOpName("D"), a, b);
auto e = ops::Add(root.WithOpName("b"), d, c);
auto f = ops::Neg(root.WithOpName("h"), e);
auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f});
auto h = ops::_Retval(root.WithOpName("H"), g, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", &fdef));
FunctionDef fdef_expected = FunctionDefHelper::Create(
"test_fn",
{"a: float", "b: float", "c: float"},
{"h: float"},
{},
{
{{"D"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"b_0"}, "Add", {"D:z:0", "c"}, {{"T", DT_FLOAT}}},
{{"h_0"}, "Neg", {"b_0:z:0"}, {{"T", DT_FLOAT}}},
{{"G"}, "AddN", {"b_0:z:0", "h_0:y:0"}, {{"N", 2}, {"T", DT_FLOAT}}},
},
{{"h", "G:sum:0"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, OverrideOutputNames) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Retval(root.WithOpName("H"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", {"b"}, &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, DuplicatedOutputNames) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
auto c = ops::_Retval(root.WithOpName("C"), a, 1);
FunctionDef fdef;
auto status = GraphToFunctionDef(*root.graph(), "test_fn", {"d", "d"}, &fdef);
EXPECT_THAT(status, tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"Cannot have duplicate output names. Name 'd' "
"appears more than once in 'output_names' array."));
}
TEST(GraphToFunctionDefTest, ArgAttrShape) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
AttrValue shape_attr;
*(shape_attr.mutable_shape()) = TensorShape({1, 2}).AsProto();
a.node()->AddAttr("shape", shape_attr);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
FunctionDef::ArgAttrs attrs;
AttrValue output_shapes;
*(output_shapes.mutable_list()->add_shape()) = TensorShape({1, 2}).AsProto();
attrs.mutable_attr()->insert({"_output_shapes", output_shapes});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ArgAttrPrivateAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
AttrValue private_attr;
*(private_attr.mutable_s()) = "value";
a.node()->AddAttr("_name", private_attr);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
FunctionDef::ArgAttrs attrs;
attrs.mutable_attr()->insert({"_name", private_attr});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ArgAttrConstInput) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
Tensor t(DT_FLOAT, TensorShape({2, 2}));
TensorProto t_proto;
t.AsProtoField(&t_proto);
AttrValue attr;
*(attr.mutable_tensor()) = std::move(t_proto);
a.node()->AddAttr("value", attr);
a.node()->AddAttr("index", 0);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
std::vector<OutputTensor> inputs;
std::vector<OutputTensor> outputs;
auto add_arg_or_retval = [](Node* node,
std::vector<OutputTensor>* args_or_retvals) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &index));
if (index >= args_or_retvals->size()) {
args_or_retvals->resize(index + 1);
}
(*args_or_retvals)[index].node = node;
return absl::OkStatus();
};
for (Node* node : root.graph()->op_nodes()) {
if (node->IsConstant()) {
TF_EXPECT_OK(add_arg_or_retval(node, &inputs));
} else {
TF_EXPECT_OK(add_arg_or_retval(node, &outputs));
}
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
false, {}, inputs,
outputs,
{}, {}, {},
"ArgAttrConstInput", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
AttrValue value;
*(value.mutable_list()->add_shape()) = TensorShape({2, 2}).AsProto();
FunctionDef::ArgAttrs attrs;
attrs.mutable_attr()->insert({"_output_shapes", value});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
(*fdef_expected.mutable_signature()->mutable_description()) =
"ArgAttrConstInput";
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, AppendHashToFnName) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("attr_name_not_found", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", true,
false,
false, body_nodes,
{},
{},
{}, {}, {},
nullptr, &fdef));
EXPECT_TRUE(absl::StartsWith(fdef.signature().name(), "test_fn_"));
}
TEST(GraphToFunctionDefTest, CopyPlaceholderAttrsFromNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::VarHandleOp(root.WithOpName("var"), DT_FLOAT, {});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("shared_name", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
true, body_nodes, {},
{},
{}, {}, {},
nullptr, &fdef));
}
TEST(GraphToFunctionDefTest, CopyPlaceholderAttrsFromNodesUnImplemented) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("attr_name_not_found", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
auto status = GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
true, body_nodes, {},
{},
{}, {}, {},
nullptr, &fdef);
EXPECT_EQ(status.code(), error::UNIMPLEMENTED);
}
TEST(GraphToFunctionDefTest, ControlDependencies) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("a"), DT_FLOAT, 0);
auto b = ops::Neg(root.WithOpName("b").WithControlDependencies(a), a);
auto c = ops::_Retval(root.WithOpName("c").WithControlDependencies(b), b, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", &fdef));
FunctionDef fdef_expected = FunctionDefHelper::Create(
"test_fn",
{"a: float"},
{"c: float"},
{},
{
{{"b"}, "Neg", {"a", "^a"}, {{"T", DT_FLOAT}}},
},
{{"c", "b:y:0"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ControlOutputs) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("a"), DT_FLOAT, 0);
auto b = ops::Neg(root.WithOpName("b"), a);
auto c = ops::_Retval(root.WithOpName("c"), b, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
const auto control_ret = [](const Node* n) -> absl::optional<string> {
if (n->name() == "b") return absl::make_optional<string>("must_execute");
return absl::nullopt;
};
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", control_ret, &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"c: float"},
{},
{
{{"b"}, "Neg", {"a"}, {{"T", DT_FLOAT}}},
},
{{"c", "b:y:0"}},
{{"must_execute", "b"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_to_functiondef.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_to_functiondef_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7423b8b9-1d55-4ce1-ba4f-6e17980cd9a9 | cpp | tensorflow/tensorflow | variant_op_registry | tensorflow/core/framework/variant_op_registry.cc | tensorflow/core/framework/variant_op_registry_test.cc | #include "tensorflow/core/framework/variant_op_registry.h"
#include <string>
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
const char* VariantUnaryOpToString(VariantUnaryOp op) {
switch (op) {
case INVALID_VARIANT_UNARY_OP:
return "INVALID";
case ZEROS_LIKE_VARIANT_UNARY_OP:
return "ZEROS_LIKE";
case CONJ_VARIANT_UNARY_OP:
return "CONJ";
  }
  // Unreachable for valid enum values; returning here avoids a
  // missing-return warning (and undefined behavior on invalid input).
  return "UNKNOWN";
}
const char* VariantBinaryOpToString(VariantBinaryOp op) {
switch (op) {
case INVALID_VARIANT_BINARY_OP:
return "INVALID";
case ADD_VARIANT_BINARY_OP:
return "ADD";
  }
  // Unreachable for valid enum values; see VariantUnaryOpToString above.
  return "UNKNOWN";
}
std::unordered_set<string>* UnaryVariantOpRegistry::PersistentStringStorage() {
static std::unordered_set<string>* string_storage =
new std::unordered_set<string>();
return string_storage;
}
UnaryVariantOpRegistry* UnaryVariantOpRegistryGlobal() {
static UnaryVariantOpRegistry* global_unary_variant_op_registry = nullptr;
if (global_unary_variant_op_registry == nullptr) {
global_unary_variant_op_registry = new UnaryVariantOpRegistry;
}
return global_unary_variant_op_registry;
}
UnaryVariantOpRegistry::VariantDecodeFn* UnaryVariantOpRegistry::GetDecodeFn(
StringPiece type_name) {
auto found = decode_fns.find(type_name);
if (found == decode_fns.end()) return nullptr;
return &found->second;
}
void UnaryVariantOpRegistry::RegisterDecodeFn(
const string& type_name, const VariantDecodeFn& decode_fn) {
CHECK(!type_name.empty()) << "Need a valid name for UnaryVariantDecode";
VariantDecodeFn* existing = GetDecodeFn(type_name);
CHECK_EQ(existing, nullptr)
<< "Unary VariantDecodeFn for type_name: " << type_name
<< " already registered";
decode_fns.insert(std::pair<StringPiece, VariantDecodeFn>(
GetPersistentStringPiece(type_name), decode_fn));
}
bool DecodeUnaryVariant(Variant* variant) {
CHECK_NOTNULL(variant);
if (variant->TypeName().empty()) {
VariantTensorDataProto* t = variant->get<VariantTensorDataProto>();
if (t == nullptr || !t->metadata().empty() || !t->tensors().empty()) {
return false;
} else {
variant->clear();
return true;
}
}
UnaryVariantOpRegistry::VariantDecodeFn* decode_fn =
UnaryVariantOpRegistry::Global()->GetDecodeFn(variant->TypeName());
if (decode_fn == nullptr) {
return false;
}
const string type_name = variant->TypeName();
bool decoded = (*decode_fn)(variant);
if (!decoded) return false;
if (variant->TypeName() != type_name) {
LOG(ERROR) << "DecodeUnaryVariant: Variant type_name before decoding was: "
<< type_name
<< " but after decoding was: " << variant->TypeName()
<< ". Treating this as a failure.";
return false;
}
return true;
}
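// Illustrative round trip (mirrors the unit tests): encode a Variant holding
// a registered type into a VariantTensorDataProto and decode it back.
// `MyValue` is a hypothetical type registered via
// REGISTER_UNARY_VARIANT_DECODE_FUNCTION.
//
//   MyValue value;
//   Variant v = value;
//   VariantTensorData data;
//   v.Encode(&data);
//   VariantTensorDataProto proto;
//   data.ToProto(&proto);
//   Variant encoded = std::move(proto);
//   if (DecodeUnaryVariant(&encoded)) {
//     MyValue* decoded = encoded.get<MyValue>();  // Non-null on success.
//   }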
#define REGISTER_VARIANT_DECODE_TYPE(T) \
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(T, TF_STR(T));
REGISTER_VARIANT_DECODE_TYPE(int);
REGISTER_VARIANT_DECODE_TYPE(float);
REGISTER_VARIANT_DECODE_TYPE(bool);
REGISTER_VARIANT_DECODE_TYPE(double);
#undef REGISTER_VARIANT_DECODE_TYPE
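// A user-defined type opts in the same way; the sketch below assumes MyValue
// provides the TypeName()/Encode()/Decode() interface the Variant framework
// requires:
//
//   REGISTER_UNARY_VARIANT_DECODE_FUNCTION(MyValue, "MyValue");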
Status VariantDeviceCopy(
const VariantDeviceCopyDirection direction, const Variant& from,
Variant* to,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy_fn) {
UnaryVariantOpRegistry::AsyncVariantDeviceCopyFn* device_copy_fn =
UnaryVariantOpRegistry::Global()->GetDeviceCopyFn(direction,
from.TypeId());
if (device_copy_fn == nullptr) {
return errors::Internal(
"No unary variant device copy function found for direction: ",
direction, " and Variant type_index: ",
port::MaybeAbiDemangle(from.TypeId().name()));
}
return (*device_copy_fn)(from, to, copy_fn);
}
namespace {
template <typename T>
Status DeviceCopyPrimitiveType(
const T& in, T* out,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copier) {
*out = in;
return absl::OkStatus();
}
}
#define REGISTER_VARIANT_DEVICE_COPY_TYPE(T) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \
T, VariantDeviceCopyDirection::HOST_TO_DEVICE, \
DeviceCopyPrimitiveType<T>); \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \
T, VariantDeviceCopyDirection::DEVICE_TO_HOST, \
DeviceCopyPrimitiveType<T>); \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \
T, VariantDeviceCopyDirection::DEVICE_TO_DEVICE, \
DeviceCopyPrimitiveType<T>);
REGISTER_VARIANT_DEVICE_COPY_TYPE(int);
REGISTER_VARIANT_DEVICE_COPY_TYPE(float);
REGISTER_VARIANT_DEVICE_COPY_TYPE(double);
REGISTER_VARIANT_DEVICE_COPY_TYPE(bool);
#undef REGISTER_VARIANT_DEVICE_COPY_TYPE
namespace {
template <typename T>
Status ZerosLikeVariantPrimitiveType(OpKernelContext* ctx, const T& t,
T* t_out) {
*t_out = T(0);
return absl::OkStatus();
}
}
#define REGISTER_VARIANT_ZEROS_LIKE_TYPE(T) \
REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION(ZEROS_LIKE_VARIANT_UNARY_OP, \
DEVICE_CPU, T, \
ZerosLikeVariantPrimitiveType<T>);
REGISTER_VARIANT_ZEROS_LIKE_TYPE(int);
REGISTER_VARIANT_ZEROS_LIKE_TYPE(float);
REGISTER_VARIANT_ZEROS_LIKE_TYPE(double);
REGISTER_VARIANT_ZEROS_LIKE_TYPE(bool);
#undef REGISTER_VARIANT_ZEROS_LIKE_TYPE
namespace {
template <typename T>
Status AddVariantPrimitiveType(OpKernelContext* ctx, const T& a, const T& b,
T* out) {
*out = a + b;
return absl::OkStatus();
}
}
#define REGISTER_VARIANT_ADD_TYPE(T) \
REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION(ADD_VARIANT_BINARY_OP, DEVICE_CPU, \
T, AddVariantPrimitiveType<T>);
REGISTER_VARIANT_ADD_TYPE(int);
REGISTER_VARIANT_ADD_TYPE(float);
REGISTER_VARIANT_ADD_TYPE(double);
REGISTER_VARIANT_ADD_TYPE(bool);
#undef REGISTER_VARIANT_ADD_TYPE
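// Illustrative sketch: with the primitive registrations above, two int-valued
// Variants can be added through the registry dispatch. CPUDevice is assumed
// to be Eigen::ThreadPoolDevice (as in the unit tests); ctx may be nullptr
// because the primitive functors never touch it.
//
//   Variant a = 3;
//   Variant b = 4;
//   Variant out = 0;
//   TF_CHECK_OK(BinaryOpVariants<CPUDevice>(
//       /*ctx=*/nullptr, ADD_VARIANT_BINARY_OP, a, b, &out));
//   // *out.get<int>() == 7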
} | #include <memory>
#include "tensorflow/core/lib/strings/str_util.h"
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "tensorflow/core/framework/variant_op_registry.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
struct VariantValue {
string TypeName() const { return "TEST VariantValue"; }
static Status CPUZerosLikeFn(OpKernelContext* ctx, const VariantValue& v,
VariantValue* v_out) {
if (v.early_exit) {
return errors::InvalidArgument("early exit zeros_like!");
}
v_out->value = 1;
return absl::OkStatus();
}
static Status GPUZerosLikeFn(OpKernelContext* ctx, const VariantValue& v,
VariantValue* v_out) {
if (v.early_exit) {
return errors::InvalidArgument("early exit zeros_like!");
}
v_out->value = 2;
return absl::OkStatus();
}
static Status CPUAddFn(OpKernelContext* ctx, const VariantValue& a,
const VariantValue& b, VariantValue* out) {
if (a.early_exit) {
return errors::InvalidArgument("early exit add!");
}
out->value = a.value + b.value;
return absl::OkStatus();
}
static Status GPUAddFn(OpKernelContext* ctx, const VariantValue& a,
const VariantValue& b, VariantValue* out) {
if (a.early_exit) {
return errors::InvalidArgument("early exit add!");
}
out->value = -(a.value + b.value);
return absl::OkStatus();
}
static Status CPUToGPUCopyFn(
const VariantValue& from, VariantValue* to,
const std::function<Status(const Tensor&, Tensor*)>& copier) {
TF_RETURN_IF_ERROR(copier(Tensor(), nullptr));
to->value = 0xdeadbeef;
return absl::OkStatus();
}
bool early_exit;
int value;
};
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(VariantValue, "TEST VariantValue");
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(
VariantValue, VariantDeviceCopyDirection::HOST_TO_DEVICE,
VariantValue::CPUToGPUCopyFn);
REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION(ZEROS_LIKE_VARIANT_UNARY_OP,
DEVICE_CPU, VariantValue,
VariantValue::CPUZerosLikeFn);
REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION(ZEROS_LIKE_VARIANT_UNARY_OP,
DEVICE_GPU, VariantValue,
VariantValue::GPUZerosLikeFn);
REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION(ADD_VARIANT_BINARY_OP, DEVICE_CPU,
VariantValue, VariantValue::CPUAddFn);
REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION(ADD_VARIANT_BINARY_OP, DEVICE_GPU,
VariantValue, VariantValue::GPUAddFn);
}
TEST(VariantOpDecodeRegistryTest, TestBasic) {
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetDecodeFn("YOU SHALL NOT PASS"),
nullptr);
auto* decode_fn =
UnaryVariantOpRegistry::Global()->GetDecodeFn("TEST VariantValue");
EXPECT_NE(decode_fn, nullptr);
  VariantValue vv{/*early_exit=*/true};
Variant v = vv;
VariantTensorData data;
v.Encode(&data);
VariantTensorDataProto proto;
data.ToProto(&proto);
Variant encoded = std::move(proto);
EXPECT_TRUE((*decode_fn)(&encoded));
VariantValue* decoded = encoded.get<VariantValue>();
EXPECT_NE(decoded, nullptr);
EXPECT_EQ(decoded->early_exit, true);
}
TEST(VariantOpDecodeRegistryTest, TestEmpty) {
VariantTensorDataProto empty_proto;
Variant empty_encoded = std::move(empty_proto);
EXPECT_TRUE(DecodeUnaryVariant(&empty_encoded));
EXPECT_TRUE(empty_encoded.is_empty());
VariantTensorData data;
Variant number = 3.0f;
number.Encode(&data);
VariantTensorDataProto proto;
data.ToProto(&proto);
proto.set_type_name("");
Variant encoded = std::move(proto);
EXPECT_FALSE(DecodeUnaryVariant(&encoded));
}
TEST(VariantOpDecodeRegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::VariantDecodeFn f;
string kTypeName = "fjfjfj";
registry.RegisterDecodeFn(kTypeName, f);
EXPECT_DEATH(registry.RegisterDecodeFn(kTypeName, f),
"fjfjfj already registered");
}
TEST(VariantOpCopyToGPURegistryTest, TestBasic) {
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetDeviceCopyFn(
VariantDeviceCopyDirection::DEVICE_TO_DEVICE,
TypeIndex::Make<VariantValue>()),
nullptr);
auto* copy_to_gpu_fn = UnaryVariantOpRegistry::Global()->GetDeviceCopyFn(
VariantDeviceCopyDirection::HOST_TO_DEVICE,
TypeIndex::Make<VariantValue>());
EXPECT_NE(copy_to_gpu_fn, nullptr);
  VariantValue vv{/*early_exit=*/true};
Variant v = vv;
Variant v_out;
bool dummy_executed = false;
auto dummy_copy_fn = [&dummy_executed](const Tensor& from,
Tensor* to) -> Status {
dummy_executed = true;
return absl::OkStatus();
};
TF_EXPECT_OK((*copy_to_gpu_fn)(v, &v_out, dummy_copy_fn));
EXPECT_TRUE(dummy_executed);
VariantValue* copied_value = v_out.get<VariantValue>();
EXPECT_NE(copied_value, nullptr);
EXPECT_EQ(copied_value->value, 0xdeadbeef);
}
TEST(VariantOpCopyToGPURegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::AsyncVariantDeviceCopyFn f;
class FjFjFj {};
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterDeviceCopyFn(VariantDeviceCopyDirection::HOST_TO_DEVICE,
kTypeIndex, f);
EXPECT_DEATH(registry.RegisterDeviceCopyFn(
VariantDeviceCopyDirection::HOST_TO_DEVICE, kTypeIndex, f),
"FjFjFj already registered");
}
TEST(VariantOpZerosLikeRegistryTest, TestBasicCPU) {
class Blah {};
EXPECT_EQ(
UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU, TypeIndex::Make<Blah>()),
nullptr);
  VariantValue vv_early_exit{/*early_exit=*/true, /*value=*/0};
Variant v = vv_early_exit;
Variant v_out = VariantValue();
OpKernelContext* null_context_pointer = nullptr;
Status s0 = UnaryOpVariant<CPUDevice>(null_context_pointer,
ZEROS_LIKE_VARIANT_UNARY_OP, v, &v_out);
EXPECT_FALSE(s0.ok());
EXPECT_TRUE(absl::StrContains(s0.message(), "early exit zeros_like"));
  VariantValue vv_ok{/*early_exit=*/false, /*value=*/0};
v = vv_ok;
TF_EXPECT_OK(UnaryOpVariant<CPUDevice>(
null_context_pointer, ZEROS_LIKE_VARIANT_UNARY_OP, v, &v_out));
VariantValue* vv_out = CHECK_NOTNULL(v_out.get<VariantValue>());
EXPECT_EQ(vv_out->value, 1);
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST(VariantOpUnaryOpRegistryTest, TestBasicGPU) {
class Blah {};
EXPECT_EQ(
UnaryVariantOpRegistry::Global()->GetUnaryOpFn(
ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_GPU, TypeIndex::Make<Blah>()),
nullptr);
  VariantValue vv_early_exit{/*early_exit=*/true, /*value=*/0};
Variant v = vv_early_exit;
Variant v_out = VariantValue();
OpKernelContext* null_context_pointer = nullptr;
Status s0 = UnaryOpVariant<GPUDevice>(null_context_pointer,
ZEROS_LIKE_VARIANT_UNARY_OP, v, &v_out);
EXPECT_FALSE(s0.ok());
EXPECT_TRUE(absl::StrContains(s0.message(), "early exit zeros_like"));
  VariantValue vv_ok{/*early_exit=*/false, /*value=*/0};
v = vv_ok;
TF_EXPECT_OK(UnaryOpVariant<GPUDevice>(
null_context_pointer, ZEROS_LIKE_VARIANT_UNARY_OP, v, &v_out));
VariantValue* vv_out = CHECK_NOTNULL(v_out.get<VariantValue>());
EXPECT_EQ(vv_out->value, 2);
}
#endif
TEST(VariantOpUnaryOpRegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::VariantUnaryOpFn f;
class FjFjFj {};
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterUnaryOpFn(ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU,
kTypeIndex, f);
EXPECT_DEATH(registry.RegisterUnaryOpFn(ZEROS_LIKE_VARIANT_UNARY_OP,
DEVICE_CPU, kTypeIndex, f),
"FjFjFj already registered");
registry.RegisterUnaryOpFn(ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_GPU,
kTypeIndex, f);
EXPECT_DEATH(registry.RegisterUnaryOpFn(ZEROS_LIKE_VARIANT_UNARY_OP,
DEVICE_GPU, kTypeIndex, f),
"FjFjFj already registered");
}
TEST(VariantOpAddRegistryTest, TestBasicCPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetBinaryOpFn(
ADD_VARIANT_BINARY_OP, DEVICE_CPU, TypeIndex::Make<Blah>()),
nullptr);
  VariantValue vv_early_exit{/*early_exit=*/true, /*value=*/3};
  VariantValue vv_other{/*early_exit=*/true, /*value=*/4};
Variant v_a = vv_early_exit;
Variant v_b = vv_other;
Variant v_out = VariantValue();
OpKernelContext* null_context_pointer = nullptr;
Status s0 = BinaryOpVariants<CPUDevice>(
null_context_pointer, ADD_VARIANT_BINARY_OP, v_a, v_b, &v_out);
EXPECT_FALSE(s0.ok());
EXPECT_TRUE(absl::StrContains(s0.message(), "early exit add"));
  VariantValue vv_ok{/*early_exit=*/false, /*value=*/3};
v_a = vv_ok;
TF_EXPECT_OK(BinaryOpVariants<CPUDevice>(
null_context_pointer, ADD_VARIANT_BINARY_OP, v_a, v_b, &v_out));
VariantValue* vv_out = CHECK_NOTNULL(v_out.get<VariantValue>());
EXPECT_EQ(vv_out->value, 7);
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST(VariantOpAddRegistryTest, TestBasicGPU) {
class Blah {};
EXPECT_EQ(UnaryVariantOpRegistry::Global()->GetBinaryOpFn(
ADD_VARIANT_BINARY_OP, DEVICE_GPU, TypeIndex::Make<Blah>()),
nullptr);
  VariantValue vv_early_exit{/*early_exit=*/true, /*value=*/3};
  VariantValue vv_other{/*early_exit=*/true, /*value=*/4};
Variant v_a = vv_early_exit;
Variant v_b = vv_other;
Variant v_out = VariantValue();
OpKernelContext* null_context_pointer = nullptr;
Status s0 = BinaryOpVariants<GPUDevice>(
null_context_pointer, ADD_VARIANT_BINARY_OP, v_a, v_b, &v_out);
EXPECT_FALSE(s0.ok());
EXPECT_TRUE(absl::StrContains(s0.message(), "early exit add"));
  VariantValue vv_ok{/*early_exit=*/false, /*value=*/3};
v_a = vv_ok;
TF_EXPECT_OK(BinaryOpVariants<GPUDevice>(
null_context_pointer, ADD_VARIANT_BINARY_OP, v_a, v_b, &v_out));
VariantValue* vv_out = CHECK_NOTNULL(v_out.get<VariantValue>());
EXPECT_EQ(vv_out->value, -7);
}
#endif
TEST(VariantOpAddRegistryTest, TestDuplicate) {
UnaryVariantOpRegistry registry;
UnaryVariantOpRegistry::VariantBinaryOpFn f;
class FjFjFj {};
const auto kTypeIndex = TypeIndex::Make<FjFjFj>();
registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_CPU, kTypeIndex, f);
EXPECT_DEATH(registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_CPU,
kTypeIndex, f),
"FjFjFj already registered");
registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_GPU, kTypeIndex, f);
EXPECT_DEATH(registry.RegisterBinaryOpFn(ADD_VARIANT_BINARY_OP, DEVICE_GPU,
kTypeIndex, f),
"FjFjFj already registered");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/variant_op_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/variant_op_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b3cd866-7823-44dc-8ae9-23c7a7b89396 | cpp | tensorflow/tensorflow | resource_handle | tensorflow/core/framework/resource_handle.cc | tensorflow/core/framework/resource_handle_test.cc | #include "tensorflow/core/framework/resource_handle.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/demangle.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace {
std::string DtypeAndShapesToString(
const std::vector<DtypeAndPartialTensorShape>& dtype_and_shapes) {
std::vector<std::string> dtype_and_shape_strings;
dtype_and_shape_strings.reserve(dtype_and_shapes.size());
for (const DtypeAndPartialTensorShape& dtype_and_shape : dtype_and_shapes) {
dtype_and_shape_strings.push_back(
absl::StrFormat("DType enum: %d, Shape: %s", dtype_and_shape.dtype,
dtype_and_shape.shape.DebugString()));
}
return absl::StrFormat("[ %s ]", absl::StrJoin(dtype_and_shape_strings, ","));
}
}
constexpr const char* ResourceHandle::ANONYMOUS_NAME;
ResourceHandle::ResourceHandle() {}
ResourceHandle::ResourceHandle(const ResourceHandleProto& proto) {
TF_CHECK_OK(FromProto(proto));
}
Status ResourceHandle::BuildResourceHandle(const ResourceHandleProto& proto,
ResourceHandle* out) {
if (out == nullptr)
return errors::Internal(
"BuildResourceHandle() was called with nullptr for the output");
return out->FromProto(proto);
}
ResourceHandle::~ResourceHandle() {}
void ResourceHandle::AsProto(ResourceHandleProto* proto) const {
proto->set_device(device());
proto->set_container(container());
proto->set_name(name());
proto->set_hash_code(hash_code());
proto->set_maybe_type_name(maybe_type_name());
for (const auto& dtype_and_shape_pair : dtypes_and_shapes_) {
auto dtype_and_shape = proto->add_dtypes_and_shapes();
dtype_and_shape->set_dtype(dtype_and_shape_pair.dtype);
dtype_and_shape_pair.shape.AsProto(dtype_and_shape->mutable_shape());
}
}
Status ResourceHandle::FromProto(const ResourceHandleProto& proto) {
set_device(proto.device());
set_container(proto.container());
set_name(proto.name());
set_hash_code(proto.hash_code());
set_maybe_type_name(proto.maybe_type_name());
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
for (const auto& dtype_and_shape : proto.dtypes_and_shapes()) {
DataType dtype = dtype_and_shape.dtype();
PartialTensorShape shape;
Status s = PartialTensorShape::BuildPartialTensorShape(
dtype_and_shape.shape(), &shape);
if (!s.ok()) {
return s;
}
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{dtype, shape});
}
dtypes_and_shapes_ = std::move(dtypes_and_shapes);
return absl::OkStatus();
}
string ResourceHandle::SerializeAsString() const {
ResourceHandleProto proto;
AsProto(&proto);
return proto.SerializeAsString();
}
bool ResourceHandle::ParseFromString(const string& s) {
ResourceHandleProto proto;
return proto.ParseFromString(s) && FromProto(proto).ok();
}
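// Illustrative round trip: SerializeAsString() and ParseFromString() are
// inverses for a well-formed handle. `src` below is assumed to be any
// populated ResourceHandle.
//
//   ResourceHandle dst;
//   bool ok = dst.ParseFromString(src.SerializeAsString());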
string ResourceHandle::DebugString() const {
return absl::StrFormat(
"device: %s container: %s name: %s hash_code: 0x%X maybe_type_name %s, "
"dtype and shapes : %s",
device(), container(), name(), hash_code(),
port::Demangle(maybe_type_name()),
DtypeAndShapesToString(dtypes_and_shapes()));
}
string ResourceHandle::SummarizeValue() const {
return absl::StrFormat(
"ResourceHandle(name=\"%s\", device=\"%s\", container=\"%s\", "
"type=\"%s\", dtype and shapes : \"%s\")",
name(), device(), container(), port::Demangle(maybe_type_name()),
DtypeAndShapesToString(dtypes_and_shapes()));
}
ResourceHandle ResourceHandle::MakeRefCountingHandle(
ResourceBase* resource, const string& device_name,
const TypeIndex& type_index,
const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes,
const absl::optional<ManagedStackTrace>& definition_stack_trace) {
ResourceHandle result;
  result.resource_.reset(resource, /*add_ref=*/false);
result.set_device(device_name);
result.set_container("Anonymous");
result.set_definition_stack_trace(definition_stack_trace);
auto resource_id = GenerateUniqueId();
std::string handle_name = resource->MakeRefCountingHandleName(resource_id);
result.set_name(handle_name);
result.set_hash_code(type_index.hash_code());
result.set_maybe_type_name(type_index.name());
result.set_dtypes_and_shapes(dtypes_and_shapes);
return result;
}
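// Illustrative lifetime sketch (mirrors the unit tests): the handle shares
// ownership of `resource`, which is destroyed when the last handle copy goes
// away. MyResource is a hypothetical ResourceBase subclass.
//
//   auto* resource = new MyResource;           // RefCount() == 1.
//   {
//     auto handle = ResourceHandle::MakeRefCountingHandle(resource, "cpu",
//                                                         {}, {});
//     auto copy = handle;                      // RefCount() == 2.
//   }                                          // Last copy gone: deleted.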
Status ResourceHandle::ValidateType(const TypeIndex& type_index) const {
if (type_index.hash_code() != hash_code()) {
return errors::InvalidArgument(
"Trying to access a handle's resource using the wrong type. ",
"The handle points to a resource (name '", name(), "') of type '",
port::Demangle(maybe_type_name()), "' (hash code ", hash_code(),
") but you are trying to access the resource as type '",
port::Demangle(type_index.name()), "' (hash code ",
type_index.hash_code(), ")");
}
return absl::OkStatus();
}
std::atomic<int64_t> ResourceHandle::current_id_;
int64_t ResourceHandle::GenerateUniqueId() { return current_id_.fetch_add(1); }
string ProtoDebugString(const ResourceHandle& handle) {
return handle.DebugString();
}
void EncodeResourceHandleList(const ResourceHandle* p, int64_t n,
std::unique_ptr<port::StringListEncoder> e) {
ResourceHandleProto proto;
for (int i = 0; i < n; ++i) {
p[i].AsProto(&proto);
e->Append(proto);
}
e->Finalize();
}
bool DecodeResourceHandleList(std::unique_ptr<port::StringListDecoder> d,
ResourceHandle* ps, int64_t n) {
std::vector<uint32> sizes(n);
if (!d->ReadSizes(&sizes)) return false;
ResourceHandleProto proto;
for (int i = 0; i < n; ++i) {
if (!proto.ParseFromArray(d->Data(sizes[i]), sizes[i])) {
return false;
}
if (!ps[i].FromProto(proto).ok()) {
return false;
}
}
return true;
}
} | #include "tensorflow/core/framework/resource_handle.h"
#include <memory>
#include <string>
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class MockResource : public ResourceBase {
public:
MockResource(bool* alive, int payload) : alive_(alive), payload_(payload) {
if (alive_ != nullptr) {
*alive_ = true;
}
}
~MockResource() override {
if (alive_ != nullptr) {
*alive_ = false;
}
}
string DebugString() const override { return ""; }
bool* alive_;
int payload_;
};
class ResourceHandleTest : public ::testing::Test {};
TEST_F(ResourceHandleTest, RefCounting) {
const int payload = -123;
bool alive = false;
auto resource = new MockResource(&alive, payload);
EXPECT_TRUE(alive);
{
auto handle =
ResourceHandle::MakeRefCountingHandle(resource, "cpu", {}, {});
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 1);
{
auto handle_copy = handle;
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 2);
}
EXPECT_TRUE(alive);
EXPECT_EQ(resource->RefCount(), 1);
}
EXPECT_FALSE(alive);
}
TEST_F(ResourceHandleTest, SummarizeValue) {
ResourceHandleProto proto;
TensorShapeProto shape;
shape.add_dim()->set_size(4);
shape.add_dim()->set_size(8);
proto.set_device("cpu:0");
proto.set_container("test_container");
proto.set_name("test_var");
auto dtypes_and_shapes = proto.add_dtypes_and_shapes();
dtypes_and_shapes->set_dtype(DT_INT32);
dtypes_and_shapes->mutable_shape()->MergeFrom(shape);
auto handle = std::make_unique<ResourceHandle>(proto);
EXPECT_EQ(handle->SummarizeValue(),
"ResourceHandle(name=\"test_var\", device=\"cpu:0\", "
"container=\"test_container\", type=\"\", dtype and shapes : \"[ "
"DType enum: 3, Shape: [4,8] ]\")");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a1c6abc-0c40-4ed1-b36e-8bfe93401374 | cpp | tensorflow/tensorflow | common_shape_fns | tensorflow/core/framework/common_shape_fns.cc | tensorflow/core/framework/common_shape_fns_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/util/einsum_op_util.h"
#include "tensorflow/core/util/tensor_format.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace shape_inference {
Status GetWindowedOutputSizeFromDimsV2(
shape_inference::InferenceContext* c,
shape_inference::DimensionHandle input_size,
shape_inference::DimensionOrConstant filter_size, int64_t dilation_rate,
int64_t stride, Padding padding_type, int64_t padding_before,
int64_t padding_after, shape_inference::DimensionHandle* output_size) {
if (stride <= 0) {
return errors::InvalidArgument("Stride must be > 0, but got ", stride);
}
if (dilation_rate < 1) {
return errors::InvalidArgument("Dilation rate must be >= 1, but got ",
dilation_rate);
}
switch (padding_type) {
case Padding::VALID:
padding_before = padding_after = 0;
TF_FALLTHROUGH_INTENDED;
case Padding::EXPLICIT:
TF_RETURN_IF_ERROR(
c->Add(input_size, padding_before + padding_after, &input_size));
if (dilation_rate > 1) {
DimensionHandle window_size;
TF_RETURN_IF_ERROR(
c->Subtract(c->MakeDim(filter_size), 1, &window_size));
TF_RETURN_IF_ERROR(
c->Multiply(window_size, dilation_rate, &window_size));
TF_RETURN_IF_ERROR(c->Add(window_size, 1, &window_size));
TF_RETURN_IF_ERROR(c->Subtract(input_size, window_size, output_size));
} else {
TF_RETURN_IF_ERROR(c->Subtract(input_size, filter_size, output_size));
}
TF_RETURN_IF_ERROR(c->Add(*output_size, stride, output_size));
      TF_RETURN_IF_ERROR(c->Divide(*output_size, stride,
                                   /*evenly_divisible=*/false, output_size));
break;
case Padding::SAME:
TF_RETURN_IF_ERROR(c->Add(input_size, stride - 1, output_size));
      TF_RETURN_IF_ERROR(c->Divide(*output_size, stride,
                                   /*evenly_divisible=*/false, output_size));
break;
}
return absl::OkStatus();
}
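// Worked example of the arithmetic above, with input_size = 10,
// filter_size = 3, stride = 2, dilation_rate = 1:
//   VALID: (10 - 3 + 2) / 2 = 4  (floor division, i.e. ceil((10 - 3 + 1) / 2))
//   SAME : (10 + 2 - 1) / 2 = 5  (i.e. ceil(10 / 2))
// With dilation_rate = 2 the effective window is (3 - 1) * 2 + 1 = 5, so the
// VALID case yields (10 - 5 + 2) / 2 = 3.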
Status GetWindowedOutputSizeFromDims(
shape_inference::InferenceContext* c,
shape_inference::DimensionHandle input_size,
shape_inference::DimensionOrConstant filter_size, int64_t stride,
Padding padding_type, shape_inference::DimensionHandle* output_size) {
if (padding_type == Padding::EXPLICIT) {
return errors::Internal(
"GetWindowedOutputSizeFromDims does not handle EXPLICIT padding; call "
"GetWindowedOutputSizeFromDimsV2 instead");
}
  return GetWindowedOutputSizeFromDimsV2(c, input_size, filter_size,
                                         /*dilation_rate=*/1, stride,
                                         padding_type,
                                         /*padding_before=*/-1,
                                         /*padding_after=*/-1, output_size);
}
Status UnchangedShape(shape_inference::InferenceContext* c) {
c->set_output(0, c->input(0));
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data != nullptr) {
c->set_output_handle_shapes_and_types(0, *handle_data);
}
return absl::OkStatus();
}
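// Shape function for MatMul: merges the two inner (contracted) dimensions,
// honoring the transpose attributes, and emits an
// [output_rows, output_cols] matrix.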
Status MatMulShape(shape_inference::InferenceContext* c) {
ShapeHandle a;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &a));
ShapeHandle b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &b));
bool transpose_a, transpose_b;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
DimensionHandle output_rows = transpose_a ? c->Dim(a, 1) : c->Dim(a, 0);
DimensionHandle output_cols = transpose_b ? c->Dim(b, 0) : c->Dim(b, 1);
DimensionHandle inner_a = transpose_a ? c->Dim(a, 0) : c->Dim(a, 1);
DimensionHandle inner_b = transpose_b ? c->Dim(b, 1) : c->Dim(b, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(c->Merge(inner_a, inner_b, &merged));
c->set_output(0, c->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
namespace {
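// Checks that a subscript contains either no periods or exactly the three
// periods of one "..." ellipsis; sets *found_ellipsis accordingly.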
Status ValidateEinsumEllipsis(absl::string_view subscript,
bool* found_ellipsis) {
const int num_periods = absl::c_count(subscript, '.');
if (num_periods != 0 && num_periods != 3) {
return errors::InvalidArgument(
"Expected at most one ellipsis (...), but found ", num_periods,
" periods (.) in the input subscript: ", subscript);
}
if (num_periods == 3 && !absl::StrContains(subscript, "...")) {
return errors::InvalidArgument(
"Periods found outside of ellipsis in subscript: ", subscript);
}
*found_ellipsis = num_periods > 0;
return absl::OkStatus();
}
}
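// Shape function for Einsum: dimensions sharing a label across inputs are
// merged, and the dimensions covered by an ellipsis are broadcast between
// the two inputs before being spliced into the output shape.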
Status EinsumShape(shape_inference::InferenceContext* c) {
string equation;
TF_RETURN_IF_ERROR(c->GetAttr("equation", &equation));
absl::InlinedVector<string, 2> input_labels;
string output_labels;
TF_RETURN_IF_ERROR(
ValidateEinsumEquation(equation, &input_labels, &output_labels));
if (c->num_inputs() == 0 || c->num_inputs() > 2) {
return errors::InvalidArgument("Expected either 1 or 2 inputs but got: ",
c->num_inputs());
}
const int input_labels_size = input_labels.size();
if (c->num_inputs() != input_labels_size) {
return errors::InvalidArgument("Expected ", input_labels.size(),
" inputs for equation ", equation,
" but got: ", c->num_inputs());
}
absl::flat_hash_map<char, DimensionHandle> label_to_dimension;
absl::InlinedVector<ShapeHandle, 2> input_bcast_shapes(c->num_inputs());
for (int i = 0, end = c->num_inputs(); i < end; ++i) {
bool has_ellipsis = false;
TF_RETURN_IF_ERROR(ValidateEinsumEllipsis(input_labels[i], &has_ellipsis));
ShapeHandle input_shape = c->input(i);
if (c->RankKnown(input_shape)) {
if (has_ellipsis) {
const int num_named_labels =
static_cast<int>(input_labels[i].size()) - 3;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRankAtLeast(input_shape, num_named_labels, &input_shape),
" for ", i, "th input and equation: ", equation);
} else {
const int num_named_labels = static_cast<int>(input_labels[i].size());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRank(input_shape, num_named_labels, &input_shape), " for ",
i, "th input and equation: ", equation);
}
}
bool seen_ellipsis = false;
input_bcast_shapes[i] = c->Scalar();
for (int label_idx = 0, end = input_labels[i].size(); label_idx < end;
++label_idx) {
const char label = input_labels[i][label_idx];
const int64_t axis_before_ellipsis = label_idx;
const int64_t axis_after_ellipsis =
c->RankKnown(input_shape)
? label_idx + c->Rank(input_shape) - input_labels[i].size()
: -1;
if (label == '.') {
if (!c->RankKnown(input_shape)) {
input_bcast_shapes[i] = c->UnknownShape();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input_shape, axis_before_ellipsis,
axis_after_ellipsis + 3,
&input_bcast_shapes[i]));
}
label_idx += 2;
seen_ellipsis = true;
continue;
}
int64_t axis = seen_ellipsis ? axis_after_ellipsis : axis_before_ellipsis;
DimensionHandle new_dim = c->RankKnown(input_shape)
? c->Dim(input_shape, axis)
: c->UnknownDim();
if (label_to_dimension.contains(label)) {
DimensionHandle merged;
TF_RETURN_IF_ERROR(
c->Merge(label_to_dimension[label], new_dim, &merged));
label_to_dimension[label] = merged;
} else {
label_to_dimension[label] = new_dim;
}
}
}
ShapeHandle output_bcast_shape;
if (input_bcast_shapes.size() == 1) {
output_bcast_shape = input_bcast_shapes[0];
} else if (input_bcast_shapes.size() == 2) {
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, input_bcast_shapes[0], input_bcast_shapes[1], true,
&output_bcast_shape));
}
bool output_has_ellipsis = false;
TF_RETURN_IF_ERROR(
ValidateEinsumEllipsis(output_labels, &output_has_ellipsis));
if (output_has_ellipsis) {
if (!c->RankKnown(output_bcast_shape)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
} else {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
c->WithRankAtMost(output_bcast_shape, 0, &output_bcast_shape),
" for einsum equation '", equation,
"' without ellipsis (...) in the output subscripts where input(s) have "
"non-empty broadcasting shape");
output_bcast_shape = c->Scalar();
}
std::vector<DimensionHandle> output_dims;
for (int label_idx = 0, end = output_labels.size(); label_idx < end;
++label_idx) {
const char label = output_labels[label_idx];
if (label == '.') {
for (int k = 0; k < c->Rank(output_bcast_shape); ++k) {
output_dims.push_back(c->Dim(output_bcast_shape, k));
}
label_idx += 2;
continue;
}
auto dimension_it = label_to_dimension.find(label);
if (dimension_it == label_to_dimension.end()) {
return errors::InvalidArgument(
"Einsum output subscripts for equation '", equation, "' has label '",
label, "' which is not present in the input subscripts");
}
output_dims.push_back(dimension_it->second);
}
c->set_output(0, c->MakeShape(output_dims));
return absl::OkStatus();
}
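// Shape function for BatchMatMulV2: merges the contracted inner dimensions
// and broadcasts the leading batch dimensions of the two operands.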
Status BatchMatMulV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle a_shape;
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
bool adj_x;
bool adj_y;
TF_RETURN_IF_ERROR(c->GetAttr("adj_x", &adj_x));
TF_RETURN_IF_ERROR(c->GetAttr("adj_y", &adj_y));
DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);
DimensionHandle output_cols = c->Dim(b_shape, adj_y ? -2 : -1);
DimensionHandle inner_merged;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),
c->Dim(b_shape, adj_y ? -1 : -2), &inner_merged));
ShapeHandle a_batch_shape;
ShapeHandle b_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_shape));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, a_batch_shape, b_batch_shape, true, &output_batch_shape));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->Concatenate(
output_batch_shape, c->Matrix(output_rows, output_cols), &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
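// Shape function for BatchMatMul: unlike the V2 op, the leading batch
// dimensions must match exactly (Merge), with no broadcasting.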
Status BatchMatMulShape(shape_inference::InferenceContext* c) {
ShapeHandle a_shape;
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
bool adj_x;
bool adj_y;
TF_RETURN_IF_ERROR(c->GetAttr("adj_x", &adj_x));
TF_RETURN_IF_ERROR(c->GetAttr("adj_y", &adj_y));
DimensionHandle output_rows = c->Dim(a_shape, adj_x ? -1 : -2);
DimensionHandle output_cols = c->Dim(b_shape, adj_y ? -2 : -1);
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, adj_x ? -2 : -1),
c->Dim(b_shape, adj_y ? -1 : -2), &unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
return absl::OkStatus();
}
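// Shape function for BiasAdd: merges the bias vector length with the
// channel dimension (dimension 1 for NCHW, the last dimension otherwise).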
Status BiasAddShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
if (s.ok() && data_format == "NCHW") {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 3, &input_shape));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
}
ShapeHandle bias_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &bias_shape));
DimensionHandle bias_dim = c->Dim(bias_shape, 0);
if (!c->RankKnown(input_shape)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
ShapeHandle output_shape;
if (s.ok() && data_format == "NCHW") {
ShapeHandle first;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 0, 1, &first));
ShapeHandle last;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 2, &last));
DimensionHandle input_bias_dim = c->Dim(input_shape, 1);
DimensionHandle merged_bias_dim;
TF_RETURN_IF_ERROR(c->Merge(input_bias_dim, bias_dim, &merged_bias_dim));
ShapeHandle merged_bias = c->Vector(merged_bias_dim);
ShapeHandle temp;
TF_RETURN_IF_ERROR(c->Concatenate(first, merged_bias, &temp));
TF_RETURN_IF_ERROR(c->Concatenate(temp, last, &output_shape));
} else {
ShapeHandle all_but_bias;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, 0, -1, &all_but_bias));
DimensionHandle input_bias_dim = c->Dim(input_shape, -1);
DimensionHandle merged_bias_dim;
TF_RETURN_IF_ERROR(c->Merge(input_bias_dim, bias_dim, &merged_bias_dim));
ShapeHandle merged_bias = c->Vector(merged_bias_dim);
TF_RETURN_IF_ERROR(
c->Concatenate(all_but_bias, merged_bias, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status BiasAddGradShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
if (s.ok() && data_format == "NCHW") {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 3, &input_shape));
c->set_output(0, c->Vector(c->Dim(input_shape, 1)));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
c->set_output(0, c->Vector(c->Dim(input_shape, -1)));
}
return absl::OkStatus();
}
Status CheckFormatConstraintsOnShape(const TensorFormat tensor_format,
const ShapeHandle shape_handle,
const string& tensor_name,
shape_inference::InferenceContext* c) {
if (tensor_format == FORMAT_NCHW_VECT_C) {
const int num_dims = c->Rank(shape_handle);
DimensionHandle vect_dim = c->Dim(
shape_handle, GetTensorInnerFeatureDimIndex(num_dims, tensor_format));
int64_t vect_dim_val = c->Value(vect_dim);
if (vect_dim_val != 4 && vect_dim_val != 32) {
return errors::InvalidArgument(
"VECT_C dimension must be 4 or 32, but is ", vect_dim_val);
}
}
return absl::OkStatus();
}
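// Shape function for dataset iterator ops: output i takes its shape from
// the i-th entry of the "output_shapes" attribute.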
Status DatasetIteratorShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
std::vector<PartialTensorShape> output_shapes;
TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
const int output_shapes_size = output_shapes.size();
if (output_shapes_size != c->num_outputs()) {
return errors::InvalidArgument(
"`output_shapes` must be the same length as `output_types` (",
output_shapes.size(), " vs. ", c->num_outputs());
}
for (size_t i = 0; i < output_shapes.size(); ++i) {
shape_inference::ShapeHandle output_shape_handle;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(
output_shapes[i], &output_shape_handle));
c->set_output(static_cast<int>(i), output_shape_handle);
}
return absl::OkStatus();
}
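// Builds a ShapeHandle whose dimensions are laid out according to the given
// tensor format, including the size-4 inner dimension used by the
// NCHW_VECT_C and NHWC_VECT_W formats.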
Status MakeShapeFromFormat(TensorFormat format, DimensionOrConstant N,
const std::vector<DimensionOrConstant>& spatial,
DimensionOrConstant C, ShapeHandle* out,
shape_inference::InferenceContext* context) {
const int num_dims = GetTensorDimsFromSpatialDims(spatial.size(), format);
std::vector<DimensionHandle> dims_actual(num_dims);
dims_actual[GetTensorBatchDimIndex(num_dims, format)] = context->MakeDim(N);
int outer_c_index = GetTensorFeatureDimIndex(num_dims, format);
dims_actual[outer_c_index] = context->MakeDim(C);
if (format == FORMAT_NCHW_VECT_C) {
dims_actual[GetTensorInnerFeatureDimIndex(num_dims, format)] =
context->MakeDim(4);
} else if (format == FORMAT_NHWC_VECT_W) {
dims_actual[GetTensorInnerWidthDimIndex(num_dims, format)] =
context->MakeDim(4);
}
for (int spatial_dim = 0, end = spatial.size(); spatial_dim < end;
spatial_dim++) {
dims_actual[GetTensorSpatialDimIndex(num_dims, format, spatial_dim)] =
context->MakeDim(spatial[spatial_dim]);
}
*out = context->MakeShape(dims_actual);
return absl::OkStatus();
}
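// Splits a shape into batch, spatial, and feature dimensions according to
// the tensor format; for NCHW_VECT_C the inner vector dimension is folded
// back into the feature depth.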
Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
DimensionHandle* batch_dim,
absl::Span<DimensionHandle> spatial_dims,
DimensionHandle* filter_dim,
InferenceContext* context) {
const int32_t rank =
GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
*batch_dim = context->Dim(shape, GetTensorBatchDimIndex(rank, format));
for (int spatial_dim_index = 0, end = spatial_dims.size();
spatial_dim_index < end; ++spatial_dim_index) {
spatial_dims[spatial_dim_index] = context->Dim(
shape, GetTensorSpatialDimIndex(rank, format, spatial_dim_index));
}
*filter_dim = context->Dim(shape, GetTensorFeatureDimIndex(rank, format));
if (format == FORMAT_NCHW_VECT_C) {
TF_RETURN_IF_ERROR(context->Multiply(
*filter_dim,
context->Dim(shape, GetTensorInnerFeatureDimIndex(rank, format)),
filter_dim));
}
return absl::OkStatus();
}
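// Inverse of DimensionsFromShape: reassembles batch, spatial, and feature
// dimensions into a shape laid out for the given format, splitting the
// feature depth across the inner vector dimension for NCHW_VECT_C.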
Status ShapeFromDimensions(DimensionHandle batch_dim,
absl::Span<const DimensionHandle> spatial_dims,
DimensionHandle filter_dim, TensorFormat format,
absl::optional<DimensionHandle> vect_size,
InferenceContext* context, ShapeHandle* shape) {
const int32_t rank =
GetTensorDimsFromSpatialDims(spatial_dims.size(), format);
std::vector<DimensionHandle> out_dims(rank);
out_dims[tensorflow::GetTensorBatchDimIndex(rank, format)] = batch_dim;
for (int spatial_dim_index = 0, end = spatial_dims.size();
spatial_dim_index < end; ++spatial_dim_index) {
out_dims[tensorflow::GetTensorSpatialDimIndex(
rank, format, spatial_dim_index)] = spatial_dims[spatial_dim_index];
}
if (format == tensorflow::FORMAT_NCHW_VECT_C) {
CHECK(vect_size.has_value());
TF_RETURN_IF_ERROR(context->Divide(
filter_dim, *vect_size, true,
&out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)]));
out_dims[GetTensorInnerFeatureDimIndex(rank, format)] = *vect_size;
} else {
out_dims[tensorflow::GetTensorFeatureDimIndex(rank, format)] = filter_dim;
}
*shape = context->MakeShape(out_dims);
return absl::OkStatus();
}
namespace {
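// Shared shape function for the Conv2D variants; explicit padding is only
// consulted when the calling op supports it.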
Status Conv2DShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
string data_format_str, filter_format_str;
if (!c->GetAttr("data_format", &data_format_str).ok()) {
data_format_str = "NHWC";
}
if (!c->GetAttr("filter_format", &filter_format_str).ok()) {
filter_format_str =
data_format_str == "NCHW_VECT_C" ? "OIHW_VECT_I" : "HWIO";
}
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
FilterTensorFormat filter_format;
if (!FilterFormatFromString(filter_format_str, &filter_format)) {
return errors::InvalidArgument("Invalid filter format string: ",
filter_format_str);
}
constexpr int num_spatial_dims = 2;
const int rank = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format);
ShapeHandle conv_input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &conv_input_shape));
TF_RETURN_IF_ERROR(CheckFormatConstraintsOnShape(
data_format, conv_input_shape, "conv_input", c));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &filter_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, filter_shape, "filter", c));
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.size() != 4) {
return errors::InvalidArgument(
"Conv2D requires the dilation attribute to contain 4 values, but got: ",
dilations.size());
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument("Conv2D on data format ", data_format_str,
" requires the stride attribute to contain"
" 4 values, but got: ",
strides.size());
}
const int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
const int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
const int32_t dilation_rows = GetTensorDim(dilations, data_format, 'H');
const int32_t dilation_cols = GetTensorDim(dilations, data_format, 'W');
DimensionHandle batch_size_dim;
DimensionHandle input_depth_dim;
absl::InlinedVector<DimensionHandle, 2> input_spatial_dims(2);
TF_RETURN_IF_ERROR(DimensionsFromShape(
conv_input_shape, data_format, &batch_size_dim,
absl::MakeSpan(input_spatial_dims), &input_depth_dim, c));
DimensionHandle output_depth_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'O'));
DimensionHandle filter_rows_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'H'));
DimensionHandle filter_cols_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'W'));
DimensionHandle filter_input_depth_dim;
if (filter_format == FORMAT_OIHW_VECT_I) {
TF_RETURN_IF_ERROR(c->Multiply(
c->Dim(filter_shape,
GetFilterDimIndex<num_spatial_dims>(filter_format, 'I')),
c->Dim(filter_shape,
GetFilterTensorInnerInputChannelsDimIndex(rank, filter_format)),
&filter_input_depth_dim));
} else {
filter_input_depth_dim = c->Dim(
filter_shape, GetFilterDimIndex<num_spatial_dims>(filter_format, 'I'));
}
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0)
return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")");
if (input_depth_value != filter_input_depth_value) {
int64_t num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64_t output_depth_value = c->Value(output_depth_dim);
if (num_groups == 0)
return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
") is not a multiple of the number of groups (", num_groups, ")");
}
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status s = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!s.ok() && !errors::IsNotFound(s)) {
return s;
}
TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
4, data_format));
} else {
if (padding == Padding::EXPLICIT) {
return errors::InvalidArgument(
"Expected non-explicit padding but got explicit padding");
}
std::vector<int64_t> p_list;
Status s_p_list = c->GetAttr("padding_list", &p_list);
if (!s_p_list.ok() && !errors::IsNotFound(s_p_list)) {
return s_p_list;
}
if (s_p_list.ok() && !p_list.empty()) {
padding = Padding::EXPLICIT;
explicit_paddings = p_list;
TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
4, data_format));
}
}
DimensionHandle output_rows, output_cols;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, input_spatial_dims[0], filter_rows_dim, dilation_rows, stride_rows,
padding, pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, input_spatial_dims[1], filter_cols_dim, dilation_cols, stride_cols,
padding, pad_cols_before, pad_cols_after, &output_cols));
absl::optional<DimensionHandle> vect_size;
if (data_format == FORMAT_NCHW_VECT_C) {
vect_size.emplace(c->Dim(conv_input_shape,
GetTensorInnerFeatureDimIndex(rank, data_format)));
}
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(ShapeFromDimensions(
batch_size_dim, {output_rows, output_cols}, output_depth_dim, data_format,
vect_size, c, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
}
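// Shape function for the rank-generic Conv op, which supports extra leading
// batch dimensions (batch_dims), grouped convolution, and both 2D and 3D
// spatial ranks.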
Status ConvShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape = c->input(0);
ShapeHandle filter_shape = c->input(1);
int input_rank = c->Rank(input_shape);
int filter_rank = c->Rank(filter_shape);
if (input_rank == InferenceContext::kUnknownRank ||
filter_rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int batch_dims;
TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims));
if (batch_dims < 0) {
return absl::InvalidArgumentError("Batch dims must be non-negative.");
}
int standard_input_rank = input_rank - (batch_dims - 1);
if (standard_input_rank != 4 && standard_input_rank != 5) {
return absl::InvalidArgumentError(
absl::StrCat("Input tensor must be rank 4 or 5, excluding extra "
"batch dimensions, but got: ",
standard_input_rank));
}
if (filter_rank != 4 && filter_rank != 5) {
return absl::InvalidArgumentError(absl::StrCat(
"Filter tensor must be rank 4 or 5, but got: ", standard_input_rank));
}
if (filter_rank != standard_input_rank) {
return absl::InvalidArgumentError(
"Input tensor rank must be the same as filter rank.");
}
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
bool channels_last_format;
if (data_format_str == "CHANNELS_LAST") {
channels_last_format = true;
} else if (data_format_str == "CHANNELS_FIRST") {
channels_last_format = false;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Invalid data format: ", data_format_str));
}
TensorFormat data_format = channels_last_format ? FORMAT_NHWC : FORMAT_NCHW;
FilterTensorFormat filter_format = FORMAT_HWIO;
int spatial_dims = standard_input_rank - 2;
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.empty()) {
for (int i = 0; i < standard_input_rank; ++i) dilations.push_back(1);
}
if (dilations.size() != standard_input_rank) {
return absl::InvalidArgumentError(absl::StrCat(
"Conv requires the dilation attribute to contain ", standard_input_rank,
" values, but got: ", dilations.size()));
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != standard_input_rank) {
return absl::InvalidArgumentError(
absl::StrCat("Stride attribute should contain ", standard_input_rank,
" values, but got: ", strides.size()));
}
auto dim_index = [&](char dimension) {
if (spatial_dims == 2)
return GetTensorDimIndex<2>(data_format, dimension);
else
return GetTensorDimIndex<3>(data_format, dimension);
};
std::vector<int32_t> stride_dims(spatial_dims);
std::vector<int32_t> dilation_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
stride_dims[i] = strides[dim_index(static_cast<char>('0' + i))];
dilation_dims[i] = dilations[dim_index(static_cast<char>('0' + i))];
}
std::vector<DimensionHandle> batch_size_dim(batch_dims);
for (int i = 0; i < batch_dims; ++i) {
batch_size_dim[i] = c->Dim(input_shape, i);
}
std::vector<DimensionHandle> in_spatial_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
in_spatial_dims[i] = c->Dim(
input_shape, (batch_dims - 1) + dim_index(static_cast<char>('0' + i)));
}
DimensionHandle input_depth_dim =
c->Dim(input_shape, (batch_dims - 1) + dim_index('C'));
auto filter_dim_index = [&](char dimension) {
if (spatial_dims == 2)
return GetFilterDimIndex<2>(filter_format, dimension);
else
return GetFilterDimIndex<3>(filter_format, dimension);
};
std::vector<DimensionHandle> filter_spatial_dims(spatial_dims);
for (int i = 0; i < spatial_dims; ++i) {
filter_spatial_dims[i] =
c->Dim(filter_shape, filter_dim_index(static_cast<char>('0' + i)));
}
DimensionHandle output_depth_dim =
c->Dim(filter_shape, filter_dim_index('O'));
DimensionHandle filter_input_depth_dim;
filter_input_depth_dim = c->Dim(filter_shape, filter_dim_index('I'));
int groups;
TF_RETURN_IF_ERROR(c->GetAttr("groups", &groups));
if (groups < 1) {
return absl::InvalidArgumentError(
"Groups attribute should be a positive integer");
} else if (c->ValueKnown(input_depth_dim) &&
c->Value(input_depth_dim) % groups != 0) {
return absl::InvalidArgumentError(
"Number of groups should divide input depth");
} else if (c->ValueKnown(output_depth_dim) &&
c->Value(output_depth_dim) % groups != 0) {
return absl::InvalidArgumentError(
"Number of groups should divide output depth");
}
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0) {
return absl::InvalidArgumentError("Depth of filter must not be 0");
}
if (input_depth_value % filter_input_depth_value != 0) {
return absl::InvalidArgumentError(
absl::StrCat("Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")"));
}
if (input_depth_value / filter_input_depth_value != groups) {
return absl::InvalidArgumentError(
absl::StrCat("Input depth divided by filter input depth does not "
"match with groups parameter (",
groups, ")"));
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
if (spatial_dims == 3 && padding == Padding::EXPLICIT) {
return absl::InvalidArgumentError(
"Explicit padding not supported for 3D Convolution");
}
std::vector<int64_t> explicit_paddings;
Status s = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!s.ok() && !absl::IsNotFound(s)) {
return s;
}
TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
4, data_format));
std::vector<DimensionHandle> output_spatial_dims(spatial_dims);
std::vector<int64_t> pad_before(spatial_dims, -1);
std::vector<int64_t> pad_after(spatial_dims, -1);
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_before[0], &pad_after[0]);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_before[1], &pad_after[1]);
}
for (int i = 0; i < spatial_dims; ++i) {
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_spatial_dims[i], filter_spatial_dims[i], dilation_dims[i],
stride_dims[i], padding, pad_before[i], pad_after[i],
&output_spatial_dims[i]));
}
ShapeHandle output_shape;
std::vector<DimensionHandle> output_shape_vector(input_rank);
for (int i = 0; i < batch_dims; ++i) {
output_shape_vector[i] = batch_size_dim[i];
}
if (channels_last_format) {
for (int i = 0; i < spatial_dims; ++i) {
output_shape_vector[batch_dims + i] = output_spatial_dims[i];
}
output_shape_vector[batch_dims + spatial_dims] = output_depth_dim;
} else {
output_shape_vector[batch_dims] = output_depth_dim;
for (int i = 0; i < spatial_dims; ++i) {
output_shape_vector[batch_dims + 1 + i] = output_spatial_dims[i];
}
}
output_shape = c->MakeShape(output_shape_vector);
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status Conv2DShapeWithExplicitPadding(shape_inference::InferenceContext* c) {
return Conv2DShapeImpl(c, true);
}
Status Conv2DShape(shape_inference::InferenceContext* c) {
return Conv2DShapeImpl(c, false);
}
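// Shape function for Conv3D: NCDHW inputs are first permuted to an NDHWC
// view so the spatial arithmetic below is format-independent.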
Status Conv3DShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 5, &filter_shape));
string data_format;
Status s = c->GetAttr("data_format", &data_format);
std::vector<int32> dilations;
TF_RETURN_IF_ERROR(c->GetAttr("dilations", &dilations));
if (dilations.size() != 5) {
return errors::InvalidArgument(
"Conv3D requires the dilation attribute to contain 5 values, but got: ",
dilations.size());
}
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 5) {
return errors::InvalidArgument(
"Conv3D requires the stride attribute to contain 5 values, but got: ",
strides.size());
}
int32_t stride_planes, stride_rows, stride_cols;
int32_t dilation_planes, dilation_rows, dilation_cols;
if (s.ok() && data_format == "NCDHW") {
auto dim = [&](char dimension) {
return c->Dim(input_shape, GetTensorDimIndex<3>(FORMAT_NCHW, dimension));
};
input_shape =
c->MakeShape({{dim('N'), dim('0'), dim('1'), dim('2'), dim('C')}});
stride_planes = strides[2];
stride_rows = strides[3];
stride_cols = strides[4];
    // For NCDHW, index 3 is rows (H) and index 4 is cols (W).
    dilation_planes = dilations[2];
    dilation_rows = dilations[3];
    dilation_cols = dilations[4];
} else {
stride_planes = strides[1];
stride_rows = strides[2];
stride_cols = strides[3];
    dilation_planes = dilations[1];
    dilation_rows = dilations[2];
    dilation_cols = dilations[3];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_planes_dim = c->Dim(input_shape, 1);
DimensionHandle in_rows_dim = c->Dim(input_shape, 2);
DimensionHandle in_cols_dim = c->Dim(input_shape, 3);
DimensionHandle input_depth_dim = c->Dim(input_shape, 4);
DimensionHandle filter_planes_dim = c->Dim(filter_shape, 0);
DimensionHandle filter_rows_dim = c->Dim(filter_shape, 1);
DimensionHandle filter_cols_dim = c->Dim(filter_shape, 2);
DimensionHandle filter_input_depth_dim = c->Dim(filter_shape, 3);
DimensionHandle output_depth_dim = c->Dim(filter_shape, 4);
if (c->ValueKnown(input_depth_dim) && c->ValueKnown(filter_input_depth_dim)) {
int64_t input_depth_value = c->Value(input_depth_dim),
filter_input_depth_value = c->Value(filter_input_depth_dim);
if (filter_input_depth_value == 0)
return errors::InvalidArgument("Depth of filter must not be 0");
if (input_depth_value % filter_input_depth_value != 0)
return errors::InvalidArgument(
"Depth of input (", input_depth_value,
") is not a multiple of input depth of filter (",
filter_input_depth_value, ")");
if (input_depth_value != filter_input_depth_value) {
int64_t num_groups = input_depth_value / filter_input_depth_value;
if (c->ValueKnown(output_depth_dim)) {
int64_t output_depth_value = c->Value(output_depth_dim);
if (num_groups == 0)
return errors::InvalidArgument("Number of groups must not be 0");
if (output_depth_value % num_groups != 0)
return errors::InvalidArgument(
"Depth of output (", output_depth_value,
") is not a multiple of the number of groups (", num_groups, ")");
}
}
}
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_planes, output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_planes_dim, filter_planes_dim, dilation_planes, stride_planes,
padding, -1, -1, &output_planes));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding, -1,
-1, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding, -1,
-1, &output_cols));
ShapeHandle output_shape;
if (data_format == "NCDHW") {
output_shape = c->MakeShape({batch_size_dim, output_depth_dim,
output_planes, output_rows, output_cols});
} else {
output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows,
output_cols, output_depth_dim});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
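// Shape function for Conv2DBackpropInput: the output shape comes from the
// input_sizes tensor (input 0), which holds either all 4 dimensions or just
// the 2 spatial ones.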
Status Conv2DBackpropInputShape(shape_inference::InferenceContext* c) {
string data_format_str;
if (!c->GetAttr("data_format", &data_format_str).ok()) {
data_format_str = "NHWC";
}
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
ShapeHandle output_grad_shape = c->input(2);
TF_RETURN_IF_ERROR(c->WithRank(output_grad_shape, 4, &output_grad_shape));
ShapeHandle filter_shape = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(filter_shape, 4, &filter_shape));
DimensionHandle batch_size_dim;
DimensionHandle output_grad_depth_dim;
absl::InlinedVector<DimensionHandle, 2> output_grad_spatial_dims(2);
TF_RETURN_IF_ERROR(DimensionsFromShape(
output_grad_shape, data_format, &batch_size_dim,
absl::MakeSpan(output_grad_spatial_dims), &output_grad_depth_dim, c));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(output_grad_depth_dim, c->Dim(filter_shape, 3), &unused));
ShapeHandle specified_input_grad_shape;
TF_RETURN_IF_ERROR(
c->MakeShapeFromShapeTensor(0, &specified_input_grad_shape));
if (c->Rank(specified_input_grad_shape) == InferenceContext::kUnknownRank) {
TF_RETURN_IF_ERROR(c->WithRank(specified_input_grad_shape, 4,
&specified_input_grad_shape));
}
DimensionHandle input_grad_depth_dim;
absl::InlinedVector<DimensionHandle, 2> specified_input_grad_spatial_dims(2);
int specified_input_grad_rank = c->Rank(specified_input_grad_shape);
if (specified_input_grad_rank == 4) {
DimensionHandle specified_batch_size_dim;
TF_RETURN_IF_ERROR(DimensionsFromShape(
specified_input_grad_shape, data_format, &specified_batch_size_dim,
absl::MakeSpan(specified_input_grad_spatial_dims),
&input_grad_depth_dim, c));
TF_RETURN_IF_ERROR(
c->Merge(specified_batch_size_dim, batch_size_dim, &unused));
} else if (specified_input_grad_rank == 2) {
specified_input_grad_spatial_dims[0] =
c->Dim(specified_input_grad_shape, 0);
specified_input_grad_spatial_dims[1] =
c->Dim(specified_input_grad_shape, 1);
input_grad_depth_dim = c->Dim(filter_shape, 2);
} else {
return errors::InvalidArgument(
"Conv2DBackpropInput requires input_sizes to contain 4 values or 2 "
"values, but got: ",
specified_input_grad_rank);
}
ShapeHandle input_grad_shape;
TF_RETURN_IF_ERROR(ShapeFromDimensions(
batch_size_dim, specified_input_grad_spatial_dims, input_grad_depth_dim,
data_format, absl::nullopt, c, &input_grad_shape));
c->set_output(0, input_grad_shape);
return absl::OkStatus();
}
Status Conv2DBackpropFilterWithBiasShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
string data_format;
Status s = c->GetAttr("data_format", &data_format);
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
if (s.ok() && data_format == "NCHW") {
c->set_output(1, c->Vector(c->Dim(input_shape, -3)));
} else {
c->set_output(1, c->Vector(c->Dim(input_shape, -1)));
}
ShapeHandle sh;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &sh));
TF_RETURN_IF_ERROR(c->WithRank(sh, 4, &sh));
c->set_output(0, sh);
return absl::OkStatus();
}
namespace {
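// Shared shape function for the DepthwiseConv2dNative variants: the output
// depth is input_depth * depth_multiplier.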
Status DepthwiseConv2DNativeShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape));
ShapeHandle filter_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &filter_shape));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"DepthwiseConv2D requires the stride attribute to contain 4 values, "
"but got: ",
strides.size());
}
std::vector<int32> dilations;
if (!c->GetAttr("dilations", &dilations).ok()) {
dilations.resize(4, 1);
}
if (dilations.size() != 4) {
return errors::InvalidArgument(
"DepthwiseConv2D requires the dilations attribute to contain 4 values, "
"but got: ",
dilations.size());
}
string data_format_str;
Status s = c->GetAttr("data_format", &data_format_str);
TensorFormat data_format;
if (!s.ok() || !FormatFromString(data_format_str, &data_format)) {
data_format = FORMAT_NHWC;
}
int32_t stride_rows;
int32_t stride_cols;
int32_t dilation_rows;
int32_t dilation_cols;
if (data_format == FORMAT_NCHW) {
input_shape =
c->MakeShape({{c->Dim(input_shape, 0), c->Dim(input_shape, 2),
c->Dim(input_shape, 3), c->Dim(input_shape, 1)}});
stride_rows = strides[2];
stride_cols = strides[3];
dilation_rows = dilations[2];
dilation_cols = dilations[3];
} else {
stride_rows = strides[1];
stride_cols = strides[2];
dilation_rows = dilations[1];
dilation_cols = dilations[2];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_rows_dim = c->Dim(input_shape, 1);
DimensionHandle in_cols_dim = c->Dim(input_shape, 2);
DimensionHandle filter_rows_dim = c->Dim(filter_shape, 0);
DimensionHandle filter_cols_dim = c->Dim(filter_shape, 1);
DimensionHandle input_depth = c->Dim(filter_shape, 2);
DimensionHandle depth_multiplier = c->Dim(filter_shape, 3);
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input_shape, 3), input_depth, &input_depth));
DimensionHandle output_depth;
TF_RETURN_IF_ERROR(c->Multiply(input_depth, depth_multiplier, &output_depth));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!status.ok() && !errors::IsNotFound(status)) {
return status;
}
TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
4, data_format));
} else {
DCHECK(padding != Padding::EXPLICIT);
}
DimensionHandle output_rows, output_cols;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, filter_rows_dim, dilation_rows, stride_rows, padding,
pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, filter_cols_dim, dilation_cols, stride_cols, padding,
pad_cols_before, pad_cols_after, &output_cols));
ShapeHandle output_shape;
if (data_format == FORMAT_NCHW) {
output_shape =
c->MakeShape({batch_size_dim, output_depth, output_rows, output_cols});
} else {
output_shape =
c->MakeShape({batch_size_dim, output_rows, output_cols, output_depth});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
}
Status DepthwiseConv2DNativeShape(shape_inference::InferenceContext* c) {
return DepthwiseConv2DNativeShapeImpl(c, false);
}
Status DepthwiseConv2DNativeShapeWithExplicitPadding(
shape_inference::InferenceContext* c) {
return DepthwiseConv2DNativeShapeImpl(c, true);
}
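// Shape function for AvgPool: windows the two spatial dimensions with the
// ksize/strides attributes; batch and depth pass through unchanged.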
Status AvgPoolShape(shape_inference::InferenceContext* c) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"AvgPool requires the stride attribute to contain 4 values, but got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"AvgPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols}, depth_dim,
&output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status AvgPoolGradShape(shape_inference::InferenceContext* c) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s));
TF_RETURN_IF_ERROR(c->WithRank(s, 4, &s));
c->set_output(0, s);
return absl::OkStatus();
}
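// Shape function for FusedBatchNorm: output 0 matches x with the channel
// dimension merged against the per-channel vector inputs, and outputs 1-4
// are channel-sized vectors.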
Status FusedBatchNormShape(shape_inference::InferenceContext* c) {
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &x));
bool is_training;
TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
float exponential_avg_factor;
if (!c->GetAttr("exponential_avg_factor", &exponential_avg_factor).ok()) {
exponential_avg_factor = 1.0f;
}
int number_inputs = (is_training && exponential_avg_factor == 1.0f) ? 3 : 5;
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(x, channel_dim_index);
for (int i = 1; i < number_inputs; ++i) {
ShapeHandle vec;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec));
TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim));
}
ShapeHandle y;
TF_RETURN_IF_ERROR(c->ReplaceDim(x, channel_dim_index, channel_dim, &y));
c->set_output(0, y);
ShapeHandle vector_shape = c->Vector(channel_dim);
c->set_output(1, vector_shape);
c->set_output(2, vector_shape);
c->set_output(3, vector_shape);
c->set_output(4, vector_shape);
return absl::OkStatus();
}
Status FusedBatchNormV3Shape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormShape(c));
c->set_output(5, c->UnknownShape());
return absl::OkStatus();
}
Status FusedBatchNormExShape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormV3Shape(c));
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &x));
int channel_dim_index = GetTensorFeatureDimIndex(4, data_format);
DimensionHandle channel_dim = c->Dim(x, channel_dim_index);
if (c->ValueKnown(channel_dim) && c->Value(channel_dim) % 4 != 0) {
return errors::InvalidArgument(
"_FusedBatchNormEx channel dimension must be divisible by 4.");
}
return absl::OkStatus();
}
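// Shape function for FusedBatchNormGrad: x_backprop matches x, the scale
// and offset gradients are channel-sized vectors, and outputs 3-4
// (reserve spaces) are empty vectors.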
Status FusedBatchNormGradShape(shape_inference::InferenceContext* c) {
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle y_backprop;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &y_backprop));
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &x));
bool is_training;
TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(y_backprop, channel_dim_index);
TF_RETURN_IF_ERROR(
c->Merge(channel_dim, c->Dim(x, channel_dim_index), &channel_dim));
for (int i = 2; i < 5; ++i) {
ShapeHandle vec;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec));
TF_RETURN_IF_ERROR(c->Merge(channel_dim, c->Dim(vec, 0), &channel_dim));
}
ShapeHandle x_backprop;
TF_RETURN_IF_ERROR(
c->ReplaceDim(y_backprop, channel_dim_index, channel_dim, &x_backprop));
c->set_output(0, x_backprop);
c->set_output(1, c->Vector(channel_dim));
c->set_output(2, c->Vector(channel_dim));
c->set_output(3, c->Vector(0));
c->set_output(4, c->Vector(0));
return absl::OkStatus();
}
Status FusedBatchNormGradExShape(shape_inference::InferenceContext* c) {
TF_RETURN_IF_ERROR(FusedBatchNormGradShape(c));
int num_side_inputs;
TF_RETURN_IF_ERROR(c->GetAttr("num_side_inputs", &num_side_inputs));
if (num_side_inputs == 0) {
return absl::OkStatus();
}
string data_format_str;
TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str));
TensorFormat data_format;
if (!FormatFromString(data_format_str, &data_format)) {
return errors::InvalidArgument("Invalid data format string: ",
data_format_str);
}
const int rank =
(data_format_str == "NDHWC" || data_format_str == "NCDHW") ? 5 : 4;
ShapeHandle y_backprop;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &y_backprop));
ShapeHandle x;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), rank, &x));
int channel_dim_index = GetTensorFeatureDimIndex(rank, data_format);
DimensionHandle channel_dim = c->Dim(y_backprop, channel_dim_index);
TF_RETURN_IF_ERROR(
c->Merge(channel_dim, c->Dim(x, channel_dim_index), &channel_dim));
ShapeHandle side_input_backprop;
TF_RETURN_IF_ERROR(c->ReplaceDim(y_backprop, channel_dim_index, channel_dim,
&side_input_backprop));
c->set_output(5, side_input_backprop);
return absl::OkStatus();
}
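// Reads the diag_index input, which is either a scalar or a vector of one
// or two elements, into [lower_diag_index, upper_diag_index].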
Status ReadDiagIndex(InferenceContext* c, const Tensor* diag_index_tensor,
int32* lower_diag_index, int32* upper_diag_index) {
if (diag_index_tensor->dims() == 0) {
*lower_diag_index = diag_index_tensor->scalar<int32>()();
*upper_diag_index = *lower_diag_index;
} else {
int32_t num_elements = diag_index_tensor->dim_size(0);
if (num_elements == 1) {
*lower_diag_index = diag_index_tensor->vec<int32>()(0);
*upper_diag_index = *lower_diag_index;
} else if (num_elements == 2) {
*lower_diag_index = diag_index_tensor->vec<int32>()(0);
*upper_diag_index = diag_index_tensor->vec<int32>()(1);
} else {
return errors::InvalidArgument(
"diag_index must be a vector with one or two elements. It has ",
num_elements, " elements.");
}
}
return absl::OkStatus();
}
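// Shape function for MatrixDiagPart(V2/V3): validates the diagonal band
// against the matrix size and computes max_diag_len for the output.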
Status MatrixDiagPartV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_index_shape, unused_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &diag_index_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_shape));
const Tensor* diag_index_tensor = c->input_tensor(1);
if (!c->RankKnown(input_shape) || !c->FullyDefined(diag_index_shape) ||
diag_index_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
const int32_t input_rank = c->Rank(input_shape);
const int32_t num_rows = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t num_cols = c->Value(c->Dim(input_shape, input_rank - 1));
int32_t max_diag_len = InferenceContext::kUnknownDim;
if (num_rows != InferenceContext::kUnknownDim &&
num_cols != InferenceContext::kUnknownDim) {
if (lower_diag_index != 0 &&
(-num_rows >= lower_diag_index || lower_diag_index >= num_cols)) {
return errors::InvalidArgument("lower_diag_index is out of bound.");
}
if (upper_diag_index != 0 &&
(-num_rows >= upper_diag_index || upper_diag_index >= num_cols)) {
return errors::InvalidArgument("upper_diag_index is out of bound.");
}
max_diag_len = std::min(num_rows + std::min(upper_diag_index, 0),
num_cols - std::max(lower_diag_index, 0));
}
std::vector<DimensionHandle> dims;
dims.reserve(input_rank - 2);
for (int i = 0; i < input_rank - 2; ++i) {
dims.push_back(c->Dim(input_shape, i));
}
if (lower_diag_index < upper_diag_index) {
dims.push_back(c->MakeDim(upper_diag_index - lower_diag_index + 1));
}
dims.push_back(c->MakeDim(max_diag_len));
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
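// Shape function for MatrixDiag(V2/V3): infers num_rows/num_cols from the
// diagonal length and band when they are not given, and validates their
// consistency otherwise.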
Status MatrixDiagV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_index_shape, unused_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &diag_index_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused_shape));
const Tensor* diag_index_tensor = c->input_tensor(1);
if (!c->RankKnown(input_shape) || !c->FullyDefined(diag_index_shape) ||
diag_index_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
const int32_t input_rank = c->Rank(input_shape);
if (lower_diag_index < upper_diag_index) {
const int32_t num_diags = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t other_dim = c->Value(c->Dim(input_shape, input_rank - 1));
if (num_diags != (upper_diag_index - lower_diag_index + 1)) {
return errors::InvalidArgument(
"The number of rows of `diagonal` doesn't match the number of "
"diagonals implied from `d_lower` and `d_upper`.\n",
"num_diags = ", num_diags, ", d_lower = ", lower_diag_index,
", d_upper = ", upper_diag_index, " ", input_rank, " ", other_dim);
}
}
const Tensor* num_rows_tensor = c->input_tensor(2);
const Tensor* num_cols_tensor = c->input_tensor(3);
int64_t num_rows = -1;
int64_t num_cols = -1;
if (num_rows_tensor != nullptr) {
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(num_rows_tensor, &num_rows));
}
if (num_cols_tensor != nullptr) {
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(num_cols_tensor, &num_cols));
}
const int32_t max_diag_len = c->Value(c->Dim(input_shape, input_rank - 1));
const int32_t min_num_rows = max_diag_len - std::min(upper_diag_index, 0);
const int32_t min_num_cols = max_diag_len + std::max(lower_diag_index, 0);
if (num_rows == -1 && num_cols == -1) {
num_rows = std::max(min_num_rows, min_num_cols);
num_cols = num_rows;
}
if (num_rows == -1) {
num_rows = min_num_rows;
} else if (num_rows < min_num_rows) {
return errors::InvalidArgument("num_rows is too small");
}
if (num_cols == -1) {
num_cols = min_num_cols;
} else if (num_cols < min_num_cols) {
return errors::InvalidArgument("num_cols is too small.");
}
if (num_rows != min_num_rows && num_cols != min_num_cols) {
return errors::InvalidArgument(
"num_rows and num_cols are not consistent with lower_diag_index, "
"upper_diag_index, and the length of the given diagonals.\n",
"num_rows = ", num_rows, " != min_num_rows = ", min_num_rows,
", num_cols = ", num_cols, " != min_num_cols = ", min_num_cols);
}
ShapeHandle output_shape;
const DimensionHandle output_row_dim = c->MakeDim(num_rows);
const DimensionHandle output_col_dim = c->MakeDim(num_cols);
if (lower_diag_index == upper_diag_index) {
TF_RETURN_IF_ERROR(c->ReplaceDim(input_shape, input_rank - 1,
output_row_dim, &output_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(output_shape, c->Vector(output_col_dim), &output_shape));
} else {
TF_RETURN_IF_ERROR(c->ReplaceDim(input_shape, input_rank - 2,
output_row_dim, &output_shape));
TF_RETURN_IF_ERROR(c->ReplaceDim(output_shape, input_rank - 1,
output_col_dim, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
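// Shape function for MatrixSetDiag(V2/V3): checks that the diagonal's rank
// and the band are compatible with the input matrix, whose shape is passed
// through.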
Status MatrixSetDiagV2Shape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape, diag_shape, diag_index_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input_shape));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &diag_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(2), 1, &diag_index_shape));
int32_t lower_diag_index = 0;
int32_t upper_diag_index = 0;
bool diag_index_known = false;
const Tensor* diag_index_tensor = c->input_tensor(2);
if (diag_index_tensor != nullptr && c->FullyDefined(diag_index_shape)) {
diag_index_known = true;
TF_RETURN_IF_ERROR(ReadDiagIndex(c, diag_index_tensor, &lower_diag_index,
&upper_diag_index));
if (lower_diag_index > upper_diag_index) {
return errors::InvalidArgument(
"lower_diag_index is greater than upper_diag_index");
}
}
if (c->RankKnown(input_shape)) {
int32_t input_rank = c->Rank(input_shape);
if (diag_index_known) {
TF_RETURN_IF_ERROR(c->WithRank(
c->input(1),
(lower_diag_index == upper_diag_index) ? input_rank - 1 : input_rank,
&diag_shape));
} else {
TF_RETURN_IF_ERROR(
c->WithRankAtLeast(c->input(1), input_rank - 1, &diag_shape));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(c->input(1), input_rank, &diag_shape));
}
const int32_t num_rows = c->Value(c->Dim(input_shape, input_rank - 2));
const int32_t num_cols = c->Value(c->Dim(input_shape, input_rank - 1));
if (num_rows != InferenceContext::kUnknownDim &&
num_cols != InferenceContext::kUnknownDim) {
if (lower_diag_index != 0 &&
(-num_rows >= lower_diag_index || lower_diag_index >= num_cols)) {
return errors::InvalidArgument("lower_diag_index is out of bound.");
}
if (upper_diag_index != 0 &&
(-num_rows >= upper_diag_index || upper_diag_index >= num_cols)) {
return errors::InvalidArgument("upper_diag_index is out of bound.");
}
}
}
ShapeHandle output_shape = input_shape;
if (c->RankKnown(diag_shape) && !c->FullyDefined(input_shape)) {
ShapeHandle diag_prefix;
TF_RETURN_IF_ERROR(c->Subshape(
diag_shape, 0, (lower_diag_index == upper_diag_index) ? -1 : -2,
&diag_prefix));
TF_RETURN_IF_ERROR(
c->Concatenate(diag_prefix, c->UnknownShapeOfRank(2), &diag_shape));
TF_RETURN_IF_ERROR(c->Merge(input_shape, diag_shape, &output_shape));
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
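// Shared shape function for the MaxPool variants. Unlike most pooling ops,
// the depth dimension may also be windowed (kernel_depth/stride_depth).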
Status MaxPoolShapeImpl(shape_inference::InferenceContext* c,
bool supports_explicit_padding) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the stride attribute to contain 4 values, but got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_depth = GetTensorDim(strides, data_format, 'C');
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_depth = GetTensorDim(kernel_sizes, data_format, 'C');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle in_depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
std::vector<int64_t> explicit_paddings;
if (supports_explicit_padding) {
Status status = c->GetAttr("explicit_paddings", &explicit_paddings);
if (!status.ok() && !errors::IsNotFound(status)) {
return status;
}
TF_RETURN_IF_ERROR(CheckValidPadding(padding, explicit_paddings,
4, data_format));
} else {
DCHECK(padding != Padding::EXPLICIT);
}
ShapeHandle output_shape;
DimensionHandle output_rows, output_cols, output_depth;
int64_t pad_rows_before = -1, pad_rows_after = -1;
int64_t pad_cols_before = -1, pad_cols_after = -1;
if (padding == Padding::EXPLICIT) {
GetExplicitPaddingForDim(explicit_paddings, data_format, 'H',
&pad_rows_before, &pad_rows_after);
GetExplicitPaddingForDim(explicit_paddings, data_format, 'W',
&pad_cols_before, &pad_cols_after);
}
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_rows_dim, kernel_rows, 1, stride_rows, padding,
pad_rows_before, pad_rows_after, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_cols_dim, kernel_cols, 1, stride_cols, padding,
pad_cols_before, pad_cols_after, &output_cols));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDimsV2(
c, in_depth_dim, kernel_depth, 1, stride_depth, padding,
0, 0, &output_depth));
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols},
output_depth, &output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status MaxPoolShape(shape_inference::InferenceContext* c) {
return MaxPoolShapeImpl(c, false);
}
Status MaxPoolGradShape(shape_inference::InferenceContext* c) {
return UnchangedShapeWithRank(c, 4);
}
Status MaxPoolShapeWithExplicitPadding(shape_inference::InferenceContext* c) {
return MaxPoolShapeImpl(c, true);
}
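// Shape function for MaxPoolV2-style ops, where the pooling window and
// strides are supplied either as attributes or as the last two rank-1,
// length-4 inputs (num_inputs is the op's input count in the latter case).
// If they are inputs whose values are not statically known, the output shape
// is unknown.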
Status MaxPoolV2Shape(shape_inference::InferenceContext* c, int num_inputs) {
string data_format_str;
TensorFormat data_format;
Status s = c->GetAttr("data_format", &data_format_str);
if (s.ok()) {
FormatFromString(data_format_str, &data_format);
} else {
data_format = FORMAT_NHWC;
}
const int rank = (data_format == FORMAT_NCHW_VECT_C) ? 5 : 4;
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape));
TF_RETURN_IF_ERROR(
CheckFormatConstraintsOnShape(data_format, input_shape, "input", c));
std::vector<int32> kernel_sizes;
std::vector<int32> strides;
if (c->num_inputs() + 2 == num_inputs) {
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
} else {
ShapeHandle size;
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 2), 1, &size));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 1), 1, &size));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(size, 0), 4, &unused));
const Tensor* kernel_sizes_tensor = c->input_tensor(c->num_inputs() - 2);
if (kernel_sizes_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
kernel_sizes.resize(kernel_sizes_tensor->shape().num_elements());
auto kernel_sizes_vec = kernel_sizes_tensor->flat<int32>();
std::copy_n(&kernel_sizes_vec(0), kernel_sizes.size(),
kernel_sizes.begin());
const Tensor* strides_tensor = c->input_tensor(c->num_inputs() - 1);
if (strides_tensor == nullptr) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
strides.resize(strides_tensor->shape().num_elements());
auto strides_vec = strides_tensor->flat<int32>();
std::copy_n(&strides_vec(0), strides.size(), strides.begin());
}
if (strides.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the stride attribute to contain 4 values, but "
"got: ",
strides.size());
}
if (kernel_sizes.size() != 4) {
return errors::InvalidArgument(
"MaxPool requires the ksize attribute to contain 4 values, but got: ",
kernel_sizes.size());
}
int32_t stride_depth = GetTensorDim(strides, data_format, 'C');
int32_t stride_rows = GetTensorDim(strides, data_format, 'H');
int32_t stride_cols = GetTensorDim(strides, data_format, 'W');
int32_t kernel_depth = GetTensorDim(kernel_sizes, data_format, 'C');
int32_t kernel_rows = GetTensorDim(kernel_sizes, data_format, 'H');
int32_t kernel_cols = GetTensorDim(kernel_sizes, data_format, 'W');
constexpr int num_spatial_dims = 2;
DimensionHandle batch_size_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'N'));
DimensionHandle in_rows_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'H'));
DimensionHandle in_cols_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'W'));
DimensionHandle in_depth_dim = c->Dim(
input_shape, GetTensorDimIndex<num_spatial_dims>(data_format, 'C'));
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
ShapeHandle output_shape;
DimensionHandle output_rows, output_cols, output_depth;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_depth_dim, kernel_depth, stride_depth, padding, &output_depth));
TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size_dim,
{output_rows, output_cols},
output_depth, &output_shape, c));
c->set_output(0, output_shape);
return absl::OkStatus();
}
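// Shape function for 3D pooling ops (AvgPool3D/MaxPool3D). The input must be
// rank 5 and the "ksize"/"strides" attributes must each contain 5 values;
// both NDHWC (the default) and NCDHW data formats are supported.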
Status Pool3DShape(shape_inference::InferenceContext* c) {
ShapeHandle input_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape));
string data_format;
Status s = c->GetAttr("data_format", &data_format);
std::vector<int32> strides;
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides));
if (strides.size() != 5) {
return errors::InvalidArgument(
"Pool3D ops require the stride attribute to contain 5 values, but "
"got: ",
strides.size());
}
std::vector<int32> kernel_sizes;
TF_RETURN_IF_ERROR(c->GetAttr("ksize", &kernel_sizes));
if (kernel_sizes.size() != 5) {
return errors::InvalidArgument(
"Pool3D requires the ksize attribute to contain 5 values, but got: ",
kernel_sizes.size());
}
int32_t stride_planes, stride_rows, stride_cols;
int32_t kernel_planes, kernel_rows, kernel_cols;
if (s.ok() && data_format == "NCDHW") {
auto dim = [&](char dimension) {
return c->Dim(input_shape, GetTensorDimIndex<3>(FORMAT_NCHW, dimension));
};
input_shape =
c->MakeShape({{dim('N'), dim('0'), dim('1'), dim('2'), dim('C')}});
stride_planes = strides[2];
stride_rows = strides[3];
stride_cols = strides[4];
kernel_planes = kernel_sizes[2];
kernel_rows = kernel_sizes[3];
kernel_cols = kernel_sizes[4];
} else {
stride_planes = strides[1];
stride_rows = strides[2];
stride_cols = strides[3];
kernel_planes = kernel_sizes[1];
kernel_rows = kernel_sizes[2];
kernel_cols = kernel_sizes[3];
}
DimensionHandle batch_size_dim = c->Dim(input_shape, 0);
DimensionHandle in_planes_dim = c->Dim(input_shape, 1);
DimensionHandle in_rows_dim = c->Dim(input_shape, 2);
DimensionHandle in_cols_dim = c->Dim(input_shape, 3);
DimensionHandle output_depth_dim = c->Dim(input_shape, 4);
Padding padding;
TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding));
DimensionHandle output_planes, output_rows, output_cols;
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_planes_dim, kernel_planes, stride_planes, padding, &output_planes));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_rows_dim, kernel_rows, stride_rows, padding, &output_rows));
TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims(
c, in_cols_dim, kernel_cols, stride_cols, padding, &output_cols));
ShapeHandle output_shape;
if (data_format == "NCDHW") {
output_shape = c->MakeShape({batch_size_dim, output_depth_dim,
output_planes, output_rows, output_cols});
} else {
output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows,
output_cols, output_depth_dim});
}
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status MaxPool3DGradShape(shape_inference::InferenceContext* c) {
return UnchangedShapeWithRank(c, 5);
}
Status AvgPool3DGradShape(shape_inference::InferenceContext* c) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s));
TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s));
c->set_output(0, s);
return absl::OkStatus();
}
Status UnknownShape(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->UnknownShape());
}
return absl::OkStatus();
}
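// Validates that every reduction index lies in [-input_rank, input_rank) and
// records the wrapped (non-negative) index of each reduced dimension in
// true_indices.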
template <typename T>
Status ReductionShapeHelper(const Tensor* reduction_indices_t,
const int32_t input_rank,
std::set<int64_t>* true_indices) {
auto reduction_indices = reduction_indices_t->flat<T>();
for (int i = 0; i < reduction_indices_t->NumElements(); ++i) {
const T reduction_index = reduction_indices(i);
if (reduction_index < -input_rank || reduction_index >= input_rank) {
return errors::InvalidArgument("Invalid reduction dimension ",
reduction_index, " for input with ",
input_rank, " dimensions.");
}
auto wrapped_index = reduction_index;
if (wrapped_index < 0) {
wrapped_index += input_rank;
}
true_indices->insert(wrapped_index);
}
return absl::OkStatus();
}
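// Shape function for reduction ops (Sum, Mean, Max, ...). Reduced dimensions
// are dropped from the output, or kept with size 1 when keep_dims is true;
// e.g. [2,3,4] reduced over axis 1 becomes [2,4], or [2,1,4] with keep_dims.
// If the reduction indices are not statically known, the output is unknown,
// though its rank is preserved when keep_dims is set and the input rank is
// known.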
Status ReductionShape(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle indices;
if (c->graph_def_version() < 21) {
indices = c->input(1);
} else {
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &indices));
}
bool keep_dims;
TF_RETURN_IF_ERROR(c->GetAttr("keep_dims", &keep_dims));
const Tensor* reduction_indices_t = c->input_tensor(1);
if (reduction_indices_t == nullptr || !c->RankKnown(input)) {
if (keep_dims && c->RankKnown(input)) {
c->set_output(0, c->UnknownShapeOfRank(c->Rank(input)));
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
const int32_t input_rank = c->Rank(input);
std::set<int64_t> true_indices;
if (reduction_indices_t->dtype() == DataType::DT_INT32) {
TF_RETURN_IF_ERROR(ReductionShapeHelper<int32>(reduction_indices_t,
input_rank, &true_indices));
} else if (reduction_indices_t->dtype() == DataType::DT_INT64) {
TF_RETURN_IF_ERROR(ReductionShapeHelper<int64_t>(
reduction_indices_t, input_rank, &true_indices));
} else {
return errors::InvalidArgument(
"reduction_indices can only be int32 or int64");
}
std::vector<DimensionHandle> dims;
for (int i = 0; i < input_rank; ++i) {
if (true_indices.count(i) > 0) {
if (keep_dims) {
dims.emplace_back(c->MakeDim(1));
}
} else {
dims.emplace_back(c->Dim(input, i));
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
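// Shared shape function for Concat/ConcatV2: the values being concatenated
// occupy inputs [start_value_index, end_value_index), and the concat axis is
// read from input dim_index. Non-axis dimensions are merged across all inputs
// and the axis dimensions are summed, so e.g. concatenating [2,3] and [2,5]
// on axis 1 yields [2,8]. If the axis is not statically known, only the
// common rank can be inferred.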
Status ConcatShapeHelper(InferenceContext* c, int start_value_index,
int end_value_index, int dim_index) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(dim_index), 0, &unused));
const Tensor* concat_dim_t = c->input_tensor(dim_index);
if (concat_dim_t == nullptr) {
int32_t rank = InferenceContext::kUnknownRank;
for (int i = start_value_index; i < end_value_index; ++i) {
if (rank == InferenceContext::kUnknownRank) rank = c->Rank(c->input(i));
if (rank != InferenceContext::kUnknownRank) {
break;
}
}
if (rank == InferenceContext::kUnknownRank) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
} else if (rank == 0) {
return errors::InvalidArgument(
"Can't concatenate scalars (use tf.stack instead)");
} else {
for (int i = start_value_index; i < end_value_index; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), rank, &unused));
}
}
std::vector<DimensionHandle> dims;
dims.reserve(rank);
for (int i = 0; i < rank; ++i) dims.push_back(c->UnknownDim());
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
int64_t concat_dim;
if (concat_dim_t->dtype() == DT_INT32) {
concat_dim = static_cast<int64_t>(concat_dim_t->flat<int32>()(0));
} else {
concat_dim = concat_dim_t->flat<int64_t>()(0);
}
  const int64_t min_rank = concat_dim < 0 ? -concat_dim : concat_dim + 1;
ShapeHandle output_before;
ShapeHandle output_after;
ShapeHandle input = c->input(end_value_index - 1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &output_before));
DimensionHandle output_middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
output_after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &output_after));
}
for (int i = end_value_index - 2; i >= start_value_index; --i) {
ShapeHandle before;
ShapeHandle after;
input = c->input(i);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, min_rank, &input));
TF_RETURN_IF_ERROR(c->Subshape(input, 0, concat_dim, &before));
DimensionHandle middle = c->Dim(input, concat_dim);
if (concat_dim == -1) {
after = c->Scalar();
} else {
TF_RETURN_IF_ERROR(c->Subshape(input, concat_dim + 1, &after));
}
TF_RETURN_IF_ERROR(c->Merge(before, output_before, &output_before));
TF_RETURN_IF_ERROR(c->Add(output_middle, middle, &output_middle));
TF_RETURN_IF_ERROR(c->Merge(after, output_after, &output_after));
}
ShapeHandle s;
TF_RETURN_IF_ERROR(
c->Concatenate(output_before, c->Vector(output_middle), &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, output_after, &s));
c->set_output(0, s);
return absl::OkStatus();
}
Status ConcatShape(InferenceContext* c, int num_inputs_to_concat) {
  return ConcatShapeHelper(c, 1 /* start_value_index */,
                           1 + num_inputs_to_concat /* end_value_index */,
                           0 /* dim_index */);
}
Status ConcatV2Shape(InferenceContext* c) {
  return ConcatShapeHelper(c, 0 /* start_value_index */,
                           c->num_inputs() - 1 /* end_value_index */,
                           c->num_inputs() - 1 /* dim_index */);
}
Status QuantizedConcatV2Shape(InferenceContext* c, int num_inputs_to_concat) {
  return ConcatShapeHelper(c, 0 /* start_value_index */,
                           num_inputs_to_concat /* end_value_index */,
                           num_inputs_to_concat /* dim_index */);
}
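// Computes the broadcast shape of shape_x and shape_y using NumPy-style
// broadcasting rules: dimensions are aligned from the right, and a dimension
// of size 1 stretches to match the other operand (e.g. [2,1,5] and [3,1]
// broadcast to [2,3,5]). When incompatible_shape_error is false, conflicting
// dimensions yield an unknown (or empty) output shape instead of an error.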
Status BroadcastBinaryOpOutputShapeFnHelper(InferenceContext* c,
ShapeHandle shape_x,
ShapeHandle shape_y,
bool incompatible_shape_error,
ShapeHandle* out) {
CHECK_NOTNULL(out);
if (!c->RankKnown(shape_x) || !c->RankKnown(shape_y)) {
*out = c->UnknownShape();
return absl::OkStatus();
}
const int32_t rank_x = c->Rank(shape_x);
const int32_t rank_y = c->Rank(shape_y);
const int32_t rank_out = std::max(rank_x, rank_y);
std::vector<DimensionHandle> dims;
DimensionHandle dim_one;
if (rank_x != rank_y) dim_one = c->MakeDim(1);
for (int i = 0; i < rank_out; ++i) {
const auto dim_x = i < (rank_out - rank_x)
? dim_one
: c->Dim(shape_x, i - (rank_out - rank_x));
const bool dim_y_is_one = (i < (rank_out - rank_y));
const auto dim_y =
dim_y_is_one ? dim_one : c->Dim(shape_y, i - (rank_out - rank_y));
if (!c->ValueKnown(dim_x) || !c->ValueKnown(dim_y)) {
if (c->Value(dim_x) > 1) {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(dim_x);
} else if (c->Value(dim_y) > 1) {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(dim_y);
} else if (c->Value(dim_x) == 1) {
dims.push_back(dim_y);
} else if (c->Value(dim_y) == 1) {
dims.push_back(dim_x);
} else if (dim_y.SameHandle(dim_x)) {
dims.push_back(dim_x);
} else if (!c->ValueKnown(dim_x) && !c->ValueKnown(dim_y)) {
dims.push_back(c->UnknownDim());
} else {
if (!incompatible_shape_error) {
*out = c->UnknownShape();
return absl::OkStatus();
}
dims.push_back(c->UnknownDim());
}
} else if (c->Value(dim_x) == 1 || c->Value(dim_y) == 1) {
if (c->Value(dim_x) == 1 && !dim_y_is_one) {
dims.push_back(dim_y);
} else {
DCHECK_EQ(c->Value(dim_y), 1);
dims.push_back(dim_x);
}
} else {
DimensionHandle dim;
Status s = c->Merge(dim_x, dim_y, &dim);
if (!s.ok()) {
if (!incompatible_shape_error) {
*out = c->MakeShape({});
return absl::OkStatus();
}
return s;
}
dims.push_back(dim);
}
}
*out = c->MakeShape(dims);
return absl::OkStatus();
}
Status RandomShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out));
c->set_output(0, out);
return absl::OkStatus();
}
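// Shape function for segment reductions that take an explicit num_segments
// scalar: the output replaces the segment_ids-shaped prefix of the data shape
// with [num_segments].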
Status SegmentReductionWithNumSegmentsShapeFn(InferenceContext* c) {
ShapeHandle s_data = c->input(0);
ShapeHandle s_segment_ids = c->input(1);
ShapeHandle s_num_segments = c->input(2);
TF_RETURN_IF_ERROR(c->WithRank(s_num_segments, 0, &s_num_segments));
ShapeHandle out;
if (c->RankKnown(s_segment_ids)) {
TF_RETURN_IF_ERROR(
c->MergePrefix(s_data, s_segment_ids, &s_data, &s_segment_ids));
DimensionHandle num_segments_dim;
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(2, &num_segments_dim));
ShapeHandle s_data_suffix;
TF_RETURN_IF_ERROR(
c->Subshape(s_data, c->Rank(s_segment_ids), &s_data_suffix));
TF_RETURN_IF_ERROR(
c->Concatenate(c->Vector(num_segments_dim), s_data_suffix, &out));
} else {
out = c->UnknownShape();
}
c->set_output(0, out);
return absl::OkStatus();
}
namespace {
template <typename T>
Status SliceHelper(InferenceContext* c, ShapeHandle begin_value,
const Tensor* sizes_value,
std::vector<DimensionHandle>* dims) {
auto sizes_vec = sizes_value->vec<T>();
for (int i = 0; i < sizes_value->NumElements(); ++i) {
DimensionHandle dim = c->Dim(c->input(0), i);
if (sizes_vec(i) != -1) {
auto dim_val = c->Value(dim);
if (sizes_vec(i) < 0) {
return errors::InvalidArgument(
"Out of bounds slicing on dimension ", i, " of length ", dim_val,
": sizes vector cannot be < -1, but was ", sizes_vec(i));
}
dims->emplace_back(c->MakeDim(sizes_vec(i)));
} else {
DimensionHandle result;
TF_RETURN_IF_ERROR(c->Subtract(dim, c->Dim(begin_value, i), &result));
dims->emplace_back(result);
}
}
return absl::OkStatus();
}
}  // namespace
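// Shape function for Slice. The begin and size inputs must be rank-1 vectors
// of equal length (the input rank). A size of -1 means "to the end of the
// dimension", in which case the output extent is dim - begin.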
Status SliceShape(InferenceContext* c) {
ShapeHandle input = c->input(0);
ShapeHandle begin_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &begin_shape));
ShapeHandle sizes_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &sizes_shape));
TF_RETURN_IF_ERROR(c->Merge(begin_shape, sizes_shape, &begin_shape));
DimensionHandle ndims = c->Dim(begin_shape, 0);
if (c->ValueKnown(ndims)) {
TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(ndims), &input));
}
ShapeHandle begin_value;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &begin_value));
const Tensor* sizes_value = c->input_tensor(2);
if (sizes_value != nullptr) {
TF_RETURN_IF_ERROR(
c->WithRank(begin_value, sizes_value->NumElements(), &begin_value));
std::vector<DimensionHandle> dims;
if (sizes_value->dtype() == DT_INT64) {
TF_RETURN_IF_ERROR(
SliceHelper<int64_t>(c, begin_value, sizes_value, &dims));
} else {
TF_RETURN_IF_ERROR(
SliceHelper<int32>(c, begin_value, sizes_value, &dims));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
} else {
ShapeHandle sizes_value;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &sizes_value));
if (c->RankKnown(sizes_value)) {
TF_RETURN_IF_ERROR(
c->WithRank(begin_value, c->Rank(sizes_value), &begin_value));
std::vector<DimensionHandle> dims;
dims.reserve(c->Rank(sizes_value));
for (int i = 0; i < c->Rank(sizes_value); ++i) {
dims.emplace_back(c->Dim(sizes_value, i));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
if (c->RankKnown(input)) {
c->set_output(0, c->UnknownShapeOfRank(c->Rank(input)));
return absl::OkStatus();
} else {
return shape_inference::UnknownShape(c);
}
}
return absl::OkStatus();
}
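// Validates the three components of a SparseTensor: indices must be a
// [N, rank] matrix, values a length-N vector, and shape a length-rank vector,
// with N and rank consistent wherever they are statically known.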
Status ValidateSparseTensor(InferenceContext* c, ShapeHandle indices_shape,
ShapeHandle values_shape, ShapeHandle shape_shape) {
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(c->WithRank(indices_shape, 2, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(values_shape, 1, &unused_shape));
TF_RETURN_IF_ERROR(c->WithRank(shape_shape, 1, &unused_shape));
DimensionHandle num_index_elements_dim = c->Dim(indices_shape, 0);
if (c->ValueKnown(num_index_elements_dim)) {
DimensionHandle num_values_elements_dim = c->Dim(values_shape, 0);
if (c->ValueKnown(num_values_elements_dim)) {
int64_t num_index_elements = c->Value(num_index_elements_dim);
int64_t num_values_elements = c->Value(num_values_elements_dim);
if (num_index_elements != num_values_elements) {
return errors::InvalidArgument("Number of elements in index (",
num_index_elements, ") and values (",
num_values_elements, ") do not match.");
}
}
}
DimensionHandle index_rank_dim = c->Dim(indices_shape, 1);
if (c->ValueKnown(index_rank_dim)) {
DimensionHandle shape_rank_dim = c->Dim(shape_shape, 0);
if (c->ValueKnown(shape_rank_dim)) {
int64_t index_rank = c->Value(index_rank_dim);
int32_t shape_rank = c->Value(shape_rank_dim);
if (index_rank != shape_rank) {
return errors::InvalidArgument("Index rank (", index_rank,
") and shape rank (", shape_rank,
") do not match.");
}
}
}
return absl::OkStatus();
}
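// Reads the shape/dtype carried by a resource-variable handle (input 0) and
// checks that the dtype matches the op's "dtype" attribute; falls back to an
// unknown shape with DT_INVALID when the handle carries no data.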
Status ValidateVariableResourceHandle(
InferenceContext* c, std::vector<ShapeAndType>* shape_and_type) {
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data == nullptr || handle_data->empty()) {
shape_and_type->emplace_back(c->UnknownShape(), DT_INVALID);
} else {
*shape_and_type = *handle_data;
DataType value_dtype;
TF_RETURN_IF_ERROR(c->GetAttr("dtype", &value_dtype));
if (shape_and_type->at(0).dtype != value_dtype) {
return errors::InvalidArgument(
"Trying to read variable with wrong dtype. "
"Expected ",
DataTypeString(shape_and_type->at(0).dtype), " got ",
DataTypeString(value_dtype));
}
}
return absl::OkStatus();
}
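// Shape function for GatherNd. With indices of shape [..., r], the output is
// indices.shape[:-1] + params.shape[r:], so e.g. params [4,5,6] gathered with
// indices [2,3,2] yields [2,3,6]. Requires r <= rank(params).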
Status GatherNdShape(InferenceContext* c) {
ShapeHandle params;
std::vector<ShapeAndType> handle_shape_and_type;
if (c->input_handle_shapes_and_types(0) != nullptr) {
TF_RETURN_IF_ERROR(
ValidateVariableResourceHandle(c, &handle_shape_and_type));
params = handle_shape_and_type[0].shape;
} else {
params = c->input(0);
}
ShapeHandle indices;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices));
DimensionHandle r_dim = c->Dim(indices, -1);
if (!c->RankKnown(params) || !c->ValueKnown(r_dim)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
if (c->Value(r_dim) > c->Rank(params)) {
return errors::InvalidArgument(
"indices.shape[-1] must be <= params.rank, but saw indices shape: ",
c->DebugString(indices), " and params shape: ", c->DebugString(params));
}
ShapeHandle indices_slice;
ShapeHandle params_slice;
TF_RETURN_IF_ERROR(c->Subshape(indices, 0, -1, &indices_slice));
TF_RETURN_IF_ERROR(c->Subshape(params, c->Value(r_dim), ¶ms_slice));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(indices_slice, params_slice, &out));
c->set_output(0, out);
return absl::OkStatus();
}
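// Validates shape agreement for ScatterNd-style ops: the outer dimensions of
// indices and updates must match, and the remaining dimensions of updates
// must match input_shape starting at index indices.shape[-1]. For ops without
// a resource handle input, the output shape is the input shape.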
Status ScatterNdShapeHelper(InferenceContext* c, ShapeHandle indices_shape,
ShapeHandle updates_shape,
ShapeHandle input_shape) {
if (c->Value(c->NumElements(input_shape)) == 0 &&
(c->Value(c->NumElements(indices_shape)) > 0 ||
c->Value(c->NumElements(updates_shape)) > 0)) {
return errors::InvalidArgument(
"Indices and updates specified for empty input");
}
if (c->RankKnown(indices_shape) && c->RankKnown(updates_shape) &&
c->Rank(updates_shape) != 0) {
const int64_t outer_dims = c->Rank(indices_shape) - 1;
const DimensionHandle ixdim = c->Dim(indices_shape, -1);
if (c->ValueKnown(ixdim)) {
int64_t ix = c->Value(ixdim);
ShapeHandle unused;
ShapeHandle prefix_indices;
TF_RETURN_IF_ERROR(
c->Subshape(indices_shape, 0, outer_dims, &prefix_indices));
ShapeHandle prefix_updates;
TF_RETURN_IF_ERROR(
c->Subshape(updates_shape, 0, outer_dims, &prefix_updates));
Status s = c->Merge(prefix_indices, prefix_updates, &unused);
if (!s.ok()) {
return errors::InvalidArgument(
"Dimensions [0,", outer_dims,
") of indices[shape=", c->DebugString(indices_shape),
"] = ", c->DebugString(prefix_indices),
" must match dimensions [0,", outer_dims,
") of updates[shape=", c->DebugString(updates_shape),
"] = ", c->DebugString(prefix_updates), ": ", s.message());
}
ShapeHandle suffix_output;
TF_RETURN_IF_ERROR(c->Subshape(input_shape, ix, &suffix_output));
ShapeHandle suffix_updates;
TF_RETURN_IF_ERROR(
c->Subshape(updates_shape, outer_dims, &suffix_updates));
s = c->Merge(suffix_output, suffix_updates, &unused);
if (!s.ok()) {
return errors::InvalidArgument(
"Dimensions [", ix, ",", c->Rank(input_shape),
") of input[shape=", c->DebugString(input_shape),
"] = ", c->DebugString(suffix_output), " must match dimensions [",
outer_dims, ",", c->Rank(updates_shape),
") of updates[shape=", c->DebugString(updates_shape),
"] = ", c->DebugString(suffix_updates), ": ", s.message());
}
}
}
if (c->input_handle_shapes_and_types(0) == nullptr && c->num_outputs() > 0) {
c->set_output(0, input_shape);
}
return absl::OkStatus();
}
Status ExplicitShape(InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
}
Status ExplicitShapes(InferenceContext* c) {
std::vector<PartialTensorShape> shapes;
TF_RETURN_IF_ERROR(c->GetAttr("shapes", &shapes));
if (shapes.empty()) {
return errors::Internal("shapes attribute is empty");
}
for (int i = 0, end = shapes.size(); i < end; ++i) {
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(
c->MakeShapeFromPartialTensorShape(shapes[i], &output_shape));
c->set_output(i, output_shape);
}
return absl::OkStatus();
}
Status SparseReduceShapeFn(InferenceContext* c) {
bool keep_dims = false;
TF_RETURN_IF_ERROR(c->GetAttr("keep_dims", &keep_dims));
const Tensor* shape_tensor = c->input_tensor(2);
const Tensor* axes_tensor = c->input_tensor(3);
if (shape_tensor != nullptr && axes_tensor != nullptr) {
auto shape_vec = shape_tensor->flat<int64_t>();
auto axes_vec = axes_tensor->flat<int32>();
int64_t ndims = shape_vec.size();
absl::flat_hash_set<int64_t> axes;
if (ndims == 0)
return errors::InvalidArgument(
"Number of dims in shape tensor must not be 0");
for (int i = 0; i < axes_vec.size(); i++) {
axes.insert((axes_vec(i) + ndims) % ndims);
}
std::vector<DimensionHandle> dims;
if (keep_dims) {
dims.reserve(ndims);
for (int d = 0; d < ndims; ++d) {
if (axes.find(d) == axes.end()) {
dims.push_back(c->MakeDim(shape_vec(d)));
} else {
dims.push_back(c->MakeDim(1));
}
}
} else {
for (int d = 0; d < ndims; ++d) {
if (axes.find(d) == axes.end()) {
dims.push_back(c->MakeDim(shape_vec(d)));
}
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
}
return UnknownShape(c);
}
Status QuantizedConv2DShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::Conv2DShape(c));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused));
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
return absl::OkStatus();
}
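// Shape function for fused quantized convolutions. The positions of the
// min/max input scalars depend on which ops are fused (BiasAdd and/or Sum);
// with a fused Requantize the output min/max are scalars, otherwise they
// follow the (at most rank-1) per-channel min/max of the filter.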
Status FusedQuantizedConvShape(InferenceContext* c, int num_dims) {
std::vector<string> fused_ops;
TF_RETURN_IF_ERROR(c->GetAttr("fused_ops", &fused_ops));
ShapeHandle unused, channel;
bool fused_sum, fused_bias, fused_requantize;
fused_sum =
std::find(fused_ops.begin(), fused_ops.end(), "Sum") != fused_ops.end();
fused_bias = std::find(fused_ops.begin(), fused_ops.end(), "BiasAdd") !=
fused_ops.end();
fused_requantize = std::find(fused_ops.begin(), fused_ops.end(),
"Requantize") != fused_ops.end();
const int kMinInputBaseIdx = 2;
const int kMinFilterBaseIdx = 4;
int min_input_filter_offset = 0;
if (fused_bias && !fused_sum) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
min_input_filter_offset = 1;
} else if (fused_sum && !fused_bias) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), num_dims, &unused));
min_input_filter_offset = 1;
} else if (fused_bias && fused_sum) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), num_dims, &unused));
min_input_filter_offset = 2;
}
TF_RETURN_IF_ERROR(
c->WithRank(c->input(kMinInputBaseIdx + min_input_filter_offset), 0,
&unused));
TF_RETURN_IF_ERROR(
c->WithRank(c->input(kMinInputBaseIdx + min_input_filter_offset + 1), 0,
&unused));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(c->input(kMinFilterBaseIdx + min_input_filter_offset),
1, &channel));
TF_RETURN_IF_ERROR(c->WithRankAtMost(
c->input(kMinFilterBaseIdx + min_input_filter_offset + 1), 1,
&channel));
if (fused_requantize) {
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
} else {
c->set_output(1, channel);
c->set_output(2, channel);
}
return absl::OkStatus();
}
Status FusedQuantizedConv2DShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::Conv2DShapeImpl(c, true));
TF_RETURN_IF_ERROR(FusedQuantizedConvShape(c, 4));
return absl::OkStatus();
}
Status FusedQuantizedDepthwiseConv2D(InferenceContext* c) {
TF_RETURN_IF_ERROR(DepthwiseConv2DNativeShapeImpl(c, true));
TF_RETURN_IF_ERROR(FusedQuantizedConvShape(c, 4));
return absl::OkStatus();
}
Status QuantizedAvgPoolShape(InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::AvgPoolShape(c));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
return absl::OkStatus();
}
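// Shape function for QuantizeV2: with axis == -1 the min/max inputs are
// scalars, otherwise they are vectors whose length must match the input's
// `axis` dimension. Outputs 1 and 2 carry the (possibly per-axis) min/max.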
Status QuantizeV2Shape(InferenceContext* c) {
int axis = -1;
Status s = c->GetAttr("axis", &axis);
if (!s.ok() && s.code() != error::NOT_FOUND) {
return s;
}
if (axis < -1) {
return errors::InvalidArgument("axis should be at least -1, got ", axis);
}
const int minmax_rank = (axis == -1) ? 0 : 1;
TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c));
ShapeHandle minmax;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax));
if (axis != -1) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input));
DimensionHandle depth;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth));
}
c->set_output(1, minmax);
c->set_output(2, minmax);
return absl::OkStatus();
}
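// Shape function for ReduceScatter: the output matches the input except that
// the scatter dimension is evenly divided by the group size
// (group_assignment.shape[1]). The output is unknown when the scatter
// dimension tensor is not statically available.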
Status ReduceScatterShape(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle in = c->input(0);
if (!c->RankKnown(in)) {
c->set_output(0, in);
return absl::OkStatus();
}
shape_inference::ShapeHandle group_assignment_shape = c->input(1);
if (c->Rank(group_assignment_shape) != 2)
return errors::InvalidArgument(
"ReduceScatter group_assignment should be rank 2");
const Tensor* scatter_dimension = c->input_tensor(2);
if (!scatter_dimension) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int64_t scatter_dim;
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(scatter_dimension, &scatter_dim));
std::vector<shape_inference::DimensionHandle> out_dims;
out_dims.reserve(c->Rank(in));
for (int i = 0; i < c->Rank(in); ++i) {
if (i == scatter_dim) {
shape_inference::DimensionHandle dim = c->Dim(in, i);
shape_inference::DimensionHandle out_dim;
TF_RETURN_IF_ERROR(c->Divide(dim, c->Dim(group_assignment_shape, 1),
true, &out_dim));
out_dims.push_back(out_dim);
} else {
out_dims.emplace_back(c->Dim(in, i));
}
}
c->set_output(0, c->MakeShape(out_dims));
return absl::OkStatus();
}
}  // namespace shape_inference
}  // namespace tensorflow
#include "tensorflow/core/framework/common_shape_fns.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace shape_inference {
namespace {
PartialTensorShape S(std::initializer_list<int64_t> dims) {
return PartialTensorShape(dims);
}
PartialTensorShape Unknown() { return PartialTensorShape(); }
OpDef MakeOpDef(int num_inputs, int num_outputs) {
OpRegistrationData op_reg_data;
OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
}  // namespace
TEST(CommonShapeFnsTest, NoOutputShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("Assert")
.Input("condition: bool")
.Input("data: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "Assert")
.Input("condition", 0, DT_BOOL)
.Input({{"data", 0, DT_FLOAT}})
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({}), S({10})}, {},
{}, {});
TF_EXPECT_OK(NoOutputs(&c));
EXPECT_EQ(0, c.num_outputs());
}
TEST(CommonShapeFnsTest, ScalarShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("L2Loss")
.Input("t: float")
.Output("t: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(
NodeDefBuilder("test", "L2Loss").Input("t", 0, DT_FLOAT).Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({})}, {}, {}, {});
TF_EXPECT_OK(ScalarShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(0, c.Rank(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({1, 23, 4, 4, 2})},
{}, {}, {});
TF_EXPECT_OK(ScalarShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(0, c.Rank(output));
}
}
TEST(CommonShapeFnsTest, MatMulShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("MatMul")
.Input("a: float")
.Input("b: float")
.Output("c: float")
.Attr("transpose_a:bool=false")
.Attr("transpose_b:bool=false")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", false)
.Attr("transpose_b", false)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({3, 4})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, -1}), S({3, 4})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2}), S({3, 4})},
{}, {}, {});
auto s = MatMulShape(&c);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "Shape must be rank 2 but is rank 1"));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({3, -1})}, {}, {}, {});
TF_EXPECT_OK(MatMulShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 5}), S({3, 4})}, {}, {}, {});
auto s = MatMulShape(&c);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Dimensions must be equal, but are 5 and 3"));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 5, 3}), S({3, 5, 4})}, {}, {}, {});
auto s = MatMulShape(&c);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "Shape must be rank 2 but is rank 3"));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Attr("type", DT_FLOAT)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 2}), S({3, 4})}, {}, {}, {});
auto s = MatMulShape(&c);
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "MatMul")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("transpose_a", false)
.Attr("transpose_b", true)
.Attr("type", DT_FLOAT)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3}), S({4, 3})}, {}, {}, {});
auto s = MatMulShape(&c);
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(4, c.Value(c.Dim(output, 1)));
}
}
TEST(CommonShapeFnsTest, Einsum_ShapeFn) {
ShapeInferenceTestOp op("Einsum");
auto set_equation = [&op](int n, string equation) {
std::vector<NodeDefBuilder::NodeOut> input_list;
input_list.reserve(n);
for (int i = 0; i < n; ++i) {
input_list.emplace_back("a", 0, DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "Einsum")
.Input(input_list)
.Attr("equation", equation)
.Finalize(&op.node_def));
};
set_equation(1, "abc->c");
INFER_OK(op, "[?,?,?]", "[d0_2]");
set_equation(1, "abc->aabbcc");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_0,d0_1,d0_1,d0_2,d0_2]");
set_equation(1, "abc->");
INFER_OK(op, "[?,?,?]", "[]");
set_equation(1, "->");
INFER_OK(op, "[]", "[]");
set_equation(2, "ij,jk->ik");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
set_equation(2, "ij,jk->ik");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
set_equation(2, "ab,ab->");
INFER_OK(op, "[?,?];[?,?]", "[]");
set_equation(2, "ab,->b");
INFER_OK(op, "[?,?];[]", "[d0_1]");
set_equation(2, ",->");
INFER_OK(op, "[];[]", "[]");
set_equation(2, "aaa,b->abbb");
INFER_OK(op, "[?,?,?];[?]", "[d0_0,d1_0,d1_0,d1_0]");
set_equation(2, ",abcd->badc");
INFER_OK(op, "[];[?,?,?,?]", "[d1_1,d1_0,d1_3,d1_2]");
set_equation(1, "a...bc->c...");
INFER_OK(op, "[?,?,?,?,?]", "[d0_4,d0_1,d0_2]");
set_equation(2, "...ij,...jk->...ik");
INFER_OK(op, "[?,?,?,?,?];[1,?,?]", "[d0_0,d0_1,d0_2,d0_3,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?,?,?]", "[d1_0,d1_1,d1_2,d0_1,d1_4]");
set_equation(1, "abc->c");
INFER_OK(op, "?", "[?]");
set_equation(1, "a...bc->c");
INFER_OK(op, "?", "[?]");
set_equation(1, "a...bc->c...");
INFER_OK(op, "?", "?");
set_equation(2, "...ij,...jk->...ik");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?,?];?", "?");
INFER_OK(op, "?;[?,?,?]", "?");
set_equation(2, "...ij,...jk->ik");
INFER_OK(op, "?;?", "[?,?]");
set_equation(2, "abd,b...c->...cad");
INFER_OK(op, "[?,?,?];[?,?,?,?]", "[d1_1,d1_2,d1_3,d0_0,d0_2]");
set_equation(2, "...ab,b...c->ac...");
INFER_OK(op, "[?,1,?,?];[?,?,?]", "[d0_2,d1_2,d0_0,d1_1]");
set_equation(2, "ab->b");
INFER_ERROR("got: 2", op, "[?,?];[?,?]");
set_equation(1, "ab,a->b");
INFER_ERROR("got: 1", op, "[?,?]");
set_equation(1, "a");
INFER_ERROR("equation", op, "[2]");
set_equation(2, "ab,bc");
INFER_ERROR("equation", op, "[2,2];[2,2]");
set_equation(1, "..a.->a...");
INFER_ERROR("ellipsis", op, "[1,1,2,1]");
set_equation(1, "...a->.a..");
INFER_ERROR("ellipsis", op, "[1,1,1,2]");
set_equation(1, "...a...->...a");
INFER_ERROR("ellipsis", op, "[1,1,1,2]");
set_equation(1, "..a..b..->...ab");
INFER_ERROR("ellipsis", op, "[1,1,2,1]");
set_equation(2, "...a...,ab->a");
INFER_ERROR("ellipsis", op, "[1,2,1];[2,1]");
set_equation(2, "a,...ab...->a");
INFER_ERROR("ellipsis", op, "[2];[1,2,1,1]");
set_equation(2, "a,ab->a......");
INFER_ERROR("ellipsis", op, "[2];[2,1]");
set_equation(1, "abc->d");
INFER_ERROR("'d'", op, "[?,?,?]");
set_equation(1, "abc->c");
INFER_ERROR("4", op, "[?,?,?,?]");
INFER_ERROR("2", op, "[?,?]");
set_equation(1, "...abc->...c");
INFER_ERROR("2", op, "[?,?]");
set_equation(2, "ab,ab->a");
INFER_ERROR("are 1 and 2", op, "[1,2];[2,1]");
set_equation(2, "aa,bb->a");
INFER_ERROR("are 1 and 2", op, "[1,2];[2,2]");
set_equation(2, "...ij,...jk->...ik");
INFER_ERROR("are 2 and 3", op, "[2,?,?];[3,?,?]");
set_equation(2, "i...j,jk...->...ik");
INFER_ERROR("are 2 and 3", op, "[?,2,?];[?,?,3]");
set_equation(2, "...ij,...jk->ik");
set_equation(2, "i...j,jk...->ik");
INFER_ERROR("non-empty broadcasting", op, "[?,2,?];[?,?]");
set_equation(2, "...ab,b...c->ac...");
INFER_OK(op, "?;[4,5,3]", "?");
}
TEST(CommonShapeFnsTest, BatchMatMulV2_ShapeFn) {
ShapeInferenceTestOp op("BatchMatMulV2");
auto set_adj = [&op](bool adj_x, bool adj_y) {
TF_ASSERT_OK(NodeDefBuilder("test", "BatchMatMulV2")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Attr("adj_x", adj_x)
.Attr("adj_y", adj_y)
.Finalize(&op.node_def));
};
set_adj(false, false);
INFER_ERROR("at least rank 2", op, "[];?");
INFER_ERROR("at least rank 2", op, "[1];?");
INFER_ERROR("at least rank 2", op, "?;[]");
INFER_ERROR("at least rank 2", op, "?;[2]");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[3,?,?];[3,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[1,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[2,?,?]", "[d1_0,d0_1,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?]", "[d1_0,d0_1,d1_2]");
INFER_OK(op, "[2,?,?];[?,?,?]", "[d0_0,d0_1,d1_2]");
INFER_OK(op, "[?,?,?];[?,?,?]", "[?,d0_1,d1_2]");
INFER_OK(op, "[?,?];[?,?,?]", "[d1_0,d0_0,d1_2]");
INFER_OK(op, "[?,?,?];[?,?]", "[d0_0,d0_1,d1_1]");
INFER_OK(op, "[?,?];[?,?,?,?]", "[d1_0,d1_1,d0_0,d1_3]");
INFER_OK(op, "[?,?,?,?];[?,?]", "[d0_0,d0_1,d0_2,d1_1]");
INFER_OK(op, "[?,?];?", "?");
INFER_OK(op, "?;[?,?]", "?");
INFER_OK(op, "[?,?,?,?];?", "?");
INFER_OK(op, "[?,?,?,?,?];[1,?,?]", "[d0_0,d0_1,d0_2,d0_3,d1_2]");
INFER_OK(op, "[1,?,?];[?,?,?,?,?]", "[d1_0,d1_1,d1_2,d0_1,d1_4]");
INFER_ERROR("are 2 and 3", op, "[?,?,2,?,?];[3,?,?]");
INFER_ERROR("are 2 and 3", op, "[2,?,?];[?,?,3,?,?]");
set_adj(false, false);
INFER_OK(op, "[2,2,3,4];[2,2,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,3,1]");
set_adj(true, false);
INFER_OK(op, "[2,2,3,4];[2,2,?,?]", "[d0_0,d0_1,d0_3,d1_3]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,3,1]");
set_adj(false, true);
INFER_OK(op, "[2,2,?,?];[2,2,3,4]", "[d0_0,d0_1,d0_2,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,1,3]");
set_adj(true, true);
INFER_OK(op, "[2,2,?,?];[2,2,3,4]", "[d0_0,d0_1,d0_3,d1_2]");
INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,1,3]");
}
TEST(CommonShapeFnsTest, BiasAddShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("BiasAdd")
.Input("a: float")
.Input("b: float")
.Output("c: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 10}), S({10})},
{}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(2, c.Value(c.Dim(output, 0)));
EXPECT_EQ(10, c.Value(c.Dim(output, 1)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{Unknown(), Unknown()}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_FALSE(c.RankKnown(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({4, 3, 4, 2, 15}), S({15})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[4,3,4,2,15]", c.DebugString(output));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 3, 4, 5}), S({3})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[2,3,4,5]", c.DebugString(output));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({8, 6, 4, 2, 3, 4, 5}), S({3})}, {}, {}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({10, 11, 12}), S({11})}, {}, {}, {});
TF_EXPECT_OK(BiasAddShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ("[10,11,12]", c.DebugString(output));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({3}), S({3})}, {},
{}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAdd")
.Input("a", 0, DT_FLOAT)
.Input("b", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3}), S({3})},
{}, {}, {});
EXPECT_FALSE(BiasAddShape(&c).ok());
}
}
TEST(CommonShapeFnsTest, FusedBatchNormExTest) {
ShapeInferenceTestOp op("_FusedBatchNormEx");
std::vector<NodeDefBuilder::NodeOut> no_side_inputs;
TF_CHECK_OK(NodeDefBuilder("test", "_FusedBatchNormEx")
.Input("x", 0, DT_HALF)
.Input("scale", 0, DT_FLOAT)
.Input("offset", 0, DT_FLOAT)
.Input("mean", 0, DT_FLOAT)
.Input("variance", 0, DT_FLOAT)
.Input(no_side_inputs)
.Attr("T", DT_HALF)
.Attr("U", DT_FLOAT)
.Attr("epsilon", 0.001)
.Attr("data_format", "NHWC")
.Attr("activation_mode", "Relu")
.Attr("num_side_inputs", 0)
.Attr("is_training", true)
.Finalize(&op.node_def));
INFER_ERROR("must be divisible by 4", op, "[2,2,2,2];[2];[2];[2];[2]");
INFER_OK(op, "[2,2,2,4];[4];[4];[4];[4]",
"[d0_0,d0_1,d0_2,d0_3];[d0_3];[d0_3];[d0_3];[d0_3];?");
}
TEST(CommonShapeFnsTest, BiasAddGradShapeTest) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("BiasAddGrad")
.Input("a: float")
.Output("b: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Finalize(&def));
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 10})}, {}, {},
{});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(10, c.Value(c.Dim(output, 0)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({5, 7, 2, 10})},
{}, {}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(10, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3, 4, 5})}, {},
{}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(3, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({8, 6, 4, 2, 3, 4, 5})}, {}, {}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(6, c.Value(c.Dim(output, 0)));
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({10, 11, 12})}, {},
{}, {});
TF_EXPECT_OK(BiasAddGradShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(11, c.Value(c.Dim(output, 0)));
}
{
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({3})}, {}, {}, {});
EXPECT_FALSE(BiasAddGradShape(&c).ok());
}
{
TF_CHECK_OK(NodeDefBuilder("test", "BiasAddGrad")
.Input("a", 0, DT_FLOAT)
.Attr("data_format", "NCHW")
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def, {S({2, 3})}, {}, {},
{});
EXPECT_FALSE(BiasAddGradShape(&c).ok());
}
}
TEST(CommonShapeFnsTest, ConvTest) {
ShapeInferenceTestOp op("Conv");
auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
string data_format, int batch_dims, int groups) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Attr("batch_dims", batch_dims)
.Attr("groups", groups)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("Input tensor rank must be the same as filter rank.", op,
"[2,2,1,1,1];[2,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", -1, 1);
INFER_ERROR("must be non-negative", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 5, 1);
INFER_ERROR(
"Input tensor must be rank 4 or 5, excluding extra "
"batch dimensions, but got: 0",
op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("extra batch dimensions", op, "[1,2,3];[1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 1);
INFER_ERROR("extra batch dimensions", op, "[1,2,3,4,5,6];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 0);
INFER_ERROR("should be a positive integer", op, "[1,2,3,4,5];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, -1);
INFER_ERROR("should be a positive integer", op, "[1,2,3,4,5];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 3);
INFER_ERROR("should divide input depth", op, "[1,1,1,1,13];[3,3,3,13,3]");
set_op({{1, 1, 1, 1, 1}}, "VALID",
"CHANNELS_LAST", 1, 3);
INFER_ERROR("should divide output depth", op, "[3,3,3,3,3];[1,1,1,3,13]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 2);
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 1);
INFER_ERROR(
"Input depth divided by filter input depth does not match with groups "
"parameter (1)",
op, "[1,4,4,4,10];[2,2,2,5,2]");
set_op({{1, 1, 1, 1, 1}}, "SAME",
"CHANNELS_LAST", 1, 10);
INFER_OK(op, "[1,4,4,4,10];[2,2,2,1,10]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
TEST(CommonShapeFnsTest, Conv2DFormatsTest) {
ShapeInferenceTestOp op("Conv2D");
auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
const string& data_format, const string& filter_format,
const std::vector<int32>& explicit_paddings = {}) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", data_format)
.Attr("filter_format", filter_format)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,2,2,4];[4,1,1,1,4]", "[d0_0,1,2,2,d0_4]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,2,2,4];[4,1,2,2,4]", "[d0_0,1,1,1,d0_4]");
set_op({{1, 1, 2, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,3,3,4];[8,1,1,1,4]", "[d0_0,2,2,2,d0_4]");
set_op({{1, 1, 2, 1}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,3,3,4];[4,1,1,1,4]", "[d0_0,1,2,3,d0_4]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,4,4,4];[4,1,2,1,4]", "[d0_0,1,3,2,d0_4]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW_VECT_C", "OIHW_VECT_I");
INFER_OK(op, "[1,1,4,4,32];[32,1,2,1,32]", "[d0_0,1,3,2,d0_4]");
}
class Conv2DShapeTest : public ::testing::TestWithParam<string> {};
TEST_P(Conv2DShapeTest, Conv2DShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& strides, const string& padding,
const string& data_format, const string& filter_format,
const std::vector<int32>& explicit_paddings = {}) {
string format;
if (op.name == "Conv")
format = (data_format == "NHWC") ? "CHANNELS_LAST" : "CHANNELS_FIRST";
else
format = data_format;
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", format)
.Attr("filter_format", filter_format)
.Finalize(&op.node_def));
};
set_op({{1, 1, 0, 1}}, "VALID",
"NHWC", "HWIO");
INFER_ERROR("must be rank 4", op, "[4,4];[2,1,1,1]");
INFER_ERROR("must be rank 4", op, "[1,4,4,1];[2,1,1]");
set_op({{1, 1, 0, 1}}, "VALID",
"NHWC", "HWIO");
INFER_ERROR("must be > 0", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,2,2,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,2,2,1];[2,2,1,1]", "[d0_0,1,1,d1_3]");
set_op({{1, 2, 2, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,3,3,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 2, 1, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,3,3,1];[1,1,1,1]", "[d0_0,2,3,d1_3]");
set_op({{1, 1, 2, 1}}, "VALID",
"NHWC", "HWIO");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,?,4,1];[2,1,1,1]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,?,1];[2,1,1,1]", "[d0_0,3,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[2,1,1,1]", "[d0_0,3,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[?,1,1,1]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,?,1,1]", "[d0_0,3,?,d1_3]");
INFER_ERROR(
"Depth of input (10) is not a multiple of input depth of filter (10000)",
op, "[1,2,2,10];[1,1,10000,20]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,2,2];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 1, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,2,2];[2,2,1,1]", "[d0_0,d1_3,1,1]");
set_op({{1, 1, 2, 2}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,3,3];[1,1,1,1]", "[d0_0,d1_3,2,2]");
set_op({{1, 1, 2, 1}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,3,3];[1,1,1,1]", "[d0_0,d1_3,2,3]");
set_op({{1, 1, 1, 2}}, "VALID",
"NCHW", "HWIO");
INFER_OK(op, "[1,1,4,4];[2,1,1,1]", "[d0_0,d1_3,3,2]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 2, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 1, 1, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[1,4,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,?,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,4,?,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[1,4,4,?];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
INFER_OK(op, "[?,4,4,1];[?,?,?,?]", "[d0_0,d0_1,d0_2,d1_3]");
set_op({{1, 2, 2, 1}}, "SAME", "NHWC",
"HWIO");
INFER_OK(op, "[?,4,4,1];[?,?,?,?]", "[d0_0,2,2,d1_3]");
INFER_OK(op, "[1,?,4,1];[?,?,?,?]", "[d0_0,?,2,d1_3]");
INFER_OK(op, "[1,4,?,1];[?,?,?,?]", "[d0_0,2,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[?,?,?,?]", "[d0_0,2,2,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 2, 1, 4, 0, 0});
INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,6,9,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 1, 0, 1, 2, 0, 0});
INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,3,5,d1_3]");
set_op({{1, 2, 2, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 3, 2, 1, 0, 0, 0});
INFER_OK(op, "[1,4,4,2];[2,2,2,3]", "[d0_0,4,2,d1_3]");
set_op({{1, 1, 2, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 1, 1, 2, 2, 0, 0});
INFER_OK(op, "[1,2,2,1];[2,1,1,1]", "[d0_0,3,3,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,1,1,1]", "[d0_0,5,4,d1_3]");
INFER_OK(op, "[1,?,4,1];[2,1,1,1]", "[d0_0,?,4,d1_3]");
INFER_OK(op, "[1,4,?,1];[2,1,1,1]", "[d0_0,5,?,d1_3]");
INFER_OK(op, "[1,4,4,?];[2,1,1,1]", "[d0_0,5,4,d1_3]");
INFER_OK(op, "[1,4,4,1];[?,1,1,1]", "[d0_0,?,4,d1_3]");
INFER_OK(op, "[1,4,4,1];[2,?,1,1]", "[d0_0,5,?,d1_3]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, -1, 0, 0, 0, 0});
INFER_ERROR("must be nonnegative", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must contain 8 values", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT", "NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must contain 8 values", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{1, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("batch or depth dimensions", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "EXPLICIT",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 1, 0});
INFER_ERROR("batch or depth dimensions", op, "[1,2,2,1];[1,1,1,1]");
set_op({{1, 1, 1, 1}}, "VALID",
"NHWC", "HWIO",
{0, 0, 0, 0, 0, 0, 0, 0});
INFER_ERROR("must be empty", op, "[1,2,2,1];[1,1,1,1]");
}
TEST_P(Conv2DShapeTest, Conv2DDilatedShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& dilations,
const std::vector<int32>& strides, const string& padding,
const string& data_format,
const std::vector<int32>& explicit_paddings = {}) {
string format;
if (op.name == "Conv")
format = (data_format == "NHWC") ? "CHANNELS_LAST" : "CHANNELS_FIRST";
else
format = data_format;
TF_CHECK_OK(NodeDefBuilder("test", "Conv2D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("dilations", dilations)
.Attr("strides", strides)
.Attr("padding", padding)
.Attr("explicit_paddings", explicit_paddings)
.Attr("data_format", format)
.Attr("batch_dims", 1)
.Attr("groups", 1)
.Finalize(&op.node_def));
};
  set_op({{1, 2, 1}}, {{1, 1, 1, 1}}, "VALID", "NHWC");
  INFER_ERROR("contain 4 values", op, "[1,2,2,1];[1,1,1,1]");
  set_op({{1, 0, 1, 1}}, {{1, 1, 1, 1}}, "VALID", "NHWC");
  INFER_ERROR("must be >= 1", op, "[1,2,2,1];[1,1,1,1]");
  set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,2,2,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
  set_op({{1, 2, 1, 1}}, {{1, 2, 1, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,2,4,d1_3]");
  set_op({{1, 2, 1, 1}}, {{1, 2, 2, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,2,2,d1_3]");
  set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,1,3,d1_3]");
  set_op({{1, 2, 1, 1}}, {{1, 2, 1, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,1,3,d1_3]");
  set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}}, "VALID", "NHWC");
  INFER_OK(op, "[1,5,5,1];[3,3,1,1]", "[d0_0,2,1,d1_3]");
  set_op({{1, 1, 2, 1}}, {{1, 1, 1, 1}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,2,2];[1,1,1,1]", "[d0_0,d1_3,2,2]");
  set_op({{1, 1, 2, 1}}, {{1, 1, 2, 1}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,4,4];[1,1,1,1]", "[d0_0,d1_3,2,4]");
  set_op({{1, 1, 2, 1}}, {{1, 1, 2, 2}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,4,4];[1,1,1,1]", "[d0_0,d1_3,2,2]");
  set_op({{1, 1, 2, 1}}, {{1, 1, 1, 1}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,1,3]");
  set_op({{1, 1, 2, 1}}, {{1, 1, 2, 1}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,1,3]");
  set_op({{1, 1, 1, 2}}, {{1, 1, 2, 2}}, "VALID", "NCHW");
  INFER_OK(op, "[1,1,5,5];[3,3,1,1]", "[d0_0,d1_3,2,1]");
  set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}}, "SAME", "NHWC");
  INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
  set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}}, "SAME", "NHWC");
  INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
  set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}}, "SAME", "NHWC");
  INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,2,2,d1_3]");
  set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}}, "SAME", "NHWC");
  INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,d0_1,d0_2,d1_3]");
  set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1}}, "EXPLICIT", "NHWC",
         {0, 0, 0, 2, 1, 4, 0, 0});
  INFER_OK(op, "[1,4,4,1];[1,1,1,1]", "[d0_0,6,9,d1_3]");
  set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}}, "EXPLICIT", "NHWC",
         {0, 0, 1, 0, 1, 2, 0, 0});
  INFER_OK(op, "[1,3,3,1];[2,2,1,1]", "[d0_0,2,4,d1_3]");
  set_op({{1, 1, 2, 1}}, {{1, 2, 2, 1}}, "EXPLICIT", "NHWC",
         {0, 0, 3, 2, 1, 0, 0, 0});
  INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,4,2,d1_3]");
  set_op({{1, 2, 2, 1}}, {{1, 1, 1, 1}}, "EXPLICIT", "NHWC",
         {0, 0, 1, 1, 2, 2, 0, 0});
  INFER_OK(op, "[1,4,4,1];[2,2,1,1]", "[d0_0,4,6,d1_3]");
}
TEST(CommonShapeFnsTest, Conv3DShapeRankTest) {
ShapeInferenceTestOp op("Conv3D");
INFER_ERROR("must be rank 5", op, "[4,4];[2,1,1,1]");
INFER_ERROR("must be rank 5", op, "[1,4,4,1];[2,1,1]");
}
TEST(CommonShapeFnsTest, Conv3DGroupsTest) {
ShapeInferenceTestOp op("Conv3D");
auto set_op = [&op](const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", "Conv3D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_ERROR(
"Depth of input (10) is not a multiple of input depth of filter (6)", op,
"[1,2,2,2,10];[1,1,1,6,20]");
INFER_ERROR(
"Depth of output (1) is not a multiple of the number of groups (2)", op,
"[1,2,2,2,10];[1,1,1,5,1]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,5,2]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,10];[2,2,2,1,10]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
INSTANTIATE_TEST_SUITE_P(CommonShapeFnsTest, Conv2DShapeTest,
::testing::Values("Conv2D", "Conv"));
class Conv3DShapeTest : public ::testing::TestWithParam<string> {};
TEST_P(Conv3DShapeTest, Conv3DShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({{1, 1, 1, 0, 1}}, "VALID");
INFER_ERROR("must be > 0", op, "[1,2,2,2,1];[1,1,1,1,1]");
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,?,2,2,1];[1,1,1,1,1]", "[d0_0,?,2,2,d1_4]");
INFER_OK(op, "[1,2,?,2,1];[1,1,1,1,1]", "[d0_0,2,?,2,d1_4]");
INFER_OK(op, "[1,2,2,?,1];[1,1,1,1,1]", "[d0_0,2,2,?,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[?,1,1,1,1]", "[d0_0,?,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,?,1,1,1]", "[d0_0,2,?,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,?,1,1]", "[d0_0,2,2,?,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,?,1]", "[d0_0,2,2,2,d1_4]");
INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,?]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,2,2,2,1];[2,2,2,1,1]", "[d0_0,1,1,1,d1_4]");
set_op({{1, 2, 2, 2, 1}}, "VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
set_op({{1, 2, 1, 1, 1}}, "VALID");
INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,3,3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[?,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,?,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,?,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,?,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,?];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[?,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,?,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,?,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,?,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,?]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
set_op({{1, 2, 3, 4, 1}}, "SAME");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[?,4,9,4,1];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,?,9,4,1];[2,2,2,1,1]", "[d0_0,?,3,1,d1_4]");
INFER_OK(op, "[1,4,?,4,1];[2,2,2,1,1]", "[d0_0,2,?,1,d1_4]");
INFER_OK(op, "[1,4,9,?,1];[2,2,2,1,1]", "[d0_0,2,3,?,d1_4]");
INFER_OK(op, "[1,4,9,4,?];[2,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[?,2,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,?,2,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,?,1,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,?,1]", "[d0_0,2,3,1,d1_4]");
INFER_OK(op, "[1,4,9,4,1];[2,2,2,1,?]", "[d0_0,2,3,1,d1_4]");
}
TEST_P(Conv3DShapeTest, Conv3DDilatedShapeTest) {
const string op_name = GetParam();
ShapeInferenceTestOp op(op_name);
auto set_op = [&op](const std::vector<int32>& dilations,
const std::vector<int32>& strides,
const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", op.name)
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("dilations", dilations)
.Attr("strides", strides)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
  set_op({{1, 2, 1, 1}}, {{1, 1, 1, 1, 1}}, "VALID");
  INFER_ERROR("contain 5 values", op, "[1,2,2,2,1];[1,1,1,1,1]");
  set_op({{1, 2, 0, 1, 1}}, {{1, 1, 1, 1, 1}}, "VALID");
  INFER_ERROR("must be >= 1", op, "[1,2,2,2,1];[1,1,1,1,1]");
  set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}}, "VALID");
  INFER_OK(op, "[1,2,2,2,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
  set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}}, "VALID");
  INFER_OK(op, "[1,3,2,2,1];[2,2,2,1,1]", "[d0_0,1,1,1,d1_4]");
  set_op({{1, 2, 1, 1, 1}}, {{1, 2, 2, 2, 1}}, "VALID");
  INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,2,2,d1_4]");
  set_op({{1, 2, 1, 1, 1}}, {{1, 2, 1, 1, 1}}, "VALID");
  INFER_OK(op, "[1,3,3,3,1];[1,1,1,1,1]", "[d0_0,2,3,3,d1_4]");
  set_op({{1, 2, 1, 1, 1}}, {{1, 1, 1, 1, 1}}, "SAME");
INFER_OK(op, "[1,4,4,4,1];[2,2,2,1,1]", "[d0_0,d0_1,d0_2,d0_3,d1_4]");
}
INSTANTIATE_TEST_SUITE_P(CommonShapeFnsTest, Conv3DShapeTest,
::testing::Values("Conv3D", "Conv"));
TEST(CommonShapeFnsTest, DepthwiseConv2DShapeTest) {
ShapeInferenceTestOp op("DepthwiseConv2dNative");
std::vector<int32> strides = {{1, 1, 1, 1}};
TF_CHECK_OK(NodeDefBuilder("test", "DepthwiseConv2dNative")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", "VALID")
.Attr("data_format", "NHWC")
.Finalize(&op.node_def));
INFER_OK(op, "[1,2,2,3];[1,1,3,4]", "[d0_0,2,2,12]");
INFER_ERROR("Dimensions must be equal, but are 3 and 12", op,
"[1,2,2,3];[1,1,12,4]");
INFER_OK(op, "[1,2,2,3];[1,1,3,4]", "[d0_0,2,2,12]");
INFER_OK(op, "[1,?,2,3];[1,1,3,4]", "[d0_0,?,2,12]");
INFER_OK(op, "[1,2,?,3];[1,1,3,4]", "[d0_0,2,?,12]");
INFER_OK(op, "[1,2,2,3];[?,1,3,4]", "[d0_0,?,2,12]");
INFER_OK(op, "[1,2,2,3];[1,?,3,4]", "[d0_0,2,?,12]");
INFER_OK(op, "[1,2,2,3];[1,1,?,4]", "[d0_0,2,2,12]");
INFER_OK(op, "[1,2,2,?];[1,1,?,4]", "[d0_0,2,2,?]");
INFER_OK(op, "[1,2,2,3];[1,1,3,?]", "[d0_0,2,2,?]");
TF_CHECK_OK(NodeDefBuilder("test", "DepthwiseConv2dNative")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("padding", "VALID")
.Attr("data_format", "NCHW")
.Finalize(&op.node_def));
INFER_OK(op, "[1,3,2,2];[1,1,3,4]", "[d0_0,12,2,2]");
}
TEST(CommonShapeFnsTest, AvgPool2DShapeTest) {
ShapeInferenceTestOp op("AvgPool");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "AvgPool")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,1]", "[d0_0,2,2,d0_3]");
set_op({1, 1, 2, 1}, {1, 2, 1, 1}, "VALID", "NHWC");
INFER_OK(op, "[1,4,4,1]", "[d0_0,3,2,d0_3]");
INFER_OK(op, "[1,?,4,1]", "[d0_0,?,2,d0_3]");
INFER_OK(op, "[1,4,?,1]", "[d0_0,3,?,d0_3]");
set_op({{1, 1, 1, 2}}, {1, 1, 2, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,1,4,4]", "[d0_0,d0_1,3,2]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "VALID", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4]", "[d0_0,d0_1,4,6,4]");
INFER_OK(op, "[5,7,?,?,4]", "[d0_0,d0_1,?,?,4]");
INFER_OK(op, "[?,?,?,?,4]", "[d0_0,d0_1,?,?,4]");
INFER_ERROR("must be 4 or 32, but is 3", op, "[2,5,7,11,3]");
INFER_ERROR("Shape must be rank", op, "[4,4]");
}
TEST(CommonShapeFnsTest, MaxPool2DShapeTest) {
ShapeInferenceTestOp op("MaxPool");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPool")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 2}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,2]", "[d0_0,2,2,1]");
set_op({1, 3, 1, 1}, {1, 1, 1, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,7,5,5]", "[d0_0,3,5,5]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "SAME", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[5,7,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[?,?,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_ERROR("must be 4 or 32, but is 8", op, "[2,3,5,7,8]");
}
TEST(CommonShapeFnsTest, MaxPoolV22DShapeTest) {
ShapeInferenceTestOp op("MaxPoolV2");
Tensor ksizes_tensor, strides_tensor;
auto set_op = [&op, &ksizes_tensor, &strides_tensor](
const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding,
const string& data_format) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPoolV2")
.Input("input", 0, DT_FLOAT)
.Input("ksize", 1, DT_INT32)
.Input("strides", 2, DT_INT32)
.Attr("padding", padding)
.Attr("data_format", data_format)
.Finalize(&op.node_def));
ksizes_tensor = test::AsTensor<int32>(ksizes);
op.input_tensors.resize(3);
op.input_tensors[0] = nullptr;
op.input_tensors[1] = &ksizes_tensor;
strides_tensor = test::AsTensor<int32>(strides);
op.input_tensors[2] = &strides_tensor;
};
set_op({1, 1, 1, 1}, {1, 1, 1, 2}, "VALID", "NHWC");
INFER_OK(op, "[1,2,2,2];[4];[4]", "[d0_0,2,2,1]");
set_op({1, 3, 1, 1}, {1, 1, 1, 1}, "VALID", "NCHW");
INFER_OK(op, "[1,7,5,5];[4];[4]", "[d0_0,3,5,5]");
set_op({{1, 1, 1, 1}}, {1, 1, 2, 2}, "SAME", "NCHW_VECT_C");
INFER_OK(op, "[2,3,5,7,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[5,7,?,?,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_OK(op, "[?,?,?,?,4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3,4]");
INFER_ERROR("must be 4 or 32, but is 8", op, "[2,3,5,7,8];[4];[4]");
}
TEST(CommonShapeFnsTest, Pool3DShapeTest) {
ShapeInferenceTestOp op("MaxPool3D");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& ksizes, const string& padding) {
TF_CHECK_OK(NodeDefBuilder("test", "MaxPool3D")
.Input("input", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("ksize", ksizes)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 2, 3, 4, 1}, {1, 1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,24,24,24,1]", "[d0_0,12,8,6,d0_4]");
set_op({1, 1, 3, 4, 1}, {1, 1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,?,24,24,1]", "[d0_0,?,8,6,d0_4]");
}
TEST(CommonShapeFnsTest, UnknownShapeTest) {
{
ShapeInferenceTestOp op("QueueDequeue");
TF_CHECK_OK(NodeDefBuilder("test", "QueueDequeue")
.Input("handle", 0, DT_STRING_REF)
.Attr("component_types", {DT_FLOAT})
.Finalize(&op.node_def));
INFER_OK(op, "[1]", "?");
}
{
ShapeInferenceTestOp op("QueueDequeue");
TF_CHECK_OK(NodeDefBuilder("test", "QueueDequeue")
.Input("handle", 0, DT_STRING_REF)
.Attr("component_types", {DT_FLOAT, DT_FLOAT, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[1]", "?;?;?");
}
}
TEST(CommonShapeFnsTest, Reduce_ShapeFn) {
ShapeInferenceTestOp op("Sum");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Sum")
.Input("input", 0, DT_FLOAT)
.Input("reduction_indices", 1, DT_INT32)
.Attr("keep_dims", false)
.Finalize(&op.node_def));
INFER_OK(op, "[2,4,5];[2]", "?");
INFER_OK(op, "?;[2]", "?");
Tensor indices = test::AsTensor<int32>({1, 2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0]");
indices = test::AsTensor<int32>({-1, -2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0]");
indices = test::AsScalar<int32>(0);
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[]", "[d0_1,d0_2]");
indices = test::AsScalar<int32>(-4);
op.input_tensors[1] = &indices;
INFER_ERROR("Invalid reduction dimension", op, "[2,4,5];[]");
indices = test::AsTensor<int32>({});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[0]", "[d0_0,d0_1,d0_2]");
TF_ASSERT_OK(NodeDefBuilder("test", "Sum")
.Input("input", 0, DT_FLOAT)
.Input("reduction_indices", 1, DT_INT32)
.Attr("keep_dims", true)
.Finalize(&op.node_def));
indices = test::AsTensor<int32>({-1, -2});
op.input_tensors[1] = &indices;
INFER_OK(op, "[2,4,5];[2]", "[d0_0, 1, 1]");
op.input_tensors[1] = nullptr;
INFER_OK(op, "[?,?,?];?", "[?,?,?]");
INFER_OK(op, "[?,?,?];[2]", "[?,?,?]");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "[?,?,?];[?,?]");
op.graph_def_version = 20;
INFER_OK(op, "[?,?,?];[?,?]", "[?,?,?]");
op.input_tensors[1] = &indices;
indices = test::AsTensor<int32>({-1, -2}, TensorShape({2, 1}));
INFER_OK(op, "[2,4,5];[2,1]", "[d0_0, 1, 1]");
indices = test::AsTensor<int32>({-1, -2}, TensorShape({1, 2}));
INFER_OK(op, "[2,4,5];[1,2]", "[d0_0, 1, 1]");
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownShapes) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{Unknown(), Unknown(), Unknown()}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownDims) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1, -1}), S({-1}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidIndicesRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1}), S({-1}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidNumElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({4}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_InvalidRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({4})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
EXPECT_EQ(error::INVALID_ARGUMENT,
ValidateSparseTensor(&c, indices, values, shape).code());
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownNumIndexElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({-1, 3}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownNumValueElements) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({-1}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownIndexRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, -1}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor_UnknownShapeRank) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({-1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
TEST(CommonShapeFnsTest, ValidateSparseTensor) {
NodeDef def;
InferenceContext c(TF_GRAPH_DEF_VERSION, def, MakeOpDef(3, 1),
{S({5, 3}), S({5}), S({3})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
auto indices = c.input(0);
auto values = c.input(1);
auto shape = c.input(2);
TF_EXPECT_OK(ValidateSparseTensor(&c, indices, values, shape));
}
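// Scattering a [2,2] input across one group of two participants along
// dimension 0 should yield a [1,2] output.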
TEST(CommonShapeFnsTest, ReduceScatterSuccess) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 2}), S({1, 2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
TF_EXPECT_OK(ReduceScatterShape(&c));
ShapeHandle output = c.output(0);
EXPECT_EQ(1, c.Value(c.Dim(output, 0)));
EXPECT_EQ(2, c.Value(c.Dim(output, 1)));
}
TEST(CommonShapeFnsTest, ReduceScatter_MissingScatterDimension) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({2, 2}), S({1, 2}), S({1})}, {}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
TF_EXPECT_OK(ReduceScatterShape(&c));
ShapeHandle output = c.output(0);
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 0)));
EXPECT_FALSE(c.ValueKnown(c.Dim(output, 1)));
}
TEST(CommonShapeFnsTest, ReduceScatter_NotEvenlyDivisible) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 3}), S({1, 2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
EXPECT_THAT(ReduceScatterShape(&c),
tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"Dimension size must be evenly divisible by 2 but is 3"));
}
TEST(CommonShapeFnsTest, ReduceScatter_INVALID_GROUP_ASSIGNMENT) {
OpRegistrationData op_reg_data;
TF_CHECK_OK(OpDefBuilder("XlaReduceScatter")
.Input("input: float")
.Input("group_assignment: int32")
.Input("scatter_dimension: int32")
.Output("output: float")
.Finalize(&op_reg_data));
OpDef op_def = op_reg_data.op_def;
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("test", "XlaReduceScatter")
.Input("input", 0, DT_FLOAT)
.Input("group_assignment", 0, DT_INT32)
.Input("scatter_dimension", 0, DT_INT32)
.Finalize(&def));
const Tensor scatter_dimension = Tensor(0);
InferenceContext c(TF_GRAPH_DEF_VERSION, def, op_def,
{S({3, 3}), S({2}), S({1})},
{nullptr, nullptr, &scatter_dimension}, {}, {});
EXPECT_EQ(3, c.num_inputs());
EXPECT_EQ(1, c.num_outputs());
EXPECT_THAT(ReduceScatterShape(&c),
tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"ReduceScatter group_assignment should be rank 2"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/common_shape_fns.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/common_shape_fns_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bfb04cd2-590c-4ad3-b474-00e48e6de835 | cpp | tensorflow/tensorflow | op_kernel | tensorflow/core/framework/op_kernel.cc | tensorflow/core/framework/op_kernel_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/kernel_def_util.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/platform_strings.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
const char* kJitKernelLabel = "JITCompiledKernel";
const char* kDisableJitKernelsEnvVar = "TF_DISABLE_JIT_KERNELS";
namespace {
Status MatchSignatureHelper(const DataTypeSlice expected_inputs,
const DataTypeSlice expected_outputs,
const DataTypeSlice inputs,
const DataTypeSlice outputs) {
bool signature_mismatch = false;
if (inputs.size() != expected_inputs.size()) signature_mismatch = true;
for (size_t i = 0; !signature_mismatch && i < inputs.size(); ++i) {
if (!TypesCompatible(expected_inputs[i], inputs[i])) {
signature_mismatch = true;
}
}
if (outputs.size() != expected_outputs.size()) signature_mismatch = true;
for (size_t i = 0; !signature_mismatch && i < outputs.size(); ++i) {
if (!TypesCompatible(expected_outputs[i], outputs[i])) {
signature_mismatch = true;
}
}
if (signature_mismatch) {
return errors::InvalidArgument(
"Signature mismatch, have: ", DataTypeSliceString(inputs), "->",
DataTypeSliceString(outputs),
" expected: ", DataTypeSliceString(expected_inputs), "->",
DataTypeSliceString(expected_outputs));
}
return absl::OkStatus();
}
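// Returns the set of op types listed in the TF_DEBUG_OPS_TO_LOG_NODEDEFS
// environment variable; NodeDefs of kernels for these ops are logged at
// construction time.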
const absl::flat_hash_set<std::string>* GetOpNodeDefsToLogFromEnv() {
auto* result = new absl::flat_hash_set<std::string>;
const char* env = getenv("TF_DEBUG_OPS_TO_LOG_NODEDEFS");
if (!env) {
return result;
}
std::vector<absl::string_view> ops = absl::StrSplit(env, ',');
LOG(INFO) << "Will log NodeDefs from the following ops: ";
for (absl::string_view op : ops) {
result->insert(std::string(op));
LOG(INFO) << " |" << op << "|";
}
return result;
}
bool ShouldLogNodeDef(OpKernel* op_kernel) {
static const absl::flat_hash_set<std::string>& ops_to_log_nodedefs =
*GetOpNodeDefsToLogFromEnv();
return ops_to_log_nodedefs.count(op_kernel->type_string());
}
}  // namespace
OpKernel::OpKernel(OpKernelConstruction* context) : OpKernel(context, false) {}
OpKernel::OpKernel(OpKernelConstruction* context, bool is_deferred)
: props_(context->props_),
input_memory_types_(context->input_memory_types().begin(),
context->input_memory_types().end()),
output_memory_types_(context->output_memory_types().begin(),
context->output_memory_types().end()),
input_name_map_(context->num_inputs()),
output_name_map_(context->num_outputs()),
name_view_(props_->node_def.name()),
type_string_view_(props_->node_def.op()),
graph_def_version_(context->graph_def_version()),
is_deferred_(is_deferred) {
OP_REQUIRES_OK(context,
NameRangesForNode(props_->node_def, *props_->op_def,
&input_name_map_, &output_name_map_));
OP_REQUIRES_OK(context, CheckOpDeprecation(*props_->op_def,
context->graph_def_version()));
expensive_ = context->device_type() != DeviceType(DEVICE_GPU) &&
!DeviceFactory::IsPluggableDevice(
DeviceTypeString(context->device_type()));
if (ShouldLogNodeDef(this)) {
LOG(INFO) << "NodeDef for " << name() << ":\n" << def().ShortDebugString();
}
}
OpKernel::OpKernel(OpKernelConstruction* context, NodeDef&& custom_def,
bool is_deferred)
: props_(std::make_shared<const NodeProperties>(
context->props_->op_def, std::move(custom_def),
context->props_->input_types, context->props_->output_types)),
input_memory_types_(context->input_memory_types().begin(),
context->input_memory_types().end()),
output_memory_types_(context->output_memory_types().begin(),
context->output_memory_types().end()),
input_name_map_(context->num_inputs()),
output_name_map_(context->num_outputs()),
name_view_(props_->node_def.name()),
type_string_view_(props_->node_def.op()),
graph_def_version_(context->graph_def_version()),
is_deferred_(is_deferred) {
OP_REQUIRES_OK(context,
NameRangesForNode(props_->node_def, *props_->op_def,
&input_name_map_, &output_name_map_));
OP_REQUIRES_OK(context, CheckOpDeprecation(*props_->op_def,
context->graph_def_version()));
expensive_ = context->device_type() != DeviceType(DEVICE_GPU) &&
!DeviceFactory::IsPluggableDevice(
DeviceTypeString(context->device_type()));
}
OpKernel::~OpKernel() {}
Status OpKernel::InputRange(StringPiece input_name, int* start,
int* stop) const {
const auto result = input_name_map_.find(input_name);
if (result == input_name_map_.end()) {
return errors::InvalidArgument("Unknown input name: ", input_name);
} else {
*start = result->second.first;
*stop = result->second.second;
return absl::OkStatus();
}
}
Status OpKernel::OutputRange(StringPiece output_name, int* start,
int* stop) const {
const auto result = output_name_map_.find(output_name);
if (result == output_name_map_.end()) {
return errors::InvalidArgument("Unknown output name: ", output_name);
} else {
*start = result->second.first;
*stop = result->second.second;
return absl::OkStatus();
}
}
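// Builds a trace string such as "(float[2,3];int32[4])" from the kernel's
// inputs; resource, variant, and ref inputs contribute empty entries.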
string OpKernel::ShapeTraceString(const OpKernelContext& ctx) const {
int num_inputs = ctx.num_inputs();
if (num_inputs == 0) return "";
std::vector<string> tensor_shapes;
tensor_shapes.reserve(num_inputs);
for (int i = 0; i < num_inputs; i++) {
if (!ctx.has_input(i)) {
tensor_shapes.emplace_back();
continue;
}
DataType input_dtype = ctx.input_dtype(i);
if (input_dtype == DataType::DT_RESOURCE ||
input_dtype == DataType::DT_VARIANT || IsRefType(input_dtype)) {
tensor_shapes.emplace_back();
continue;
}
tensor_shapes.emplace_back(strings::StrCat(
DataTypeString(input_dtype), ctx.input(i).shape().DebugString()));
}
return strings::StrCat("(", absl::StrJoin(tensor_shapes, ";"), ")");
}
string OpKernel::TraceString(const OpKernelContext& ctx, bool verbose) const {
string trace_string =
tsl::profiler::TraceMeOp(name_view(), type_string_view());
if (verbose) {
string shape = ShapeTraceString(ctx);
if (!shape.empty()) {
trace_string = tsl::profiler::TraceMeEncode(std::move(trace_string),
{{"shape", shape}});
}
}
return trace_string;
}
void AsyncOpKernel::Compute(OpKernelContext* context) {
Notification n;
ComputeAsync(context, [&n]() { n.Notify(); });
n.WaitForNotification();
}
OpKernelConstruction::OpKernelConstruction(
DeviceType device_type, DeviceBase* device, Allocator* allocator,
FunctionLibraryRuntime* flib, ResourceMgr* resource_mgr,
const std::shared_ptr<const NodeProperties>& props,
const MemoryTypeSlice& input_memory_types,
const MemoryTypeSlice& output_memory_types, int graph_def_version,
Status* status)
: device_type_(std::move(device_type)),
device_(device),
allocator_(allocator),
flib_(flib),
resource_mgr_(resource_mgr),
props_(props),
input_memory_types_(input_memory_types),
output_memory_types_(output_memory_types),
graph_def_version_(graph_def_version),
status_(status) {}
bool OpKernelConstruction::HasAttr(StringPiece attr_name) const {
return HasNodeAttr(def(), attr_name);
}
void OpKernelConstruction::SetStatus(const Status& status) {
status_->Update(status);
}
Status OpKernelConstruction::MatchSignature(
const DataTypeSlice expected_inputs, const DataTypeSlice expected_outputs) {
return MatchSignatureHelper(expected_inputs, expected_outputs,
props_->input_types, props_->output_types);
}
Status OpKernelConstruction::allocate_temp(DataType type,
const TensorShape& shape,
Tensor* out_temp) {
AllocationAttributes attr;
attr.allocation_will_be_logged = true;
Tensor new_temp(allocator_, type, shape, attr);
if (!new_temp.IsInitialized()) {
return errors::ResourceExhausted(
"OOM when allocating temporary tensor with shape", shape.DebugString());
}
if (LogMemory::IsEnabled()) {
LogMemory::RecordTensorAllocation(
def().name(), LogMemory::OP_KERNEL_CONSTRUCTION_STEP_ID, new_temp);
}
*out_temp = new_temp;
return absl::OkStatus();
}
Status OpKernelConstruction::allocate_temp(DataType type,
const TensorShape& shape,
Tensor* out_temp,
AllocatorAttributes allocator_attr) {
if (allocator_attr.scope_id != 0) {
return errors::InvalidArgument(
"ScopedAllocator cannot be used via OpKernelConstruction.");
}
Allocator* a = device_->GetAllocator(allocator_attr);
AllocationAttributes attr;
attr.allocation_will_be_logged = true;
Tensor new_temp(a, type, shape, attr);
if (!new_temp.IsInitialized()) {
return errors::ResourceExhausted(
"OOM when allocating temporary tensor with shape", shape.DebugString());
}
if (LogMemory::IsEnabled()) {
LogMemory::RecordTensorAllocation(
def().name(), LogMemory::OP_KERNEL_CONSTRUCTION_STEP_ID, new_temp);
}
*out_temp = new_temp;
return absl::OkStatus();
}
const int OpKernelContext::Params::kNeverForward;
const int OpKernelContext::Params::kNoReservation;
OpKernelContext::OpKernelContext(Params* params)
: OpKernelContext(
params, static_cast<int>(params->op_kernel->output_types().size())) {}
OpKernelContext::OpKernelContext(Params* params, int num_outputs)
: params_(params), outputs_(num_outputs) {
if (params_->track_allocations) {
tracking_state_ = std::make_unique<TrackingState>();
}
params_->ensure_eigen_gpu_device();
if (params_->eigen_gpu_device != nullptr) {
Allocator* eigen_gpu_allocator = get_allocator(AllocatorAttributes());
Status s = params_->device->ReinitializeGpuDevice(
this, params_->eigen_gpu_device, params_->op_device_context,
eigen_gpu_allocator);
if (!s.ok()) {
SetStatus(s);
}
}
}
OpKernelContext::~OpKernelContext() {
for (TensorValue& value : outputs_) {
if (!value.is_ref()) {
delete value.tensor;
}
}
if (params_->track_allocations &&
!tracking_state_->wrapped_allocators.empty()) {
LOG(WARNING) << "OpKernelContext is tracking allocations but they are not "
<< "being consumed by the StepStatsCollector.";
for (auto& wrapped_allocator : tracking_state_->wrapped_allocators) {
wrapped_allocator.second->GetRecordsAndUnRef();
}
}
}
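// Returns the device allocator selected by `attr`, wrapped in a (cached)
// TrackingAllocator when allocation tracking is enabled for this context.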
Allocator* OpKernelContext::get_allocator(AllocatorAttributes attr) {
Allocator* allocator = nullptr;
if (TF_PREDICT_FALSE(attr.scope_id > 0)) {
allocator = params_->device->GetScopedAllocator(attr, step_id());
CHECK(allocator);
} else {
allocator = params_->device->GetAllocator(attr);
}
if (TF_PREDICT_FALSE(track_allocations())) {
DCHECK(tracking_state_);
mutex_lock lock(tracking_state_->mu);
for (const auto& wrapped : tracking_state_->wrapped_allocators) {
if (wrapped.first == allocator) {
return wrapped.second;
}
}
TrackingAllocator* wrapped_allocator =
new TrackingAllocator(allocator, params_->track_allocations);
tracking_state_->wrapped_allocators.push_back(
std::make_pair(allocator, wrapped_allocator));
return wrapped_allocator;
} else {
return allocator;
}
}
void OpKernelContext::SetStatus(const Status& status) {
status_.Update(status);
}
Status OpKernelContext::input(StringPiece name, const Tensor** tensor) {
int index;
TF_RETURN_IF_ERROR(get_input_index(name, &index));
if (input_is_ref(index)) {
return errors::InvalidArgument("OpKernel used ref input name '", name,
"' when non-ref input was expected");
}
*tensor = params_->inputs[index].tensor;
return absl::OkStatus();
}
Status OpKernelContext::input_dtype(StringPiece name, DataType* dtype) const {
int index;
TF_RETURN_IF_ERROR(get_input_index(name, &index));
const TensorValue& value(params_->inputs[index]);
*dtype = value.dtype();
return absl::OkStatus();
}
Status OpKernelContext::input_ref_mutex(StringPiece name, mutex** out_mutex) {
int index;
TF_RETURN_IF_ERROR(get_input_index(name, &index));
*out_mutex = input_ref_mutex(index);
return absl::OkStatus();
}
absl::StatusOr<const Tensor*> OpKernelContext::get_input(int index) const {
if (index < 0 || index >= num_inputs() || input_is_ref(index)) {
    return absl::InvalidArgumentError(
        absl::StrCat("Given index was ", index,
                     ", but index of input must be nonnegative, "
                     "less than the number of inputs (",
                     num_inputs(), "), and not a ref."));
}
return params_->inputs[index].tensor;
}
const Tensor& OpKernelContext::input(int index) const {
CHECK_GE(index, 0);
CHECK_LT(index, num_inputs()) << " name: " << op_kernel().name();
CHECK(!input_is_ref(index));
const Tensor& tensor = *params_->inputs[index].tensor;
return tensor;
}
Tensor OpKernelContext::mutable_input(int index, bool lock_held) {
CHECK_GE(index, 0);
CHECK_LT(index, num_inputs());
CHECK(input_is_ref(index));
if (lock_held) {
Tensor& tensor = *params_->inputs[index].tensor;
return tensor;
} else {
tf_shared_lock l(*input_ref_mutex(index));
Tensor& tensor = *params_->inputs[index].tensor;
return tensor;
}
}
void OpKernelContext::replace_ref_input(int index, const Tensor& tensor,
bool lock_held) {
CHECK_GE(index, 0);
CHECK_LT(index, num_inputs());
CHECK(input_is_ref(index));
if (lock_held) {
*params_->inputs[index].tensor = tensor;
} else {
mutex_lock l(*input_ref_mutex(index));
*params_->inputs[index].tensor = tensor;
}
}
void OpKernelContext::forward_ref_input_to_ref_output(int input_index,
int output_index) {
CHECK_GE(input_index, 0);
CHECK_LT(input_index, num_inputs());
CHECK(input_is_ref(input_index));
set_output_ref(output_index, params_->inputs[input_index].mutex_if_ref,
params_->inputs[input_index].tensor);
}
bool OpKernelContext::forward_input_to_output_with_shape(
int input_index, int output_index, const TensorShape& output_shape,
Tensor** output) {
const auto output_attr = params_->output_attr_array == nullptr
? AllocatorAttributes()
: output_alloc_attr(output_index);
std::unique_ptr<Tensor> new_tensor = forward_input(
input_index, output_index, expected_output_dtype(output_index),
output_shape, output_memory_type(output_index), output_attr);
if (new_tensor != nullptr) {
outputs_[output_index] = TensorValue(new_tensor.release());
*output = outputs_[output_index].tensor;
return true;
} else {
return false;
}
}
Status OpKernelContext::forward_input_to_output_with_shape(
StringPiece input_name, StringPiece output_name,
const TensorShape& output_shape, Tensor** output) {
int input_index, output_index;
TF_RETURN_IF_ERROR(get_input_index(input_name, &input_index));
TF_RETURN_IF_ERROR(get_output_index(output_name, &output_index));
if (!forward_input_to_output_with_shape(input_index, output_index,
output_shape, output)) {
    return errors::FailedPrecondition("OpKernel could not forward input '",
                                      input_name, "' to output '", output_name,
                                      "'");
}
return absl::OkStatus();
}
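// Attempts to hand the input buffer over to the output. Returns nullptr
// unless dtype, element count, and memory type all match and either the
// graph optimizer planned the forwarding or the buffer is uniquely owned
// with compatible allocator attributes.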
std::unique_ptr<Tensor> OpKernelContext::forward_input(
int input_index, int output_index, DataType output_dtype,
const TensorShape& output_shape, MemoryType output_memory_type,
const AllocatorAttributes& output_attr) {
CHECK_GE(input_index, 0);
CHECK_LT(input_index, num_inputs());
const TensorValue& input = params_->inputs[input_index];
bool never_forward =
(params_->forward_from_array != nullptr && output_index >= 0 &&
params_->forward_from_array[output_index] == Params::kNeverForward);
if (never_forward) return nullptr;
bool forward_expected =
(params_->forward_from_array != nullptr && output_index >= 0 &&
params_->forward_from_array[output_index] == input_index);
if (!forward_expected && params_->forward_from_array != nullptr) {
for (int i = 0; i < num_outputs(); ++i) {
if (params_->forward_from_array[i] == input_index) {
return nullptr;
}
}
}
if (input.tensor == nullptr || input.is_ref()) {
CHECK(!forward_expected);
return nullptr;
}
if (input_dtype(input_index) != output_dtype) {
CHECK(!forward_expected);
return nullptr;
}
if (input.tensor->shape().num_elements() != output_shape.num_elements()) {
CHECK(!forward_expected);
return nullptr;
}
if (input_memory_type(input_index) != output_memory_type) {
CHECK(!forward_expected);
return nullptr;
}
if (!forward_expected) {
if (!input->RefCountIsOne()) {
return nullptr;
}
const auto input_attr = params_->input_alloc_attrs.empty()
? AllocatorAttributes()
: input_alloc_attr(input_index);
if (!output_attr.IsEqualOrLessRestrictiveThan(input_attr)) {
return nullptr;
}
}
auto output_tensor = std::make_unique<Tensor>();
CHECK(output_tensor->CopyFrom(*input.tensor, output_shape));
return output_tensor;
}
Status OpKernelContext::forward_input_or_allocate_temp(
absl::Span<const int> candidate_input_indices, DataType type,
const TensorShape& shape, const AllocatorAttributes& allocator_attr,
Tensor* out_temp) {
for (int input_index : candidate_input_indices) {
std::unique_ptr<Tensor> new_tensor =
forward_input(input_index, Params::kNoReservation ,
type, shape, DEVICE_MEMORY, allocator_attr);
if (new_tensor != nullptr) {
*out_temp = std::move(*new_tensor);
return absl::OkStatus();
}
}
return allocate_temp(type, shape, out_temp, allocator_attr);
}
Status OpKernelContext::forward_input_or_allocate_output(
absl::Span<const int> candidate_input_indices, int output_index,
const TensorShape& output_shape, Tensor** output, int* forwarded_input) {
for (int input_index : candidate_input_indices) {
if (forward_input_to_output_with_shape(input_index, output_index,
output_shape, output)) {
if (forwarded_input != nullptr) {
*forwarded_input = input_index;
}
return absl::OkStatus();
}
}
if (forwarded_input != nullptr) {
*forwarded_input = -1;
}
return allocate_output(output_index, output_shape, output);
}
Status OpKernelContext::forward_input_or_allocate_output(
absl::Span<const StringPiece> candidate_input_names,
StringPiece output_name, const TensorShape& output_shape, Tensor** output) {
for (const StringPiece& input_name : candidate_input_names) {
if (forward_input_to_output_with_shape(input_name, output_name,
output_shape, output)
.ok()) {
return absl::OkStatus();
}
}
return allocate_output(output_name, output_shape, output);
}
void OpKernelContext::delete_ref_input(int index, bool lock_held) {
CHECK_GE(index, 0);
CHECK_LT(index, num_inputs());
CHECK(input_is_ref(index));
if (lock_held) {
delete params_->inputs[index].tensor;
} else {
mutex_lock l(*input_ref_mutex(index));
delete params_->inputs[index].tensor;
}
}
Status OpKernelContext::mutable_input(StringPiece name, Tensor* tensor,
bool lock_held) {
int index;
TF_RETURN_IF_ERROR(get_input_index(name, &index));
if (!input_is_ref(index)) {
return errors::InvalidArgument("OpKernel used non-ref input name '", name,
"' when ref input was expected");
}
if (lock_held) {
*tensor = *params_->inputs[index].tensor;
} else {
tf_shared_lock l(*input_ref_mutex(index));
*tensor = *params_->inputs[index].tensor;
}
return absl::OkStatus();
}
Status OpKernelContext::replace_ref_input(StringPiece name,
const Tensor& tensor,
bool lock_held) {
int index;
TF_RETURN_IF_ERROR(get_input_index(name, &index));
if (!input_is_ref(index)) {
return errors::InvalidArgument("OpKernel used immutable input name '", name,
"' when ref input was expected");
}
replace_ref_input(index, tensor, lock_held);
return absl::OkStatus();
}
Status OpKernelContext::input_list(StringPiece name, OpInputList* list) {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));
*list = OpInputList(this, start, stop);
return absl::OkStatus();
}
Status OpKernelContext::mutable_input_list(StringPiece name,
OpMutableInputList* list) {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));
*list = OpMutableInputList(this, start, stop);
return absl::OkStatus();
}
Status OpKernelContext::output_list(StringPiece name, OpOutputList* list) {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->OutputRange(name, &start, &stop));
*list = OpOutputList(this, start, stop);
return absl::OkStatus();
}
void OpKernelContext::maybe_initialize_scope_id_set() {
if (allocated_scope_ids_ == nullptr) {
allocated_scope_ids_ = std::make_unique<std::unordered_set<int32>>();
}
}
Status OpKernelContext::allocate_output(int index, const TensorShape& shape,
Tensor** tensor) {
if (index < 0) {
return errors::Internal("allocate_output with bad index=", index,
" kernel=", params_->op_kernel->name());
}
if (index >= num_outputs()) {
return errors::Internal("allocate_output with bad index=", index,
" num_outputs=", num_outputs(),
" kernel=", params_->op_kernel->name());
}
bool forward_expected =
(params_->forward_from_array != nullptr && index >= 0 &&
params_->forward_from_array[index] >= 0);
if (forward_expected) {
return errors::Internal(
"Explicit allocate_output call where input forwarding required. Try "
"turning off the ScopedAllocator optimizer.");
}
AllocatorAttributes attr = output_alloc_attr(index);
return allocate_output(index, shape, tensor, attr);
}
Status OpKernelContext::allocate_output(StringPiece name,
const TensorShape& shape,
Tensor** tensor) {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->OutputRange(name, &start, &stop));
if (stop != start + 1) {
return errors::InvalidArgument("OpKernel used list-valued output name '",
name,
"' when single-valued output was "
"expected");
}
return allocate_output(start, shape, tensor);
}
Status OpKernelContext::allocate_output(StringPiece name,
const TensorShape& shape,
Tensor** tensor,
AllocatorAttributes attr) {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->OutputRange(name, &start, &stop));
if (stop != start + 1) {
return errors::InvalidArgument("OpKernel used list-valued output name '",
name,
"' when single-valued output was "
"expected");
}
return allocate_output(start, shape, tensor, attr);
}
Status OpKernelContext::allocate_tensor(
DataType type, const TensorShape& shape, Tensor* out_tensor,
AllocatorAttributes attr, const AllocationAttributes& allocation_attr) {
Allocator* a = get_allocator(attr);
Tensor new_tensor(
a, type, shape,
AllocationAttributes(
allocation_attr.retry_on_failure,
          /*allocation_will_be_logged=*/true, allocation_attr.freed_by_func));
if (!new_tensor.IsInitialized()) {
return errors::ResourceExhausted(
"OOM when allocating tensor with shape", shape.DebugString(),
" and type ", DataTypeString(type), " on ", params_->device->name(),
" by allocator ", a->Name());
}
if (params_->log_memory) {
LogMemory::RecordTensorAllocation(params_->op_kernel->name(),
params_->step_id, new_tensor);
}
*out_tensor = std::move(new_tensor);
return absl::OkStatus();
}
Status OpKernelContext::allocate_output(int index, const TensorShape& shape,
Tensor** output,
AllocatorAttributes attr) {
if (index < 0) {
return errors::Internal("allocate_output with bad index=", index,
" kernel=", params_->op_kernel->name());
}
if (index >= num_outputs()) {
return errors::Internal("allocate_output with bad index=", index,
" num_outputs=", outputs_.size(),
" kernel=", params_->op_kernel->name());
}
const DataType type = params_->op_kernel->output_type(index);
if (IsRefType(type)) {
return errors::Internal("allocate_output with ref type. index=", index,
" type=", type,
" kernel=", params_->op_kernel->name());
}
if (mutable_output(index) != nullptr) {
return errors::Internal("allocate_output on same index multiple times.",
" index = ", index,
" mutable_output(index) = ", mutable_output(index),
" kernel=", params_->op_kernel->name());
}
if (attr.scope_id > 0) {
maybe_initialize_scope_id_set();
if (!allocated_scope_ids_->insert(attr.scope_id).second) {
return errors::Internal(
"OpKernel ", params_->op_kernel->name(),
" called allocate_output at index ", index, " with scope_id ",
attr.scope_id,
" more than once. Try turning off the ScopedAllocator optimizer.");
}
}
profiler::ScopedMemoryDebugAnnotation op_annotation(
op_kernel().name_view().data(), step_id(), "output", type,
[&shape]() { return shape.DebugString(); });
auto output_tensor = std::make_unique<Tensor>();
Status s = allocate_tensor(type, shape, output_tensor.get(), attr);
if (s.ok()) {
outputs_[index] = TensorValue(output_tensor.release());
*output = outputs_[index].tensor;
}
return s;
}
Status OpKernelContext::allocate_temp(
DataType type, const TensorShape& shape, Tensor* out_temp,
AllocatorAttributes allocator_attr,
const AllocationAttributes& allocation_attr) {
if (allocator_attr.scope_id > 0) {
VLOG(2) << "Warning: OpKernel " << params_->op_kernel->name()
<< " called allocate_temp with scope_id " << allocator_attr.scope_id
<< ". Switch to allocate_output to avoid performance penalty.";
allocator_attr.scope_id = -1;
}
profiler::ScopedMemoryDebugAnnotation op_annotation(
op_kernel().name_view().data(), step_id(), "temp", type,
[&shape]() { return shape.DebugString(); });
Status s =
allocate_tensor(type, shape, out_temp, allocator_attr, allocation_attr);
if (track_allocations() && s.ok() && out_temp->TotalBytes() > 0) {
Allocator* a = get_allocator(allocator_attr);
if (a->TracksAllocationSizes()) {
int64_t alloc_size = a->AllocatedSize(out_temp->tensor_data().data());
record_temp_memory_allocation(alloc_size, *out_temp);
}
} else if (record_memory_consumption_) {
DCHECK(tracking_state_);
mutex_lock l(tracking_state_->stats_mu);
tracking_state_->temp_memory_allocated += out_temp->TotalBytes();
}
return s;
}
Status OpKernelContext::allocate_temp(DataType type, const TensorShape& shape,
Tensor* out_temp,
AllocatorAttributes allocator_attr) {
return allocate_temp(type, shape, out_temp, allocator_attr,
AllocationAttributes());
}
Status OpKernelContext::allocate_temp(DataType type, const TensorShape& shape,
Tensor* out_temp) {
return allocate_temp(type, shape, out_temp, AllocatorAttributes());
}
Status OpKernelContext::get_input_index(StringPiece name,
int* out_index) const {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->InputRange(name, &start, &stop));
if (stop != start + 1) {
return errors::InvalidArgument("OpKernel used list-valued input name '",
name,
"' when single-valued input was "
"expected");
}
*out_index = start;
return absl::OkStatus();
}
Status OpKernelContext::get_output_index(StringPiece name,
int* out_index) const {
int start, stop;
TF_RETURN_IF_ERROR(params_->op_kernel->OutputRange(name, &start, &stop));
if (stop != start + 1) {
return errors::InvalidArgument("OpKernel used list-valued output name '",
name,
"' when single-valued output was "
"expected");
}
*out_index = start;
return absl::OkStatus();
}
Status OpKernelContext::set_output(StringPiece name, const Tensor& tensor) {
int index;
TF_RETURN_IF_ERROR(get_output_index(name, &index));
set_output(index, tensor);
return absl::OkStatus();
}
Status OpKernelContext::set_output(StringPiece name, Tensor&& tensor) {
int index;
TF_RETURN_IF_ERROR(get_output_index(name, &index));
set_output(index, std::move(tensor));
return absl::OkStatus();
}
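// When forwarding into output `index` is forbidden (kNeverForward) and no
// ScopedAllocator buffer was reserved for it, allocates a fresh tensor,
// copies `tensor` into it, and returns true; otherwise returns false.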
bool OpKernelContext::maybe_set_output_by_allocate_and_copy(
int index, const Tensor& tensor) {
bool allocate_and_copy = false;
const bool never_forward =
(params_->forward_from_array != nullptr &&
params_->forward_from_array[index] == Params::kNeverForward);
if (TF_PREDICT_FALSE(never_forward)) {
maybe_initialize_scope_id_set();
if (allocated_scope_ids_->find(output_alloc_attr(index).scope_id) ==
allocated_scope_ids_->end()) {
allocate_and_copy = true;
} else {
LOG(WARNING)
<< "OpKernel " << params_->op_kernel->name()
<< " called both allocate_output and set_output with scope_id "
<< output_alloc_attr(index).scope_id;
}
}
if (TF_PREDICT_FALSE(allocate_and_copy)) {
VLOG(1) << "OpKernelContext set_output index " << index << " tensor "
<< tensor.DebugString() << " never_forward " << never_forward
<< " params_->forward_from_array[index] "
<< params_->forward_from_array[index] << " alloc_attr.scope_id "
<< output_alloc_attr(index).scope_id;
profiler::ScopedMemoryDebugAnnotation op_annotation(
op_kernel().name_view().data(), step_id(), "output", tensor.dtype(),
[&tensor]() { return tensor.shape().DebugString(); });
auto new_tensor = std::make_unique<Tensor>();
Status s = allocate_tensor(tensor.dtype(), tensor.shape(), new_tensor.get(),
output_alloc_attr(index));
TF_CHECK_OK(s);
device()->CopyTensorInSameDevice(&tensor, new_tensor.get(),
op_device_context(), [](const Status&) {});
outputs_[index] = TensorValue(new_tensor.release());
}
return allocate_and_copy;
}
void OpKernelContext::maybe_track_allocations_for_set_output(
const Tensor& tensor) {
if (TF_PREDICT_FALSE(track_allocations()) && tensor.TotalBytes() > 0) {
DCHECK(tracking_state_);
mutex_lock l(tracking_state_->stats_mu);
const auto it = std::find_if(
tracking_state_->temp_tensor_buffer_and_size.begin(),
tracking_state_->temp_tensor_buffer_and_size.end(),
[&tensor](const std::pair<const void*, int64>& e) {
return e.first ==
static_cast<const void*>(tensor.tensor_data().data());
});
if (it != tracking_state_->temp_tensor_buffer_and_size.end()) {
tracking_state_->temp_memory_allocated -= it->second;
tracking_state_->temp_tensor_buffer_and_size.erase(it);
}
}
}
void OpKernelContext::set_output(int index, const Tensor& tensor) {
CHECK_GE(index, 0);
CHECK_LT(index, outputs_.size());
const DataType type = params_->op_kernel->output_type(index);
CHECK(!IsRefType(type));
CHECK_EQ(outputs_[index].tensor, nullptr);
if (TF_PREDICT_TRUE(!maybe_set_output_by_allocate_and_copy(index, tensor))) {
outputs_[index] = TensorValue(new Tensor(tensor));
maybe_track_allocations_for_set_output(*outputs_[index].tensor);
}
}
void OpKernelContext::set_output(int index, Tensor&& tensor) {
CHECK_GE(index, 0);
CHECK_LT(index, outputs_.size());
const DataType type = params_->op_kernel->output_type(index);
CHECK(!IsRefType(type));
CHECK_EQ(outputs_[index].tensor, nullptr);
if (TF_PREDICT_TRUE(!maybe_set_output_by_allocate_and_copy(index, tensor))) {
outputs_[index] = TensorValue(new Tensor(std::move(tensor)));
maybe_track_allocations_for_set_output(*outputs_[index].tensor);
}
}
void OpKernelContext::set_output_ref(int index, mutex* mu,
Tensor* tensor_for_ref) {
CHECK_GE(index, 0);
CHECK_LT(index, outputs_.size());
CHECK(IsRefType(params_->op_kernel->output_type(index)));
outputs_[index] = TensorValue(mu, tensor_for_ref);
}
Status OpKernelContext::set_output_ref(StringPiece name, mutex* mu,
Tensor* tensor_for_ref) {
int index;
TF_RETURN_IF_ERROR(get_output_index(name, &index));
set_output_ref(index, mu, tensor_for_ref);
return absl::OkStatus();
}
Status OpKernelContext::mutable_output(StringPiece name, Tensor** tensor) {
int index;
TF_RETURN_IF_ERROR(get_output_index(name, &index));
*tensor = mutable_output(index);
return absl::OkStatus();
}
bool OpKernelContext::ValidateInputsAreSameShape(OpKernel* op) {
const auto& inputs = params_->inputs;
for (size_t i = 1; i < inputs.size(); ++i) {
if (!inputs[0]->IsSameSize(*(inputs[i].tensor))) {
SetStatus(errors::InvalidArgument(
"Inputs to operation ", op->name(), " of type ", op->type_string(),
" must have the same size and shape. Input 0: ",
inputs[0]->shape().DebugString(), " != input ", i, ": ",
inputs[i]->shape().DebugString()));
return false;
}
}
return true;
}
Status OpKernelContext::MatchSignature(const DataTypeSlice expected_inputs,
const DataTypeSlice expected_outputs) {
DataTypeVector inputs;
for (const TensorValue& t : params_->inputs) {
inputs.push_back(t.dtype());
}
DataTypeVector outputs = params_->op_kernel->output_types();
return MatchSignatureHelper(expected_inputs, expected_outputs, inputs,
outputs);
}
void OpKernelContext::record_temp_memory_allocation(int64_t size,
const Tensor& t) {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
tracking_state_->temp_memory_allocated += size;
tracking_state_->temp_tensor_buffer_and_size.emplace_back(
static_cast<const void*>(t.tensor_data().data()), size);
}
}
int64_t OpKernelContext::temp_memory_allocated() const {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
return tracking_state_->temp_memory_allocated;
} else {
return 0;
}
}
void OpKernelContext::record_persistent_memory_allocation(int64_t size,
int64_t alloc_id) {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
tracking_state_->persistent_memory_allocated += size;
if (alloc_id >= 0) {
tracking_state_->persistent_alloc_ids.push_back(alloc_id);
}
}
}
int64_t OpKernelContext::persistent_memory_allocated() const {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
return tracking_state_->persistent_memory_allocated;
} else {
return 0;
}
}
std::vector<int64_t> OpKernelContext::persistent_alloc_ids() const {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
return std::vector<int64_t>(tracking_state_->persistent_alloc_ids.begin(),
tracking_state_->persistent_alloc_ids.end());
} else {
return std::vector<int64_t>();
}
}
void OpKernelContext::clear_recorded_memory() {
if (tracking_state_) {
mutex_lock l(tracking_state_->stats_mu);
tracking_state_->temp_memory_allocated = 0;
tracking_state_->persistent_memory_allocated = 0;
tracking_state_->temp_tensor_buffer_and_size.clear();
tracking_state_->persistent_alloc_ids.clear();
}
}
void OpKernelContext::set_record_memory_consumption(bool v) {
record_memory_consumption_ = v;
if (v && !tracking_state_) {
tracking_state_ = std::make_unique<TrackingState>();
}
}
const string& OpKernelContext::executor_type() const {
if (params_->executor_type) {
return *params_->executor_type;
} else {
static const string& kEmptyString = *new string("");
return kEmptyString;
}
}
struct KernelRegistration {
KernelRegistration(const KernelDef& d, StringPiece c,
std::unique_ptr<kernel_factory::OpKernelFactory> f)
: def(d), kernel_class_name(c), factory(std::move(f)) {}
const KernelDef def;
const string kernel_class_name;
std::unique_ptr<kernel_factory::OpKernelFactory> factory;
};
struct KernelRegistry {
mutex mu;
std::unordered_multimap<string, KernelRegistration> registry
TF_GUARDED_BY(mu);
};
#if defined(_WIN32)
static const char kKernelLibPattern[] = "libtfkernel*.dll";
#elif defined(__APPLE__)
static const char kKernelLibPattern[] = "libtfkernel*.dylib";
#else
static const char kKernelLibPattern[] = "libtfkernel*.so";
#endif
#define FEATURE(x) \
{ x, #x }
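// Returns OK if the dynamic library at `path` only depends on CPU features
// that are present on this machine, judging by the platform strings embedded
// in the binary.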
static Status IsProbablySafeToLoad(const string& path) {
using port::CPUFeature;
static const auto* feature_map =
new std::map<string, std::pair<CPUFeature, string>>{
{"__AVX512VL__=1", FEATURE(CPUFeature::AVX512VL)},
};
std::vector<std::string> platform_strings;
int result = GetPlatformStrings(path, &platform_strings);
if (result) {
return Status(absl::StatusCode::kUnknown, strerror(result));
}
if (platform_strings.empty()) {
return Status(absl::StatusCode::kFailedPrecondition,
"Didn't find any platform strings");
}
std::vector<std::string> missing_features;
for (const auto& platform_string : platform_strings) {
const auto& entry = feature_map->find(platform_string);
if (entry != feature_map->end() &&
!port::TestCPUFeature(entry->second.first)) {
missing_features.emplace_back(entry->second.second);
}
}
if (!missing_features.empty()) {
string errmsg = "Missing CPU features: ";
errmsg.append(absl::StrJoin(missing_features, ", "));
return errors::FailedPrecondition(errmsg);
}
return absl::OkStatus();
}
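// Scans the runfiles kernel directory for libraries matching
// kKernelLibPattern and loads those that pass the ABI check; setting
// TF_REALLY_LOAD_UNSAFE_PACKAGES=1 forces loading despite a failed check.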
void LoadDynamicKernelsInternal() {
Env* env = Env::Default();
char* _abi_check_env_var = getenv("TF_REALLY_LOAD_UNSAFE_PACKAGES");
bool override_abi_check = false;
if (_abi_check_env_var != nullptr) {
override_abi_check = strcmp(_abi_check_env_var, "1") == 0;
}
string bazel_kernel_dir =
io::JoinPath(env->GetRunfilesDir(), "tensorflow", "core", "kernels");
std::vector<string> files;
Status s_kernel_dir = env->GetChildren(bazel_kernel_dir, &files);
if (s_kernel_dir.ok()) {
string dll_spec = io::JoinPath(bazel_kernel_dir, kKernelLibPattern);
for (const auto& file : files) {
string fullpath = io::JoinPath(bazel_kernel_dir, file);
if (env->MatchPath(fullpath, dll_spec)) {
Status s = IsProbablySafeToLoad(fullpath);
if (!s.ok() && override_abi_check) {
LOG(WARNING) << "Loading UNSAFE library " << fullpath
<< " because ABI check override is set: " << s.message();
}
if (s.ok() || override_abi_check) {
void* unused_filehandle;
TF_CHECK_OK(
env->LoadDynamicLibrary(fullpath.c_str(), &unused_filehandle));
} else {
LOG(WARNING) << "Not loading plugin library " << fullpath << ": "
<< s.message();
}
}
}
}
}
void LoadDynamicKernels() {
static absl::once_flag dll_loader_flag;
absl::call_once(dll_loader_flag, LoadDynamicKernelsInternal);
}
static string Key(StringPiece op_type, const DeviceType& device_type,
StringPiece label) {
return strings::StrCat(op_type, ":", DeviceTypeString(device_type), ":",
label);
}
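// Rewrites registrations carrying the JIT kernel label: they are re-keyed
// under an empty label so they become the default kernels, or dropped
// entirely when kDisableJitKernelsEnvVar is set to "1".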
void SetupOrDisableJit(KernelRegistry* registry) {
std::unordered_multimap<string, KernelRegistration> jit_kernels;
bool remove_jit_kernels = absl::StrContains(
absl::NullSafeStringView(getenv(kDisableJitKernelsEnvVar)), "1");
mutex_lock l(registry->mu);
std::unordered_multimap<string, KernelRegistration>& all_kernels =
registry->registry;
auto it = all_kernels.begin();
while (it != all_kernels.end()) {
if (absl::StrContains(it->second.def.label(), kJitKernelLabel)) {
KernelDef def_without_label = it->second.def;
def_without_label.set_label("");
if (!remove_jit_kernels) {
jit_kernels.emplace(
Key(def_without_label.op(),
DeviceType(def_without_label.device_type()),
def_without_label.label()),
KernelRegistration(def_without_label, it->second.kernel_class_name,
std::move(it->second.factory)));
}
it = all_kernels.erase(it);
} else {
it++;
}
}
for (auto& jit_kernel : jit_kernels) {
all_kernels.insert(std::move(jit_kernel));
}
}
namespace register_kernel {
Name::Name(const char* op) : KernelDefBuilder(op) {}
}  // namespace register_kernel
void* GlobalKernelRegistry() {
static KernelRegistry* global_kernel_registry = []() {
KernelRegistry* registry = new KernelRegistry;
OpRegistry::Global()->RegisterValidator(ValidateKernelRegistrations);
return registry;
}();
return global_kernel_registry;
}
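// Typed accessor for the registry; it loads dynamic kernels (when
// AUTOLOAD_DYNAMIC_KERNELS is defined) and applies the JIT kernel setup
// exactly once.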
static KernelRegistry* GlobalKernelRegistryTyped() {
#ifdef AUTOLOAD_DYNAMIC_KERNELS
LoadDynamicKernels();
#endif
auto* registry = reinterpret_cast<KernelRegistry*>(GlobalKernelRegistry());
static absl::once_flag setup_or_disable_jit;
absl::call_once(setup_or_disable_jit, SetupOrDisableJit, registry);
return registry;
}
namespace kernel_factory {
void OpKernelRegistrar::InitInternal(const KernelDef* kernel_def,
StringPiece kernel_class_name,
std::unique_ptr<OpKernelFactory> factory) {
const string key =
Key(kernel_def->op(), DeviceType(kernel_def->device_type()),
kernel_def->label());
auto global_registry =
reinterpret_cast<KernelRegistry*>(GlobalKernelRegistry());
mutex_lock l(global_registry->mu);
global_registry->registry.emplace(
key,
KernelRegistration(*kernel_def, kernel_class_name, std::move(factory)));
delete kernel_def;
}
OpKernel* OpKernelRegistrar::PtrOpKernelFactory::Create(
OpKernelConstruction* context) {
return (*create_func_)(context);
}
}  // namespace kernel_factory
namespace {
const string& GetKernelLabelAttr(const AttrSlice& node_attrs) {
static const string& kKernelAttr = *new string("_kernel");
static const string& kEmptyString = *new string("");
const AttrValue* attr_value = node_attrs.FindByString(kKernelAttr);
if (attr_value == nullptr || attr_value->value_case() != AttrValue::kS)
return kEmptyString;
else
return attr_value->s();
}
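// Core registry lookup: returns the registration whose KernelDef matches the
// node's op, device, label, and attrs. Among multiple matches the highest
// priority wins; an exact priority tie is an InvalidArgument error. When no
// device-specific kernel matches (and the device is not a symbolic execution
// device), a DEVICE_DEFAULT registration is tried as a fallback.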
Status FindKernelRegistration(
const DeviceType& device_type, StringPiece node_name,
bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info,
StringPiece node_op, AttrSlice node_attrs, const KernelRegistration** reg,
bool* was_attr_mismatch) {
*reg = nullptr;
*was_attr_mismatch = false;
const string& label = GetKernelLabelAttr(node_attrs);
const string key = Key(node_op, device_type, label);
auto typed_registry = GlobalKernelRegistryTyped();
tf_shared_lock lock(typed_registry->mu);
auto regs = typed_registry->registry.equal_range(key);
for (auto iter = regs.first; iter != regs.second; ++iter) {
bool match;
TF_RETURN_IF_ERROR(KernelAttrsMatch(iter->second.def, node_attrs, &match));
if (match) {
if (*reg != nullptr) {
if ((*reg)->def.priority() == iter->second.def.priority()) {
return errors::InvalidArgument(
"Multiple OpKernel registrations match NodeDef at the same "
"priority '",
FormatNodeDefForError(node_name, has_experimental_debug_info,
experimental_debug_info),
"': '", (*reg)->def.ShortDebugString(), "' and '",
iter->second.def.ShortDebugString(), "'");
} else if ((*reg)->def.priority() > iter->second.def.priority()) {
continue;
}
}
*reg = &iter->second;
} else {
*was_attr_mismatch = true;
}
}
if (*reg == nullptr &&
!IsSymbolicExecutionDevice(device_type.type_string())) {
const string default_key = Key(node_op, DEVICE_DEFAULT, label);
auto regs = typed_registry->registry.equal_range(default_key);
for (auto iter = regs.first; iter != regs.second; ++iter) {
bool match;
TF_RETURN_IF_ERROR(
KernelAttrsMatch(iter->second.def, node_attrs, &match));
if (match) {
if (*reg != nullptr) {
return errors::InvalidArgument(
"Multiple Default OpKernel registrations match NodeDef '",
FormatNodeDefForError(node_name, has_experimental_debug_info,
experimental_debug_info),
"': '", (*reg)->def.ShortDebugString(), "' and '",
iter->second.def.ShortDebugString(), "'");
}
*reg = &iter->second;
} else {
*was_attr_mismatch = true;
}
}
if (*reg != nullptr) {
VLOG(1) << "No device-specific kernels found for NodeDef '"
<< FormatNodeDefForError(node_name, has_experimental_debug_info,
experimental_debug_info)
<< "'"
<< "Will fall back to a default kernel." << std::endl;
}
}
return absl::OkStatus();
}
Status FindKernelRegistration(const DeviceType& device_type,
const NodeDef& node_def,
const KernelRegistration** reg,
bool* was_attr_mismatch) {
return FindKernelRegistration(
device_type, node_def.name(), node_def.has_experimental_debug_info(),
node_def.experimental_debug_info(), node_def.op(),
AttrSlice(&node_def.attr()), reg, was_attr_mismatch);
}
}  // namespace
bool KernelDefAvailable(const DeviceType& device_type,
const NodeDef& node_def) {
const KernelRegistration* reg = nullptr;
bool was_attr_mismatch;
Status result =
FindKernelRegistration(device_type, node_def, ®, &was_attr_mismatch);
return result.ok() && reg != nullptr;
}
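// Like FindKernelRegistration, but surfaces a detailed NotFound error that
// lists the requested attrs and the kernels registered for the op when
// nothing matches.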
Status FindKernelDef(
const DeviceType& device_type, StringPiece node_name,
bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info,
StringPiece node_op, StringPiece node_device, AttrSlice node_attrs,
const KernelDef** def, string* kernel_class_name) {
const KernelRegistration* reg = nullptr;
bool was_attr_mismatch;
TF_RETURN_IF_ERROR(FindKernelRegistration(
device_type, node_name, has_experimental_debug_info,
experimental_debug_info, node_op, node_attrs, ®, &was_attr_mismatch));
if (reg == nullptr) {
const std::string device_str = DeviceTypeString(device_type);
Status s = errors::NotFound(
"No registered '", node_op, "' OpKernel for ", device_str,
" devices compatible with node ",
FormatNodeDefForError(node_name, has_experimental_debug_info,
experimental_debug_info));
if (was_attr_mismatch) {
errors::AppendToMessage(
&s, " (OpKernel was found, but attributes didn't match) ",
"Requested Attributes: ",
SummarizeAttrsHelper(node_attrs, node_device));
}
if (!absl::StrContains(device_str, "JIT") &&
!absl::StartsWith(node_name, "_Mkl")) {
errors::AppendToMessage(
&s, ". Registered:", KernelsRegisteredForOp(node_op));
}
return s;
}
if (def != nullptr) *def = ®->def;
if (kernel_class_name != nullptr) *kernel_class_name = reg->kernel_class_name;
return absl::OkStatus();
}
Status FindKernelDef(const DeviceType& device_type, const NodeDef& node_def,
const KernelDef** def, string* kernel_class_name) {
return FindKernelDef(
device_type, node_def.name(), node_def.has_experimental_debug_info(),
node_def.experimental_debug_info(), node_def.op(), node_def.device(),
AttrSlice(&node_def.attr()), def, kernel_class_name);
}
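// Computes the device types that could run `def`, ordered by descending
// kernel priority. Unregistered ops (e.g. functions) are assumed runnable on
// every candidate device, as are nodes placed in a different address space.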
Status SupportedDeviceTypesForNode(
const std::vector<DeviceType>& prioritized_types, const NodeDef& def,
PrioritizedDeviceTypeVector* prioritized_device_types,
const DeviceNameUtils::ParsedName* local_address_spec) {
const OpRegistrationData* op_reg_data;
const Status s = OpRegistry::Global()->LookUp(def.op(), &op_reg_data);
if (s.ok()) {
bool exists_attr_mismatch = false;
for (const DeviceType& device_type : prioritized_types) {
const KernelRegistration* reg = nullptr;
bool was_attr_mismatch = false;
TF_RETURN_IF_ERROR(
FindKernelRegistration(device_type, def, ®, &was_attr_mismatch));
exists_attr_mismatch = exists_attr_mismatch || was_attr_mismatch;
if (reg != nullptr) {
int32_t priority = reg->def.priority();
prioritized_device_types->emplace_back(device_type, priority);
}
}
if (prioritized_device_types->empty() && !exists_attr_mismatch &&
local_address_spec != nullptr) {
DeviceNameUtils::ParsedName requested_device_name;
DeviceNameUtils::ParseFullName(def.device(), &requested_device_name);
if (DeviceNameUtils::IsDifferentAddressSpace(*local_address_spec,
requested_device_name)) {
if (requested_device_name.has_type) {
prioritized_device_types->push_back(
std::make_pair(DeviceType(requested_device_name.type), 0));
} else {
for (const DeviceType& device_type : prioritized_types) {
prioritized_device_types->push_back(std::make_pair(device_type, 0));
}
}
}
}
if (prioritized_device_types->empty()) {
TF_RETURN_IF_ERROR(ValidateNodeDef(def, op_reg_data->op_def));
}
std::stable_sort(prioritized_device_types->begin(),
prioritized_device_types->end(),
[](const std::pair<DeviceType, int32>& a,
const std::pair<DeviceType, int32>& b) {
return a.second > b.second;
});
} else {
for (const DeviceType& device_type : prioritized_types) {
prioritized_device_types->push_back(std::make_pair(device_type, 0));
}
}
return absl::OkStatus();
}
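// Registry introspection helpers. Illustrative use (the op name here is just
// an example):
//   KernelList matmul_kernels = GetRegisteredKernelsForOp("MatMul");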
void LogAllRegisteredKernels() {
KernelList kernel_list = GetAllRegisteredKernels();
for (const auto& kernel_def : kernel_list.kernel()) {
LOG(INFO) << "OpKernel ('" << kernel_def.ShortDebugString() << "')";
}
}
KernelList GetAllRegisteredKernels() {
return GetFilteredRegisteredKernels([](const KernelDef& k) { return true; });
}
KernelList GetFilteredRegisteredKernels(
const std::function<bool(const KernelDef&)>& predicate) {
KernelRegistry* const typed_registry = GlobalKernelRegistryTyped();
KernelList kernel_list;
tf_shared_lock lock(typed_registry->mu);
kernel_list.mutable_kernel()->Reserve(typed_registry->registry.size());
for (const auto& p : typed_registry->registry) {
const KernelDef& kernel_def = p.second.def;
if (predicate(kernel_def)) {
*kernel_list.add_kernel() = kernel_def;
}
}
return kernel_list;
}
KernelList GetRegisteredKernelsForOp(StringPiece op_name) {
auto op_pred = [op_name](const KernelDef& k) { return k.op() == op_name; };
return GetFilteredRegisteredKernels(op_pred);
}
string KernelsRegisteredForOp(StringPiece op_name) {
KernelList kernel_list = GetRegisteredKernelsForOp(op_name);
if (kernel_list.kernel_size() == 0) return " <no registered kernels>\n";
string ret;
for (const auto& kernel_def : kernel_list.kernel()) {
strings::StrAppend(&ret, " device='", kernel_def.device_type(), "'");
if (!kernel_def.label().empty()) {
strings::StrAppend(&ret, "; label='", kernel_def.label(), "'");
}
for (int i = 0; i < kernel_def.constraint_size(); ++i) {
strings::StrAppend(
&ret, "; ", kernel_def.constraint(i).name(), " in ",
SummarizeAttrValue(kernel_def.constraint(i).allowed_values()));
}
strings::StrAppend(&ret, "\n");
}
return ret;
}
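// CreateOpKernel overloads: resolve the NodeDef against the kernel registry,
// validate it against the OpDef, compute input/output memory types, and
// invoke the registered factory. Illustrative call (variable names are the
// caller's own):
//   Status status;
//   std::unique_ptr<OpKernel> op = CreateOpKernel(
//       DEVICE_CPU, device, cpu_allocator(), node_def,
//       TF_GRAPH_DEF_VERSION, &status);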
std::unique_ptr<OpKernel> CreateOpKernel(
DeviceType device_type, DeviceBase* device, Allocator* allocator,
const NodeDef& node_def, int graph_def_version, Status* status) {
std::shared_ptr<const NodeProperties> props;
status->Update(NodeProperties::CreateFromNodeDef(
node_def, OpRegistry::Global(), &props));
if (!status->ok()) {
errors::AppendToMessage(status,
" for node: ", FormatNodeDefForError(node_def));
return nullptr;
}
return CreateOpKernel(device_type, device, allocator, props,
graph_def_version, status);
}
std::unique_ptr<OpKernel> CreateOpKernel(
DeviceType device_type, DeviceBase* device, Allocator* allocator,
const std::shared_ptr<const NodeProperties>& props, int graph_def_version,
Status* status) {
OpKernel* kernel = nullptr;
*status = CreateOpKernel(std::move(device_type), device, allocator,
nullptr, props, graph_def_version, &kernel);
return std::unique_ptr<OpKernel>(kernel);
}
Status CreateOpKernel(DeviceType device_type, DeviceBase* device,
Allocator* allocator, FunctionLibraryRuntime* flib,
const std::shared_ptr<const NodeProperties>& props,
int graph_def_version, OpKernel** kernel) {
return CreateOpKernel(std::move(device_type), device, allocator, flib,
nullptr, props, graph_def_version,
kernel);
}
Status CreateOpKernel(DeviceType device_type, DeviceBase* device,
Allocator* allocator, FunctionLibraryRuntime* flib,
ResourceMgr* resource_mgr,
const std::shared_ptr<const NodeProperties>& props,
int graph_def_version, OpKernel** kernel) {
  // `props` is dereferenced unconditionally, so it must be non-null here.
  const NodeDef& node_def = props->node_def;
  bool was_attr_mismatch = false;
  const KernelRegistration* registration = nullptr;
  Status s;
  VLOG(1) << "Instantiating kernel for node: " << SummarizeNodeDef(node_def);
  TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *props->op_def));
  s = FindKernelRegistration(device_type, node_def, &registration,
                             &was_attr_mismatch);
  if (!s.ok()) {
    errors::AppendToMessage(&s, " when instantiating ", node_def.op());
    return s;
  }
if (registration == nullptr) {
s.Update(errors::NotFound("No registered '", node_def.op(),
"' OpKernel for '", DeviceTypeString(device_type),
"' devices compatible with node ",
FormatNodeDefForError(node_def)));
if (was_attr_mismatch) {
errors::AppendToMessage(
&s, " (OpKernel was found, but attributes didn't match) ",
"Requested Attributes: ", SummarizeAttrs(node_def));
}
errors::AppendToMessage(
&s, ". Registered:", KernelsRegisteredForOp(node_def.op()));
return s;
}
MemoryTypeVector input_memory_types;
MemoryTypeVector output_memory_types;
TF_RETURN_IF_ERROR(MemoryTypesForNode(OpRegistry::Global(), device_type,
node_def, &input_memory_types,
&output_memory_types));
OpKernelConstruction context(std::move(device_type), device, allocator, flib,
resource_mgr, props, input_memory_types,
output_memory_types, graph_def_version, &s);
*kernel = registration->factory->Create(&context);
if (!s.ok()) {
delete *kernel;
*kernel = nullptr;
}
return s;
}
namespace {
bool FindArgInOp(StringPiece arg_name,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args) {
for (const auto& arg : args) {
if (arg_name == arg.name()) {
return true;
}
}
return false;
}
}  // namespace
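// Cross-checks every kernel registration against the op registry: kernels
// for unknown ops are logged, and a host_memory_arg naming a nonexistent
// input or output is an InvalidArgument error.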
Status ValidateKernelRegistrations(const OpRegistryInterface& op_registry) {
auto typed_registry = GlobalKernelRegistryTyped();
tf_shared_lock lock(typed_registry->mu);
for (const auto& key_registration : typed_registry->registry) {
const KernelDef& kernel_def(key_registration.second.def);
const OpRegistrationData* op_reg_data;
const Status status = op_registry.LookUp(kernel_def.op(), &op_reg_data);
if (!status.ok()) {
LOG(ERROR) << "OpKernel ('" << kernel_def.ShortDebugString()
<< "') for unknown op: " << kernel_def.op();
continue;
}
const OpDef& op_def = op_reg_data->op_def;
for (const auto& host_memory_arg : kernel_def.host_memory_arg()) {
if (!FindArgInOp(host_memory_arg, op_def.input_arg()) &&
!FindArgInOp(host_memory_arg, op_def.output_arg())) {
return errors::InvalidArgument(
"HostMemory arg '", host_memory_arg,
"' not found in OpDef: ", SummarizeOpDef(op_def));
}
}
}
return absl::OkStatus();
}
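// Template specializations that route the generic eigen_device() accessor to
// the CPU and GPU device implementations.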
template <>
const Eigen::ThreadPoolDevice& OpKernelContext::eigen_device() const {
return eigen_cpu_device();
}
template <>
const Eigen::GpuDevice& OpKernelContext::eigen_device() const {
return eigen_gpu_device();
}
void OpKernelConstruction::CtxFailure(const Status& s) {
VLOG(1) << s;
SetStatus(s);
}
void OpKernelConstruction::CtxFailureWithWarning(const Status& s) {
LOG(WARNING) << s;
SetStatus(s);
}
void OpKernelConstruction::CtxFailure(const char* file, int line,
const Status& s) {
VLOG(1) << "OP_REQUIRES failed at " << io::Basename(file) << ":" << line
<< " : " << s;
SetStatus(s);
}
void OpKernelConstruction::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
LOG(WARNING) << "OP_REQUIRES failed at " << io::Basename(file) << ":" << line
<< " : " << s;
SetStatus(s);
}
void OpKernelContext::CtxFailure(const Status& s) {
VLOG(1) << s;
SetStatus(s);
}
void OpKernelContext::CtxFailureWithWarning(const Status& s) {
LOG(WARNING) << s;
SetStatus(s);
}
void OpKernelContext::CtxFailure(const char* file, int line, const Status& s) {
VLOG(1) << "OP_REQUIRES failed at " << io::Basename(file) << ":" << line
<< " : " << s;
SetStatus(s);
}
void OpKernelContext::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
LOG(WARNING) << "OP_REQUIRES failed at " << io::Basename(file) << ":" << line
<< " : " << s;
SetStatus(s);
}
void CheckNotInComputeAsync(OpKernelContext* ctx,
const char* correct_macro_name) {
CHECK_EQ(nullptr, ctx->params_->op_kernel->AsAsync())
<< "Use " << correct_macro_name << " in AsyncOpKernel implementations.";
}
} | #include "tensorflow/core/framework/op_kernel.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel_test_base.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/protobuf.h"
class DummyKernel : public tensorflow::OpKernel {
public:
explicit DummyKernel(tensorflow::OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(tensorflow::OpKernelContext* context) override {}
};
REGISTER_OP("Test1").Input("a: float").Input("b: int32").Output("o: uint8");
REGISTER_KERNEL_BUILDER(Name("Test1").Device(tensorflow::DEVICE_CPU),
DummyKernel);
namespace foo {
bool match_signature_ = false;
class TestOp2 : public ::tensorflow::OpKernel {
public:
explicit TestOp2(::tensorflow::OpKernelConstruction* context)
: OpKernel(context) {
::tensorflow::Status status = context->MatchSignature(
{::tensorflow::DT_INT32}, {::tensorflow::DT_INT32});
match_signature_ = status.ok();
context->SetStatus(status);
}
void Compute(::tensorflow::OpKernelContext* context) override {}
};
REGISTER_OP("Test2").Input("i: T").Output("o: T").Attr("T: type");
REGISTER_KERNEL_BUILDER(Name("Test2")
.Device(::tensorflow::DEVICE_GPU)
.HostMemory("i")
.HostMemory("o"),
TestOp2);
}  // namespace foo
namespace tensorflow {
REGISTER_OP("Test3").Input("a: T").Input("b: T").Attr("T: type");
class TestOp3Cpu : public tensorflow::OpKernel {
public:
explicit TestOp3Cpu(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
};
REGISTER_KERNEL_BUILDER(
Name("Test3").Device(DEVICE_CPU).TypeConstraint<int8>("T"), TestOp3Cpu);
namespace {
class TestOp3Gpu : public tensorflow::OpKernel {
public:
explicit TestOp3Gpu(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
};
REGISTER_KERNEL_BUILDER(
    Name("Test3").Device(DEVICE_GPU).TypeConstraint<float>("T"), TestOp3Gpu);
REGISTER_OP("Test4").Input("i: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("Test4").Device(DEVICE_CPU), DummyKernel);
REGISTER_KERNEL_BUILDER(Name("Test4").Device(DEVICE_GPU), DummyKernel);
REGISTER_OP("Test5").Input("a: T").Input("b: T").Attr("T: type");
REGISTER_OP("OpWithoutKernel").Input("a: T").Input("b: T").Attr("T: type");
class TestOp5Cpu : public tensorflow::OpKernel {
public:
explicit TestOp5Cpu(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
};
REGISTER_KERNEL_BUILDER(Name("Test5").Device(DEVICE_CPU).Priority(2),
TestOp5Cpu);
class TestOp5Gpu : public tensorflow::OpKernel {
public:
explicit TestOp5Gpu(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
};
REGISTER_KERNEL_BUILDER(Name("Test5").Device(DEVICE_GPU).Priority(1),
TestOp5Gpu);
NodeDef StripNodeDef(OpKernelConstruction* ctx) {
const NodeDef& original = ctx->def();
NodeDef ret;
ret.set_name(original.name());
ret.set_op(original.op());
return ret;
}
class StrippedNode : public tensorflow::OpKernel {
public:
explicit StrippedNode(OpKernelConstruction* context)
: OpKernel(context, StripNodeDef(context), false) {}
void Compute(OpKernelContext* context) override {}
};
REGISTER_OP("StrippedNode").Input("i: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("StrippedNode").Device(DEVICE_CPU), StrippedNode);
class RefInputs : public tensorflow::OpKernel {
public:
explicit RefInputs(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
ctx->delete_ref_input(0, false);
Tensor t;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DT_FLOAT, TensorShape({}), &t));
OP_REQUIRES_OK(ctx, ctx->replace_ref_input("b", t, false));
}
};
REGISTER_OP("RefInputs")
.Input("a: Ref(float)")
.Input("b: Ref(float)")
.Output("c: float");
REGISTER_KERNEL_BUILDER(Name("RefInputs").Device(DEVICE_CPU), RefInputs);
static std::vector<DeviceType> DeviceTypes() {
return {DeviceType(DEVICE_GPU), DeviceType(DEVICE_CPU)};
}
class OpKernelTest : public ::testing::Test {
public:
OpKernelTest() : device_(Env::Default()) {}
protected:
NodeDef CreateNodeDef(const string& op_type, const DataTypeVector& inputs,
const string& device = "") {
NodeDefBuilder builder(op_type + "-op", op_type);
for (DataType dt : inputs) {
builder.Input(FakeInput(dt));
}
builder.Device(device);
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
return node_def;
}
void ExpectEqual(const string& what, const DataTypeVector& expected,
const DataTypeVector& observed) {
EXPECT_EQ(expected.size(), observed.size()) << what;
const size_t size = std::min(expected.size(), observed.size());
for (size_t i = 0; i < size; ++i) {
bool match = TypesCompatible(expected[i], observed[i]);
EXPECT_TRUE(match) << what << " i:" << i << ", expected: " << expected[i]
<< ", observed: " << observed[i];
}
}
void ExpectSuccess(const string& op_type, DeviceType device_type,
const DataTypeVector& inputs,
const DataTypeVector& outputs) {
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(
std::move(device_type), &device_, cpu_allocator(),
CreateNodeDef(op_type, inputs), TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok()) << status;
EXPECT_TRUE(op != nullptr);
if (op != nullptr) {
ExpectEqual("inputs", op->input_types(), inputs);
ExpectEqual("outputs", op->output_types(), outputs);
}
}
void ExpectFailure(const string& ascii_node_def, DeviceType device_type,
error::Code code) {
NodeDef node_def;
    ASSERT_TRUE(
        protobuf::TextFormat::ParseFromString(ascii_node_def, &node_def));
Status status;
std::unique_ptr<OpKernel> op(
CreateOpKernel(std::move(device_type), &device_, cpu_allocator(),
node_def, TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(op == nullptr);
EXPECT_FALSE(status.ok());
if (!status.ok()) {
LOG(INFO) << "Status message: " << status.message();
EXPECT_EQ(code, status.code());
}
}
private:
DeviceBase device_;
};
TEST_F(OpKernelTest, SuccessCpu) {
ExpectSuccess("Test1", DEVICE_CPU, {DT_FLOAT, DT_INT32}, {DT_UINT8});
ExpectSuccess("Test1", DEVICE_CPU, {DT_FLOAT_REF, DT_INT32}, {DT_UINT8});
}
TEST_F(OpKernelTest, SuccessStrippedNode) {
ExpectSuccess("StrippedNode", DEVICE_CPU, {DT_FLOAT}, {DT_FLOAT});
}
TEST_F(OpKernelTest, SuccessGpu) {
foo::match_signature_ = false;
ExpectSuccess("Test2", DEVICE_GPU, {DT_INT32}, {DT_INT32});
EXPECT_TRUE(foo::match_signature_);
}
TEST_F(OpKernelTest, SuccessBothCpuAndGpu) {
ExpectSuccess("Test3", DEVICE_CPU, {DT_INT8, DT_INT8}, {});
ExpectSuccess("Test3", DEVICE_GPU, {DT_FLOAT, DT_FLOAT}, {});
}
TEST_F(OpKernelTest, CpuTypeRegistered) {
NodeDef ndef = CreateNodeDef("Test1", {DT_FLOAT, DT_INT32});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[0].first);
}
TEST_F(OpKernelTest, KernelNotRegistered) {
const string& local_device = "/job:localhost/replica:0/task:0/device:CPU:0";
const string& remote_device = "/job:worker/replica:0/task:0/device";
{
NodeDef ndef =
CreateNodeDef("OpWithoutKernel", {DT_STRING, DT_STRING}, remote_device);
PrioritizedDeviceTypeVector devs;
DeviceNameUtils::ParsedName local_device_name;
DeviceNameUtils::ParseFullName(local_device, &local_device_name);
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs,
&local_device_name));
EXPECT_EQ(2, devs.size());
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[0].first);
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[1].first);
}
{
NodeDef ndef =
CreateNodeDef("OpWithoutKernel", {DT_STRING, DT_STRING}, local_device);
PrioritizedDeviceTypeVector devs;
DeviceNameUtils::ParsedName local_device_name;
DeviceNameUtils::ParseFullName(local_device, &local_device_name);
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs,
&local_device_name));
EXPECT_EQ(0, devs.size());
}
}
TEST_F(OpKernelTest, CpuAndGpuTypeRegistered) {
{
NodeDef ndef = CreateNodeDef("Test3", {DT_INT8, DT_INT8});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[0].first);
}
{
NodeDef ndef = CreateNodeDef("Test3", {DT_FLOAT, DT_FLOAT});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[0].first);
}
{
NodeDef ndef = CreateNodeDef("Test3", {DT_STRING, DT_STRING});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(0, devs.size());
}
{
NodeDef ndef = CreateNodeDef("Test4", {DT_FLOAT});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(2, devs.size());
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[0].first);
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[1].first);
}
{
NodeDef ndef = CreateNodeDef("Test5", {DT_STRING, DT_STRING});
PrioritizedDeviceTypeVector devs;
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(2, devs.size());
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[0].first);
EXPECT_EQ(2, devs[0].second);
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[1].first);
EXPECT_EQ(1, devs[1].second);
}
}
TEST_F(OpKernelTest, NotFound) {
const auto not_found = error::NOT_FOUND;
ExpectFailure(tsl::LegacyUnredactedDebugString(
CreateNodeDef("Test1", {DT_FLOAT, DT_INT32})),
DEVICE_GPU, not_found);
ExpectFailure(tsl::LegacyUnredactedDebugString(
CreateNodeDef("Test3", {DT_INT8, DT_INT8})),
DEVICE_GPU, not_found);
ExpectFailure(tsl::LegacyUnredactedDebugString(
CreateNodeDef("Test3", {DT_FLOAT, DT_FLOAT})),
DEVICE_CPU, not_found);
ExpectFailure(tsl::LegacyUnredactedDebugString(
CreateNodeDef("Test3", {DT_INT32, DT_INT32})),
DEVICE_GPU, not_found);
ExpectFailure("name: 'NF' op: 'Testnotfound'", DEVICE_CPU, not_found);
ExpectFailure("name: 'NF' op: 'Testnotfound'", DEVICE_GPU, not_found);
}
TEST_F(OpKernelTest, TooFewInputs) {
const auto invalid = error::INVALID_ARGUMENT;
NodeDef node_def = CreateNodeDef("Test1", {DT_FLOAT, DT_INT32});
node_def.clear_input();
ExpectFailure(tsl::LegacyUnredactedDebugString(node_def), DEVICE_CPU,
invalid);
node_def.add_input("a");
ExpectFailure(tsl::LegacyUnredactedDebugString(node_def), DEVICE_CPU,
invalid);
}
TEST_F(OpKernelTest, TooManyInputs) {
const auto invalid = error::INVALID_ARGUMENT;
NodeDef node_def = CreateNodeDef("Test1", {DT_FLOAT, DT_INT32});
node_def.add_input("c");
ExpectFailure(tsl::LegacyUnredactedDebugString(node_def), DEVICE_CPU,
invalid);
}
TEST_F(OpKernelTest, MatchSignatureFails) {
const auto invalid = error::INVALID_ARGUMENT;
foo::match_signature_ = true;
ExpectFailure(
tsl::LegacyUnredactedDebugString(CreateNodeDef("Test2", {DT_FLOAT})),
DEVICE_GPU, invalid);
EXPECT_FALSE(foo::match_signature_);
}
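// Minimal device that satisfies OpKernelContext by returning the process CPU
// allocator.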
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {}
Allocator* GetAllocator(AllocatorAttributes ) override {
return cpu_allocator();
}
};
TEST_F(OpKernelTest, InputDtype) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(
CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("Test1", {DT_FLOAT, DT_INT32}),
TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok());
params.op_kernel = op.get();
Tensor a(DT_FLOAT, TensorShape({}));
Tensor b(DT_INT32, TensorShape({}));
Tensor c(DT_UINT8, TensorShape({}));
absl::InlinedVector<TensorValue, 4> inputs{TensorValue(&a), TensorValue(&b),
TensorValue(&c)};
params.inputs = inputs;
auto ctx = std::make_unique<OpKernelContext>(¶ms);
DataType dtype;
EXPECT_FALSE(ctx->input_dtype("non_existent_input", &dtype).ok());
ASSERT_TRUE(ctx->input_dtype("a", &dtype).ok());
EXPECT_EQ(dtype, DT_FLOAT);
ASSERT_TRUE(ctx->input_dtype("b", &dtype).ok());
EXPECT_EQ(dtype, DT_INT32);
}
TEST_F(OpKernelTest, InputOnly) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(
CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("Test1", {DT_FLOAT, DT_INT32}),
TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok());
params.op_kernel = op.get();
Tensor a(DT_FLOAT, TensorShape({}));
absl::InlinedVector<TensorValue, 2> inputs{TensorValue(&a)};
params.inputs = inputs;
auto ctx = std::make_unique<OpKernelContext>(¶ms);
ASSERT_TRUE(ctx->get_input(0).ok());
EXPECT_FALSE(ctx->get_input(1).ok());
EXPECT_EQ(ctx->get_input(1).status(),
absl::InvalidArgumentError(
"Given index was 1, but index of input must be greater than 0, "
"less than the number of inputs (1), and not a ref."));
}
TEST_F(OpKernelTest, RefInputs) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(
CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("RefInputs", {DT_FLOAT_REF, DT_FLOAT_REF}),
TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok());
params.op_kernel = op.get();
Tensor* a = new Tensor(DT_FLOAT, TensorShape({}));
Tensor* b = new Tensor(DT_FLOAT, TensorShape({2}));
mutex mu_a, mu_b;
absl::InlinedVector<TensorValue, 4> inputs{TensorValue(&mu_a, a),
TensorValue(&mu_b, b)};
params.inputs = inputs;
auto ctx = std::make_unique<OpKernelContext>(¶ms);
op->Compute(ctx.get());
EXPECT_NE(ctx->mutable_input(1, false).shape(), TensorShape({2}));
delete b;
}
TEST_F(OpKernelTest, AllocateOutput) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(
CreateOpKernel(DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("Test1", {DT_FLOAT, DT_INT32}),
TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok());
params.op_kernel = op.get();
Tensor a(DT_FLOAT, TensorShape({}));
Tensor b(DT_INT32, TensorShape({}));
absl::InlinedVector<TensorValue, 4> inputs{TensorValue(&a), TensorValue(&b)};
params.inputs = inputs;
auto ctx = std::make_unique<OpKernelContext>(¶ms);
Tensor* output = nullptr;
Status s = ctx->allocate_output(-1, TensorShape({}), &output);
EXPECT_THAT(s, tensorflow::testing::StatusIs(error::INTERNAL));
EXPECT_THAT(s.message(), ::testing::ContainsRegex("bad index=-1"));
s = ctx->allocate_output(1, TensorShape({}), &output);
EXPECT_THAT(s, tensorflow::testing::StatusIs(error::INTERNAL));
EXPECT_THAT(s.message(), ::testing::ContainsRegex("bad index=1"));
AllocatorAttributes attrs;
attrs.set_on_host(true);
TF_ASSERT_OK(ctx->allocate_output("o", TensorShape({}), &output, attrs));
s = ctx->allocate_output(-1, TensorShape({}), &output, attrs);
EXPECT_THAT(s, tensorflow::testing::StatusIs(error::INTERNAL));
EXPECT_THAT(s.message(), ::testing::ContainsRegex("bad index=-1"));
s = ctx->allocate_output(1, TensorShape({}), &output, attrs);
EXPECT_THAT(s, tensorflow::testing::StatusIs(error::INTERNAL));
EXPECT_THAT(s.message(), ::testing::ContainsRegex("bad index=1"));
}
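// A device that counts plain vs. scoped allocations, letting the test below
// verify which allocator path OpKernelContext takes when scope_id is set.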
class ScopedAllocatorDevice : public DeviceBase {
public:
explicit ScopedAllocatorDevice(Env* env)
: DeviceBase(env),
scope_allocated_(false),
num_allocations_(0),
num_scoped_allocations_(0) {}
Allocator* GetAllocator(AllocatorAttributes attrs) override {
CHECK_LE(attrs.scope_id, 0);
num_allocations_++;
return cpu_allocator();
}
Allocator* GetScopedAllocator(AllocatorAttributes attrs,
int64_t ) override {
CHECK_GT(attrs.scope_id, 0);
num_scoped_allocations_++;
if (scope_allocated_) {
return nullptr;
} else {
scope_allocated_ = true;
return cpu_allocator();
}
}
void CopyTensorInSameDevice(const Tensor* input_tensor, Tensor* output_tensor,
const DeviceContext* device_context,
StatusCallback done) override {
CHECK(input_tensor->NumElements() == output_tensor->NumElements());
tensor::DeepCopy(*input_tensor, output_tensor);
done(absl::OkStatus());
}
int num_allocations(bool scoped) {
if (scoped) {
return num_scoped_allocations_;
} else {
return num_allocations_;
}
}
private:
bool scope_allocated_;
int num_allocations_;
int num_scoped_allocations_;
};
TEST_F(OpKernelTest, ScopedAllocationTest) {
Env* env = Env::Default();
OpKernelContext::Params params;
auto sa_device = std::make_unique<ScopedAllocatorDevice>(env);
params.device = sa_device.get();
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(
DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("Test4", {DT_FLOAT}), TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok());
params.op_kernel = op.get();
AllocatorAttributes alloc_attrs;
alloc_attrs.scope_id = 1;
std::vector<AllocatorAttributes> output_alloc_attrs({alloc_attrs});
params.output_attr_array = output_alloc_attrs.data();
std::vector<int> forward_from({OpKernelContext::Params::kNeverForward});
params.forward_from_array = forward_from.data();
auto ctx = std::make_unique<OpKernelContext>(¶ms);
EXPECT_EQ(sa_device->num_allocations(false), 0);
EXPECT_EQ(sa_device->num_allocations(true), 0);
Tensor temp1;
TF_EXPECT_OK(
ctx->allocate_temp(DT_FLOAT, TensorShape({8}), &temp1, alloc_attrs));
EXPECT_EQ(sa_device->num_allocations(false), 1);
EXPECT_EQ(sa_device->num_allocations(true), 0);
Tensor temp2;
alloc_attrs.scope_id = -1;
TF_EXPECT_OK(
ctx->allocate_temp(DT_FLOAT, TensorShape({4}), &temp2, alloc_attrs));
EXPECT_EQ(sa_device->num_allocations(false), 2);
EXPECT_EQ(sa_device->num_allocations(true), 0);
ctx->set_output(0, temp1);
EXPECT_EQ(sa_device->num_allocations(false), 2);
EXPECT_EQ(sa_device->num_allocations(true), 1);
}
TEST_F(OpKernelTest, TraceString) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(
DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("Test4", {DT_FLOAT}), TF_GRAPH_DEF_VERSION, &status));
TF_CHECK_OK(status);
params.op_kernel = op.get();
Tensor a(DT_FLOAT, TensorShape({4, 8}));
absl::InlinedVector<TensorValue, 4> inputs{TensorValue(&a)};
params.inputs = inputs;
params.op_kernel = op.get();
OpKernelContext ctx(¶ms);
EXPECT_THAT(op->TraceString(ctx, true),
::testing::ContainsRegex("Test4-op:Test4#shape"));
}
REGISTER_OP("BuildCPU");
REGISTER_KERNEL_BUILDER(Name("BuildCPU").Device(DEVICE_CPU), DummyKernel);
TEST_F(OpKernelBuilderTest, BuilderCPU) {
ExpectSuccess("BuildCPU", DEVICE_CPU, {});
EXPECT_EQ("DummyKernel", GetKernelClassName("BuildCPU", DEVICE_CPU, {}));
ExpectFailure("BuildCPU", DEVICE_GPU, {}, error::NOT_FOUND);
EXPECT_EQ("not found", GetKernelClassName("BuildCPU", DEVICE_GPU, {}));
}
REGISTER_OP("BuildGPU");
REGISTER_KERNEL_BUILDER(Name("BuildGPU").Device(DEVICE_GPU), DummyKernel);
TEST_F(OpKernelBuilderTest, BuilderGPU) {
ExpectFailure("BuildGPU", DEVICE_CPU, {}, error::NOT_FOUND);
ExpectSuccess("BuildGPU", DEVICE_GPU, {});
}
REGISTER_OP("BuildBoth");
REGISTER_KERNEL_BUILDER(Name("BuildBoth").Device(DEVICE_CPU), DummyKernel);
REGISTER_KERNEL_BUILDER(Name("BuildBoth").Device(DEVICE_GPU), DummyKernel);
TEST_F(OpKernelBuilderTest, BuilderBoth) {
ExpectSuccess("BuildBoth", DEVICE_CPU, {});
ExpectSuccess("BuildBoth", DEVICE_GPU, {});
}
REGISTER_OP("BuildTypeAttr").Attr("T: type");
REGISTER_KERNEL_BUILDER(
Name("BuildTypeAttr").Device(DEVICE_CPU).TypeConstraint<float>("T"),
DummyKernel);
TEST_F(OpKernelBuilderTest, BuilderTypeAttr) {
ExpectSuccess("BuildTypeAttr", DEVICE_CPU, {"T|type|DT_FLOAT"});
ExpectFailure("BuildTypeAttr", DEVICE_CPU, {"T|type|DT_BOOL"},
error::NOT_FOUND);
ExpectFailure("BuildTypeAttr", DEVICE_CPU, {}, error::INVALID_ARGUMENT);
ExpectFailure("BuildTypeAttr", DEVICE_CPU, {"T|int|7"},
error::INVALID_ARGUMENT);
}
REGISTER_OP("BuildTypeListAttr").Attr("T: list(type)");
REGISTER_KERNEL_BUILDER(
Name("BuildTypeListAttr").Device(DEVICE_CPU).TypeConstraint<bool>("T"),
DummyKernel);
TEST_F(OpKernelBuilderTest, BuilderTypeListAttr) {
ExpectSuccess("BuildTypeListAttr", DEVICE_CPU, {"T|list(type)|[]"});
EXPECT_EQ("DummyKernel", GetKernelClassName("BuildTypeListAttr", DEVICE_CPU,
{"T|list(type)|[]"}));
ExpectSuccess("BuildTypeListAttr", DEVICE_CPU, {"T|list(type)|[DT_BOOL]"});
EXPECT_EQ("DummyKernel", GetKernelClassName("BuildTypeListAttr", DEVICE_CPU,
{"T|list(type)|[]"}));
ExpectSuccess("BuildTypeListAttr", DEVICE_CPU,
{"T|list(type)|[DT_BOOL, DT_BOOL]"});
ExpectFailure("BuildTypeListAttr", DEVICE_CPU, {"T|list(type)|[DT_FLOAT]"},
error::NOT_FOUND);
EXPECT_EQ("not found", GetKernelClassName("BuildTypeListAttr", DEVICE_CPU,
{"T|list(type)|[DT_FLOAT]"}));
ExpectFailure("BuildTypeListAttr", DEVICE_CPU, {}, error::INVALID_ARGUMENT);
ExpectFailure("BuildTypeListAttr", DEVICE_CPU, {"T|int|7"},
error::INVALID_ARGUMENT);
}
REGISTER_OP("DuplicateKernel");
REGISTER_KERNEL_BUILDER(Name("DuplicateKernel").Device(DEVICE_CPU),
DummyKernel);
REGISTER_KERNEL_BUILDER(Name("DuplicateKernel").Device(DEVICE_CPU),
DummyKernel);
TEST_F(OpKernelBuilderTest, DuplicateKernel) {
const NodeDef ndef = CreateNodeDef("DuplicateKernel", {});
PrioritizedDeviceTypeVector devs;
Status status = SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(
status.message(), "Multiple OpKernel registrations match NodeDef"));
ExpectFailure("DuplicateKernel", DEVICE_CPU, {}, error::INVALID_ARGUMENT);
}
REGISTER_OP("DuplicateKernelForT").Attr("T: type");
REGISTER_KERNEL_BUILDER(
Name("DuplicateKernelForT").Device(DEVICE_CPU).TypeConstraint<float>("T"),
DummyKernel);
REGISTER_KERNEL_BUILDER(
Name("DuplicateKernelForT").Device(DEVICE_CPU).TypeConstraint<float>("T"),
DummyKernel);
TEST_F(OpKernelBuilderTest, DuplicateKernelForT) {
const NodeDef ndef =
CreateNodeDef("DuplicateKernelForT", {"T|type|DT_FLOAT"});
PrioritizedDeviceTypeVector devs;
Status status = SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(
status.message(), "Multiple OpKernel registrations match NodeDef"));
ExpectFailure("DuplicateKernelForT", DEVICE_CPU, {"T|type|DT_FLOAT"},
error::INVALID_ARGUMENT);
ExpectFailure("DuplicateKernelForT", DEVICE_CPU, {"T|type|DT_BOOL"},
error::NOT_FOUND);
}
REGISTER_OP("BadConstraint").Attr("dtype: type");
REGISTER_KERNEL_BUILDER(Name("BadConstraint")
.Device(DEVICE_CPU)
.TypeConstraint<float>("T"),
DummyKernel);
TEST_F(OpKernelBuilderTest, BadConstraint) {
const NodeDef ndef = CreateNodeDef("BadConstraint", {});
PrioritizedDeviceTypeVector devs;
Status status = SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(
absl::StrContains(status.message(),
"OpKernel 'BadConstraint' has constraint on attr "
"'T' not in NodeDef"));
ExpectFailure("BadConstraint", DEVICE_CPU, {"dtype|type|DT_FLOAT"},
error::INVALID_ARGUMENT);
}
REGISTER_OP("ListOut").Output("a: int32").Output("b: T").Attr("T: list(type)");
REGISTER_KERNEL_BUILDER(Name("ListOut").Device(tensorflow::DEVICE_CPU),
DummyKernel);
TEST_F(OpKernelBuilderTest, OpOutputList) {
Env* env = Env::Default();
OpKernelContext::Params params;
DummyDevice device(env);
params.device = &device;
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(
DEVICE_CPU, params.device, cpu_allocator(),
CreateNodeDef("ListOut", {"T|list(type)|[DT_FLOAT, DT_INT32]"}),
TF_GRAPH_DEF_VERSION, &status));
EXPECT_TRUE(status.ok()) << status.ToString();
params.op_kernel = op.get();
auto ctx = std::make_unique<OpKernelContext>(¶ms);
EXPECT_EQ(DT_INT32, ctx->expected_output_dtype(0));
OpOutputList out_list;
EXPECT_FALSE(ctx->output_list("non_existent_output", &out_list).ok());
ASSERT_TRUE(ctx->output_list("b", &out_list).ok());
EXPECT_EQ(DT_FLOAT, out_list.expected_output_dtype(0));
EXPECT_EQ(DT_INT32, out_list.expected_output_dtype(1));
}
class GetAttrKernel : public ::tensorflow::OpKernel {
public:
explicit GetAttrKernel(OpKernelConstruction* context) : OpKernel(context) {
string attr_name;
OP_REQUIRES_OK(context, context->GetAttr("attr_name", &attr_name));
status.emplace_back("s", context->GetAttr(attr_name, &s));
status.emplace_back("s_list", context->GetAttr(attr_name, &s_list));
status.emplace_back("i", context->GetAttr(attr_name, &i));
status.emplace_back("i_list", context->GetAttr(attr_name, &i_list));
status.emplace_back("i32", context->GetAttr(attr_name, &i32));
status.emplace_back("i32_list", context->GetAttr(attr_name, &i32_list));
status.emplace_back("f", context->GetAttr(attr_name, &f));
status.emplace_back("f_list", context->GetAttr(attr_name, &f_list));
status.emplace_back("b", context->GetAttr(attr_name, &b));
status.emplace_back("b_list", context->GetAttr(attr_name, &b_list));
status.emplace_back("type", context->GetAttr(attr_name, &type));
status.emplace_back("type_list", context->GetAttr(attr_name, &type_list));
status.emplace_back("type_vector",
context->GetAttr(attr_name, &type_vector));
status.emplace_back("shape_proto",
context->GetAttr(attr_name, &shape_proto));
status.emplace_back("shape_proto_list",
context->GetAttr(attr_name, &shape_proto_list));
status.emplace_back("shape", context->GetAttr(attr_name, &shape));
status.emplace_back("shape_list", context->GetAttr(attr_name, &shape_list));
}
void Compute(::tensorflow::OpKernelContext* context) override {}
void ExpectOk(std::initializer_list<string> keys) {
for (const auto& key_status : status) {
bool in_keys = false;
for (const string& key : keys) {
if (key_status.first == key) {
in_keys = true;
}
}
EXPECT_EQ(in_keys, key_status.second.ok())
<< "key_status: " << key_status.first << ", " << key_status.second;
}
}
string s;
std::vector<string> s_list;
int64_t i;
std::vector<int64_t> i_list;
int32 i32;
std::vector<int32> i32_list;
float f;
std::vector<float> f_list;
bool b;
std::vector<bool> b_list;
DataType type;
std::vector<DataType> type_list;
DataTypeVector type_vector;
TensorShapeProto shape_proto;
std::vector<TensorShapeProto> shape_proto_list;
TensorShape shape;
std::vector<TensorShape> shape_list;
std::vector<std::pair<string, Status>> status;
};
class GetAttrTest : public OpKernelBuilderTest {};
REGISTER_OP("GetAttrStringList")
.Attr("attr_name: string")
.Attr("a: list(string)");
REGISTER_KERNEL_BUILDER(Name("GetAttrStringList").Device(DEVICE_CPU),
GetAttrKernel);
TEST_F(GetAttrTest, StringList) {
std::unique_ptr<OpKernel> op_kernel =
ExpectSuccess("GetAttrStringList", DEVICE_CPU,
{"attr_name|string|'a'", "a|list(string)|['foo', 'bar']"});
auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"s_list"});
EXPECT_EQ(std::vector<string>({"foo", "bar"}), get_attr_kernel->s_list);
op_kernel = ExpectSuccess("GetAttrStringList", DEVICE_CPU,
{"attr_name|string|'b'", "a|list(string)|['baz']"});
get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({});
EXPECT_TRUE(get_attr_kernel->s_list.empty());
}
REGISTER_OP("GetAttrInt")
.Attr("attr_name: string")
.Attr("a: int")
.Attr("b: list(int)");
REGISTER_KERNEL_BUILDER(Name("GetAttrInt").Device(DEVICE_CPU), GetAttrKernel);
TEST_F(GetAttrTest, Int) {
std::unique_ptr<OpKernel> op_kernel = ExpectSuccess(
"GetAttrInt", DEVICE_CPU,
{"attr_name|string|'a'", "a|int|35", "b|list(int)|[-1, 2, -4]"});
auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"i", "i32"});
EXPECT_EQ(35, get_attr_kernel->i);
EXPECT_EQ(35, get_attr_kernel->i32);
op_kernel = ExpectSuccess(
"GetAttrInt", DEVICE_CPU,
{"attr_name|string|'b'", "a|int|35", "b|list(int)|[-1, 2, -4]"});
get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"i_list", "i32_list"});
EXPECT_EQ(std::vector<int64_t>({-1, 2, -4}), get_attr_kernel->i_list);
EXPECT_EQ(std::vector<int32>({-1, 2, -4}), get_attr_kernel->i32_list);
op_kernel = ExpectSuccess("GetAttrInt", DEVICE_CPU,
{"attr_name|string|'a'", "a|int|8589934592",
"b|list(int)|[-8589934592]"});
get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"i"});
EXPECT_EQ(8589934592ll, get_attr_kernel->i);
for (const auto& key_status : get_attr_kernel->status) {
if (key_status.first == "i32") {
EXPECT_EQ(error::INVALID_ARGUMENT, key_status.second.code());
EXPECT_EQ("Attr a has value 8589934592 out of range for an int32",
key_status.second.message());
}
}
op_kernel = ExpectSuccess("GetAttrInt", DEVICE_CPU,
{"attr_name|string|'b'", "a|int|8589934592",
"b|list(int)|[-8589934592]"});
get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"i_list"});
EXPECT_EQ(std::vector<int64_t>({-8589934592ll}), get_attr_kernel->i_list);
for (const auto& key_status : get_attr_kernel->status) {
if (key_status.first == "i32_list") {
EXPECT_EQ(error::INVALID_ARGUMENT, key_status.second.code());
EXPECT_EQ("Attr b has value -8589934592 out of range for an int32",
key_status.second.message());
}
}
}
REGISTER_OP("GetAttrShape")
.Attr("attr_name: string")
.Attr("a: shape")
.Attr("b: list(shape)");
REGISTER_KERNEL_BUILDER(Name("GetAttrShape").Device(DEVICE_CPU), GetAttrKernel);
TEST_F(GetAttrTest, Shape) {
std::unique_ptr<OpKernel> op_kernel = ExpectSuccess(
"GetAttrShape", DEVICE_CPU,
{"attr_name|string|'a'", "a|shape|{ dim { size: 3 } }",
"b|list(shape)|[{ dim { size:2 } }, { dim { size: 4 } }]"});
auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"shape", "shape_proto"});
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString("dim { size: 3 }",
&expected_shape_proto);
EXPECT_EQ(get_attr_kernel->shape_proto.ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ("[3]", get_attr_kernel->shape.DebugString());
op_kernel = ExpectSuccess(
"GetAttrShape", DEVICE_CPU,
{"attr_name|string|'b'", "a|shape|{ dim { size: 3 } }",
"b|list(shape)|[{ dim { size:2 } }, { dim { size: 4 } }]"});
get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"shape_list", "shape_proto_list"});
ASSERT_EQ(2, get_attr_kernel->shape_proto_list.size());
protobuf::TextFormat::ParseFromString("dim { size: 2 }",
&expected_shape_proto);
EXPECT_EQ(get_attr_kernel->shape_proto_list[0].ShortDebugString(),
expected_shape_proto.ShortDebugString());
protobuf::TextFormat::ParseFromString("dim { size: 4 }",
&expected_shape_proto);
EXPECT_EQ(get_attr_kernel->shape_proto_list[1].ShortDebugString(),
expected_shape_proto.ShortDebugString());
ASSERT_EQ(2, get_attr_kernel->shape_list.size());
EXPECT_EQ("[2]", get_attr_kernel->shape_list[0].DebugString());
EXPECT_EQ("[4]", get_attr_kernel->shape_list[1].DebugString());
}
REGISTER_OP("GetAttrType").Attr("attr_name: string").Attr("a: type");
REGISTER_KERNEL_BUILDER(Name("GetAttrType").Device(DEVICE_CPU), GetAttrKernel);
TEST_F(GetAttrTest, Type) {
std::unique_ptr<OpKernel> op_kernel = ExpectSuccess(
"GetAttrType", DEVICE_CPU, {"attr_name|string|'a'", "a|type|DT_FLOAT"});
auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"type"});
EXPECT_EQ(DT_FLOAT, get_attr_kernel->type);
}
REGISTER_OP("GetAttrTypeList").Attr("attr_name: string").Attr("a: list(type)");
REGISTER_KERNEL_BUILDER(Name("GetAttrTypeList").Device(DEVICE_CPU),
GetAttrKernel);
TEST_F(GetAttrTest, TypeList) {
std::unique_ptr<OpKernel> op_kernel = ExpectSuccess(
"GetAttrTypeList", DEVICE_CPU,
{"attr_name|string|'a'", "a|list(type)|[DT_INT32, DT_BOOL]"});
auto* get_attr_kernel = static_cast<GetAttrKernel*>(op_kernel.get());
get_attr_kernel->ExpectOk({"type_list", "type_vector"});
ASSERT_EQ(2, get_attr_kernel->type_list.size());
EXPECT_EQ(DT_INT32, get_attr_kernel->type_list[0]);
EXPECT_EQ(DT_BOOL, get_attr_kernel->type_list[1]);
ASSERT_EQ(2, get_attr_kernel->type_vector.size());
EXPECT_EQ(DT_INT32, get_attr_kernel->type_vector[0]);
EXPECT_EQ(DT_BOOL, get_attr_kernel->type_vector[1]);
}
template <int WHICH>
class LabeledKernel : public BaseKernel {
public:
using BaseKernel::BaseKernel;
int Which() const override { return WHICH; }
};
class LabelTest : public OpKernelBuilderTest {};
REGISTER_OP("LabeledKernel");
REGISTER_KERNEL_BUILDER(Name("LabeledKernel").Device(DEVICE_CPU),
LabeledKernel<0>);
REGISTER_KERNEL_BUILDER(Name("LabeledKernel").Device(DEVICE_CPU).Label("one"),
LabeledKernel<1>);
REGISTER_KERNEL_BUILDER(Name("LabeledKernel").Device(DEVICE_CPU).Label("dupe"),
LabeledKernel<2>);
REGISTER_KERNEL_BUILDER(Name("LabeledKernel").Device(DEVICE_CPU).Label("dupe"),
LabeledKernel<3>);
TEST_F(LabelTest, Default) {
std::unique_ptr<OpKernel> op_kernel =
ExpectSuccess("LabeledKernel", DEVICE_CPU, {});
auto* get_labeled_kernel = static_cast<BaseKernel*>(op_kernel.get());
EXPECT_EQ(0, get_labeled_kernel->Which());
EXPECT_EQ("LabeledKernel<0>",
GetKernelClassName("LabeledKernel", DEVICE_CPU, {}));
}
TEST_F(LabelTest, Specified) {
std::unique_ptr<OpKernel> op_kernel =
ExpectSuccess("LabeledKernel", DEVICE_CPU, {"_kernel|string|'one'"});
auto* get_labeled_kernel = static_cast<BaseKernel*>(op_kernel.get());
EXPECT_EQ(1, get_labeled_kernel->Which());
EXPECT_EQ("LabeledKernel<1>", GetKernelClassName("LabeledKernel", DEVICE_CPU,
{"_kernel|string|'one'"}));
}
TEST_F(LabelTest, Duplicate) {
ExpectFailure("LabeledKernel", DEVICE_CPU, {"_kernel|string|'dupe'"},
error::INVALID_ARGUMENT);
}
REGISTER_OP("JitKernel");
REGISTER_KERNEL_BUILDER(
Name("JitKernel").Device(DEVICE_CPU).Label(kJitKernelLabel),
LabeledKernel<4>);
TEST_F(LabelTest, Filter) {
ExpectSuccess("JitKernel", DEVICE_CPU, {absl::StrCat("_kernel|string|''")});
ExpectFailure("JitKernel", DEVICE_CPU,
{absl::StrCat("_kernel|string|'", kJitKernelLabel, "'")},
error::NOT_FOUND);
}
void BM_InputRangeHelper(::testing::benchmark::State& state,
const NodeDef& node_def, const char* input_name,
int expected_start, int expected_stop) {
Status status;
auto device = std::make_unique<DummyDevice>(Env::Default());
std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(),
cpu_allocator(), node_def,
TF_GRAPH_DEF_VERSION, &status));
TF_CHECK_OK(status);
for (auto s : state) {
int start;
int stop;
TF_CHECK_OK(op->InputRange(input_name, &start, &stop));
EXPECT_EQ(expected_start, start);
EXPECT_EQ(expected_stop, stop);
}
}
REGISTER_KERNEL_BUILDER(Name("ConcatV2").Device(DEVICE_CPU), DummyKernel);
REGISTER_KERNEL_BUILDER(Name("Select").Device(DEVICE_CPU), DummyKernel);
REGISTER_KERNEL_BUILDER(Name("MatMul").Device(DEVICE_CPU), DummyKernel);
void BM_ConcatInputRange(::testing::benchmark::State& state) {
NodeDef node_def;
node_def.set_name("concat-op");
node_def.set_op("ConcatV2");
AttrValue attr_N;
attr_N.set_i(4);
AttrValue attr_T;
attr_T.set_type(DT_FLOAT);
AttrValue attr_Tidx;
attr_Tidx.set_type(DT_INT32);
node_def.mutable_attr()->insert({"N", attr_N});
node_def.mutable_attr()->insert({"T", attr_T});
node_def.mutable_attr()->insert({"Tidx", attr_Tidx});
for (size_t i = 0; i < 5; ++i) {
node_def.add_input(strings::StrCat("a:", i));
}
BM_InputRangeHelper(state, node_def, "values", 0, 4);
}
void BM_SelectInputRange(::testing::benchmark::State& state) {
NodeDef node_def;
node_def.set_name("select-op");
node_def.set_op("Select");
AttrValue attr_T;
attr_T.set_type(DT_FLOAT);
node_def.mutable_attr()->insert({"T", attr_T});
for (size_t i = 0; i < 3; ++i) {
node_def.add_input(strings::StrCat("a:", i));
}
BM_InputRangeHelper(state, node_def, "condition", 0, 1);
}
void BM_TraceString(::testing::benchmark::State& state) {
const int verbose = state.range(0);
NodeDef node_def;
node_def.set_name("gradient_tape/model_1/dense_1/MatMul_1");
node_def.set_op("MatMul");
AttrValue transpose_a, transpose_b, attr_t;
attr_t.set_type(DT_FLOAT);
node_def.mutable_attr()->insert({"T", attr_t});
transpose_a.set_b(true);
node_def.mutable_attr()->insert({"transpose_a", transpose_a});
transpose_b.set_b(true);
node_def.mutable_attr()->insert({"transpose_b", transpose_b});
for (size_t i = 0; i < 2; ++i) {
node_def.add_input(strings::StrCat("a:", i));
}
Status status;
auto device = std::make_unique<DummyDevice>(Env::Default());
std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(),
cpu_allocator(), node_def,
TF_GRAPH_DEF_VERSION, &status));
TF_CHECK_OK(status);
OpKernelContext::Params params;
params.device = device.get();
params.op_kernel = op.get();
Tensor a(DT_FLOAT, TensorShape({99000, 256}));
Tensor b(DT_FLOAT, TensorShape({256, 256}));
absl::InlinedVector<TensorValue, 4> inputs{TensorValue(&a), TensorValue(&b)};
params.inputs = inputs;
auto ctx = std::make_unique<OpKernelContext>(¶ms);
for (auto s : state) {
auto trace = op->TraceString(*ctx, verbose);
}
}
BENCHMARK(BM_ConcatInputRange);
BENCHMARK(BM_SelectInputRange);
BENCHMARK(BM_TraceString)->Arg(1)->Arg(0);
TEST(RegisteredKernels, CanCallGetAllRegisteredKernels) {
auto kernel_list = GetAllRegisteredKernels();
auto all_registered_kernels = kernel_list.kernel();
auto has_name_test1 = [](const KernelDef& k) { return k.op() == "Test1"; };
auto test1_it = std::find_if(all_registered_kernels.begin(),
all_registered_kernels.end(), has_name_test1);
ASSERT_NE(test1_it, all_registered_kernels.end());
EXPECT_EQ(test1_it->device_type(), "CPU");
++test1_it;
EXPECT_EQ(
std::find_if(test1_it, all_registered_kernels.end(), has_name_test1),
all_registered_kernels.end());
}
TEST(RegisteredKernels, CanLogAllRegisteredKernels) {
tensorflow::LogAllRegisteredKernels();
}
TEST(RegisteredKernels, GetFilteredRegisteredKernels) {
auto has_name_test1 = [](const KernelDef& k) { return k.op() == "Test1"; };
auto kernel_list = GetFilteredRegisteredKernels(has_name_test1);
ASSERT_EQ(kernel_list.kernel_size(), 1);
EXPECT_EQ(kernel_list.kernel(0).op(), "Test1");
EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU");
}
TEST(RegisteredKernels, GetRegisteredKernelsForOp) {
auto kernel_list = GetRegisteredKernelsForOp("Test1");
ASSERT_EQ(kernel_list.kernel_size(), 1);
EXPECT_EQ(kernel_list.kernel(0).op(), "Test1");
EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU");
}
#define EXTRACT_KERNEL_NAME_TO_STRING_IMPL(name, kernel_builder, ...) name
#define EXTRACT_KERNEL_NAME_TO_STRING(kernel_builder) \
TF_EXTRACT_KERNEL_NAME(EXTRACT_KERNEL_NAME_TO_STRING_IMPL, kernel_builder)
TEST(RegisterKernelMacro, ExtractName) {
static constexpr char const* kName = "Foo";
static constexpr char const* kExtractedName =
EXTRACT_KERNEL_NAME_TO_STRING(Name(kName).Label("Label"));
EXPECT_THAT(kExtractedName, ::testing::StrEq(kName));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d8fad02-ec28-44dc-a261-776f19b8ca59 | cpp | tensorflow/tensorflow | graph_def_util | tensorflow/core/framework/graph_def_util.cc | tensorflow/core/framework/graph_def_util_test.cc | #include "tensorflow/core/framework/graph_def_util.h"
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
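// Produces a human-readable, multi-line summary of a GraphDef: the versions
// proto on the first line, then one SummarizeNodeDef line per node.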
string SummarizeGraphDef(const GraphDef& graph_def) {
string ret;
strings::StrAppend(
&ret, "versions = ", graph_def.versions().ShortDebugString(), ";\n");
for (const NodeDef& node : graph_def.node()) {
strings::StrAppend(&ret, SummarizeNodeDef(node), ";\n");
}
return ret;
}
Status ValidateExternalGraphDefSyntax(const GraphDef& graph_def) {
for (const NodeDef& node : graph_def.node()) {
TF_RETURN_IF_ERROR(ValidateExternalNodeDefSyntax(node));
}
return absl::OkStatus();
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset) {
return AddDefaultAttrsToGraphDef(graph_def, op_registry, node_offset, false);
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset, bool skip_unknown_ops) {
if (node_offset > graph_def->node_size()) {
return errors::InvalidArgument(
"Tried to add default attrs to GraphDef "
"starting at offset ",
node_offset, " with total nodes in graph: ", graph_def->node_size());
}
for (int i = node_offset; i < graph_def->node_size(); ++i) {
NodeDef* node_def = graph_def->mutable_node(i);
const OpDef* op_def;
Status s = op_registry.LookUpOpDef(node_def->op(), &op_def);
if (s.ok()) {
AddDefaultsToNodeDef(*op_def, node_def);
} else if (!skip_unknown_ops) {
return s;
}
}
return absl::OkStatus();
}
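// Removes from `node_def` any attr that the consumer's OpDef does not know
// about, provided the producer's OpDef declares a default for it and the node
// still carries exactly that default value. Removed (op, attr) pairs are
// recorded in `op_attr_removed` when it is non-null. Attrs whose names start
// with '_' are internal and left untouched.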
static Status RemoveNewDefaultAttrsFromNodeDef(
NodeDef* node_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
const OpDef* producer_op_def;
const OpDef* consumer_op_def;
TF_RETURN_IF_ERROR(
producer_op_registry.LookUpOpDef(node_def->op(), &producer_op_def));
TF_RETURN_IF_ERROR(
consumer_op_registry.LookUpOpDef(node_def->op(), &consumer_op_def));
std::vector<string> to_remove;
for (const auto& attr : node_def->attr()) {
if (!absl::StartsWith(attr.first, "_") &&
FindAttr(attr.first, *consumer_op_def) == nullptr) {
const OpDef::AttrDef* producer_attr_def =
FindAttr(attr.first, *producer_op_def);
if (producer_attr_def == nullptr) {
return errors::InvalidArgument(
"Attr '", attr.first,
"' missing in producer's OpDef: ", SummarizeOpDef(*producer_op_def),
" but found in node: ", FormatNodeDefForError(*node_def));
}
if (producer_attr_def->has_default_value() &&
AreAttrValuesEqual(producer_attr_def->default_value(), attr.second)) {
to_remove.emplace_back(attr.first);
}
}
}
for (const string& attr_name : to_remove) {
node_def->mutable_attr()->erase(attr_name);
if (op_attr_removed != nullptr) {
op_attr_removed->insert(std::make_pair(node_def->op(), attr_name));
}
}
return absl::OkStatus();
}
static bool IsFunction(const GraphDef& graph_def, const string& op_name) {
for (const auto& func_def : graph_def.library().function()) {
if (op_name == func_def.signature().name()) return true;
}
return false;
}
Status RemoveNewDefaultAttrsFromGraphDef(
GraphDef* graph_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
for (int n = 0; n < graph_def->node_size(); ++n) {
NodeDef* node_def = graph_def->mutable_node(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
for (int f = 0; f < graph_def->library().function_size(); ++f) {
FunctionDef* func_def = graph_def->mutable_library()->mutable_function(f);
for (int n = 0; n < func_def->node_def_size(); ++n) {
NodeDef* node_def = func_def->mutable_node_def(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
}
return absl::OkStatus();
}
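// Erases from each node every attr whose value equals the default declared in
// the registered OpDef, so that it can be re-added by
// AddDefaultAttrsToGraphDef on load. Nodes whose op is not found in
// `op_registry` (typically function calls) are skipped with a VLOG.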
void StripDefaultAttributes(const OpRegistryInterface& op_registry,
protobuf::RepeatedPtrField<NodeDef>* nodes) {
for (int i = 0; i < nodes->size(); ++i) {
NodeDef* node = nodes->Mutable(i);
const OpDef* op_def;
const OpRegistrationData* op_reg_data = nullptr;
Status s = op_registry.LookUp(node->op(), &op_reg_data);
if (!s.ok()) {
VLOG(1) << "Ignoring encountered unknown operation "
<< SummarizeNodeDef(*node)
<< " when stripping default attributes. It is likely a function, "
"in which case ignoring it is fine";
continue;
}
op_def = &op_reg_data->op_def;
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.has_default_value()) {
AttrValueMap* attrs = node->mutable_attr();
const string& name = attr_def.name();
auto iter = attrs->find(name);
if (iter != attrs->end()) {
const AttrValue& default_value = attr_def.default_value();
          if (AreAttrValuesEqual(iter->second, default_value,
                                 /*allow_false_negatives=*/true)) {
attrs->erase(name);
}
}
}
}
}
}
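// Collects the ops used by `graph_def`, following calls into the function
// library transitively; function names themselves are excluded from the
// result, leaving only primitive ops.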
void OpsUsedByGraph(const GraphDef& graph_def,
std::set<string>* ops_used_in_graph) {
std::unordered_map<string, const FunctionDef*> name_to_function;
for (const auto& function : graph_def.library().function()) {
name_to_function.insert(
std::make_pair(function.signature().name(), &function));
}
std::set<string> used_ops;
std::vector<const FunctionDef*> functions_to_process;
const auto mark_op_as_used = [&used_ops, &functions_to_process,
&name_to_function](const string& op) {
if (used_ops.insert(op).second) {
const auto it = name_to_function.find(op);
if (it != name_to_function.end()) {
functions_to_process.push_back(it->second);
}
}
};
for (const auto& node : graph_def.node()) {
mark_op_as_used(node.op());
}
while (!functions_to_process.empty()) {
const FunctionDef* fun = functions_to_process.back();
functions_to_process.pop_back();
for (const auto& node : fun->node_def()) {
mark_op_as_used(node.op());
}
}
ops_used_in_graph->clear();
for (const string& op_name : used_ops) {
if (name_to_function.find(op_name) == name_to_function.end()) {
ops_used_in_graph->insert(op_name);
}
}
}
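// Builds an OpList containing one OpDef (with summaries and descriptions
// stripped) for every primitive op reachable from `graph_def`.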
Status StrippedOpListForGraph(const GraphDef& graph_def,
const OpRegistryInterface& op_registry,
OpList* stripped_op_list) {
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
stripped_op_list->clear_op();
for (const string& op_name : used_ops) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(op_name, &op_def));
OpDef* stripped_op = stripped_op_list->add_op();
stripped_op->CopyFrom(*op_def);
RemoveDescriptionsFromOpDef(stripped_op);
}
return absl::OkStatus();
}
} | #include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
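// Finalizes an OpDefBuilder into a bare OpDef, discarding the rest of the
// OpRegistrationData; used below to assemble OpLists for test registries.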
Status FinalizeOpDef(const OpDefBuilder& b, OpDef* op_def) {
OpRegistrationData op_reg_data;
const Status s = b.Finalize(&op_reg_data);
*op_def = op_reg_data.op_def;
return s;
}
TEST(AddToGraphTest, MakeGraphDefWithNamespacedOpName) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("Project>SomeOp"), op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Project>SomeOp", ®istry)
.Finalize(graph_def.add_node()));
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeWithDefault) {
OpList op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("NoChangeWithDefault").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncwd", "NoChangeWithDefault", ®istry)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeNoDefault) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("NoChangeNoDefault").Attr("a: int"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncnd", "NoChangeNoDefault", ®istry)
.Attr("a", 42)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UsesDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &producer_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &consumer_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, ChangedFromDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("changed_from_default", "ChangedFromDefault",
&producer_registry)
.Attr("a", 9)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UnderscoreAttrs) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), producer_op_list.add_op()));
OpDef::AttrDef* attr = producer_op_list.mutable_op(0)->add_attr();
attr->set_name("_underscore");
attr->set_type("int");
attr->mutable_default_value()->set_i(17);
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Underscore", &producer_registry)
.Attr("_underscore", 17)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(op_attr_removed.size(), 0);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, HasFunction) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
*produced_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {{"a", 17}}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
OpList function_op_list;
*function_op_list.add_op() =
produced_graph_def.library().function(0).signature();
OpListOpRegistry function_registry(&function_op_list);
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
*expected_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(expected_graph_def.library().DebugString(),
produced_graph_def.library().DebugString());
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(StripDefaultAttributesTest, DefaultStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Finalize(graph_def.add_node()));
ASSERT_EQ(1, graph_def.node(0).attr_size());
ASSERT_EQ(12, graph_def.node(0).attr().at("a").i());
StripDefaultAttributes(registry, graph_def.mutable_node());
ASSERT_EQ(1, graph_def.node_size());
ASSERT_EQ(0, graph_def.node(0).attr_size());
}
TEST(StripDefaultAttributesTest, NonDefaultNotStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Attr("a", 9)
.Finalize(graph_def.add_node()));
GraphDef expected = graph_def;
StripDefaultAttributes(registry, graph_def.mutable_node());
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
TEST(StrippedOpListForGraphTest, FlatTest) {
OpList op_list;
for (const string& op : {"A", "B", "C", "D"}) {
OpDef* op_def = op_list.add_op();
op_def->set_name(op);
op_def->set_summary("summary");
op_def->set_description("description");
op_def->set_is_commutative(op == "B");
}
const string graph_ops[4][3] = {
{"C", "B", "B"}, {"B", "C", "B"}, {"B", "B", "C"}, {"C", "C", "B"}};
for (const bool use_function : {false, true}) {
for (int order = 0; order < 4; order++) {
GraphDef graph_def;
if (use_function) {
FunctionDef* function_def = graph_def.mutable_library()->add_function();
function_def->mutable_signature()->set_name("F");
for (const string& op : graph_ops[order]) {
function_def->add_node_def()->set_op(op);
}
graph_def.add_node()->set_op("F");
} else {
for (const string& op : graph_ops[order]) {
string name = strings::StrCat("name", graph_def.node_size());
NodeDef* node = graph_def.add_node();
node->set_name(name);
node->set_op(op);
}
}
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 2);
for (int i = 0; i < 2; i++) {
const OpDef& op = stripped_op_list.op(i);
EXPECT_EQ(op.name(), i ? "C" : "B");
EXPECT_EQ(op.summary(), "");
EXPECT_EQ(op.description(), "");
EXPECT_EQ(op.is_commutative(), !i);
}
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"B", "C"}), used_ops);
}
}
}
TEST(StrippedOpListForGraphTest, NestedFunctionTest) {
OpList op_list;
op_list.add_op()->set_name("A");
for (const bool recursive : {false, true}) {
GraphDef graph_def;
FunctionDef* b = graph_def.mutable_library()->add_function();
FunctionDef* c = graph_def.mutable_library()->add_function();
b->mutable_signature()->set_name("B");
c->mutable_signature()->set_name("C");
b->add_node_def()->set_op("A");
c->add_node_def()->set_op("B");
if (recursive) {
b->add_node_def()->set_op("B");
c->add_node_def()->set_op("C");
}
graph_def.add_node()->set_op("C");
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 1);
ASSERT_EQ(stripped_op_list.op(0).name(), "A");
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"A"}), used_ops);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3028451e-22e9-4606-b27f-2af99eba21ae | cpp | tensorflow/tensorflow | node_def_util | tensorflow/core/framework/node_def_util.cc | tensorflow/core/framework/node_def_util_test.cc | #include "tensorflow/core/framework/node_def_util.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/scanner.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const char* const kColocationAttrName = "_class";
const char* const kColocationGroupPrefix = "loc:@";
const char* const kTpuExecuteStagingOp = "IdentityN";
const char* const kTpuExecuteStagingNodeName = "_variable_copy";
AttrSlice::AttrSlice() : ndef_(nullptr) {
static const AttrValueMap* const kEmptyAttrValueMap = new AttrValueMap;
attrs_ = kEmptyAttrValueMap;
}
AttrSlice::AttrSlice(const NodeDef& node_def)
: ndef_(&node_def), attrs_(nullptr) {}
AttrSlice::AttrSlice(const AttrValueMap* a) : ndef_(nullptr), attrs_(a) {}
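// Renders the attrs (sorted by name) as "name=value" pairs, appending the
// device as `_device="..."` when one is supplied.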
string SummarizeAttrsHelper(AttrSlice attrs, StringPiece device) {
string ret;
std::vector<string> attr_names;
attr_names.reserve(attrs.size());
for (const auto& attr : attrs) {
attr_names.push_back(attr.first);
}
std::sort(attr_names.begin(), attr_names.end());
bool first = true;
for (const string& attr_name : attr_names) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
strings::StrAppend(&ret, attr_name, "=",
SummarizeAttrValue(*attrs.Find(attr_name)));
}
if (!device.empty()) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
strings::StrAppend(&ret, "_device=\"", device, "\"");
}
return ret;
}
string AttrSlice::SummarizeNode() const {
return ndef_ ? SummarizeNodeDef(*ndef_)
: strings::StrCat(
"[", SummarizeAttrsHelper(*this, StringPiece()), "]");
}
string AttrSlice::DebugString() const {
std::vector<string> attr_key_vals;
attr_key_vals.reserve(attrs()->size());
for (const auto& it : *this) {
const string& name = it.first;
const AttrValue& attr_value = it.second;
attr_key_vals.push_back(
absl::StrCat(name, "=", SummarizeAttrValue(attr_value)));
}
return absl::StrJoin(attr_key_vals, ", ");
}
string SummarizeNodeDef(const NodeDef& node_def, int max_inputs_in_summary) {
string ret = strings::StrCat(errors::FormatNodeNameForError(node_def.name()),
" = ", node_def.op(), "[");
strings::StrAppend(&ret, SummarizeAttrsHelper(node_def, node_def.device()));
strings::StrAppend(&ret, "](");
bool first = true;
for (const string& input : node_def.input()) {
if (!first) strings::StrAppend(&ret, ", ");
first = false;
if (max_inputs_in_summary-- == 0) {
strings::StrAppend(&ret, "...");
break;
}
strings::StrAppend(&ret, input);
}
strings::StrAppend(&ret, ")");
return ret;
}
string SummarizeAttrs(const NodeDef& node_def) {
return SummarizeAttrsHelper(node_def, node_def.device());
}
string FormatNodeDefForError(
StringPiece node_name, bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info) {
return !has_experimental_debug_info ||
experimental_debug_info.original_node_names().empty()
? errors::FormatNodeNameForError(string(node_name))
: errors::FormatOriginalNodeLocationForError(
experimental_debug_info.original_node_names(),
experimental_debug_info.original_func_names());
}
string FormatNodeDefForError(const NodeDef& node_def) {
return FormatNodeDefForError(node_def.name(),
node_def.has_experimental_debug_info(),
node_def.experimental_debug_info());
}
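// Sketch of the rendered form, inferred from the unit tests rather than a
// stability guarantee: a node without debug info renders as
// "{{node <name>}}"; with experimental debug info present, the original node
// (and function) names are rendered instead.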
const AttrValue* AttrSlice::Find(StringPiece attr_name) const {
for (const auto& attr : *attrs()) {
if (attr.first == attr_name) {
return &attr.second;
}
}
return nullptr;
}
const AttrValue* AttrSlice::FindByString(const string& attr_name) const {
auto iter = attrs()->find(attr_name);
if (iter != attrs()->end()) {
return &iter->second;
} else {
return nullptr;
}
}
Status AttrSlice::CheckFind(StringPiece attr_name,
const AttrValue* attr_value) const {
if (attr_value != nullptr) {
return absl::OkStatus();
}
Status s = errors::NotFound("No attr named '", attr_name, "' in NodeDef:");
if (!absl::StartsWith(attr_name, "_") && ndef_ != nullptr) {
s = AttachDef(s, *ndef_);
}
return s;
}
Status AttrSlice::Find(StringPiece attr_name,
const AttrValue** attr_value) const {
*attr_value = Find(attr_name);
return CheckFind(attr_name, *attr_value);
}
Status AttrSlice::FindByString(const string& attr_name,
const AttrValue** attr_value) const {
*attr_value = FindByString(attr_name);
return CheckFind(attr_name, *attr_value);
}
bool AttrSlice::EqualAttrs(AttrSlice other, Scratch* scratch) const {
if (size() != other.size()) return false;
for (const auto& attr : *other.attrs()) {
auto iter = attrs()->find(attr.first);
if (iter == attrs()->end()) return false;
iter->second.SerializeToString(&scratch->a);
attr.second.SerializeToString(&scratch->b);
if (scratch->a != scratch->b) return false;
}
return true;
}
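// The two macros below stamp out typed GetNodeAttr/TryGetNodeAttr overloads
// for scalars and std::vector<T>: GetNodeAttr returns a Status, while
// TryGetNodeAttr returns false instead of failing when the attr is missing or
// has the wrong type. The trailing variadic argument injects per-type
// validation (e.g. the int32 range check and the TensorShape validity check).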
#define DEFINE_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, ATTR_TYPE)); \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return OkStatus(); \
} \
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value; \
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value)); \
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")")); \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return OkStatus(); \
}
#define DEFINE_TRY_GET_ATTR(TYPE, FIELD, ATTR_TYPE, APPEND_OP, CAST, ...) \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
TYPE* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, ATTR_TYPE); \
if (!s.ok()) { \
return false; \
} \
const auto& v = attr_value->FIELD(); \
__VA_ARGS__; \
*value = CAST; \
return true; \
} \
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name, \
std::vector<TYPE>* value) { \
const AttrValue* attr_value = attrs.Find(attr_name); \
if (attr_value == nullptr) { \
return false; \
} \
Status s = AttrValueHasType(*attr_value, "list(" ATTR_TYPE ")"); \
if (!s.ok()) { \
return false; \
} \
value->reserve(attr_value->list().FIELD().size()); \
for (const auto& v : attr_value->list().FIELD()) { \
__VA_ARGS__; \
value->APPEND_OP(CAST); \
} \
return true; \
}
DEFINE_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(tstring, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(string, s, "string", emplace_back, v, ;)
DEFINE_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(int64_t, i, "int", emplace_back, v, ;)
DEFINE_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
return errors::InvalidArgument("Attr ", attr_name, " has value ", v,
" out of range for an int32");
})
DEFINE_TRY_GET_ATTR(
int32, i, "int", emplace_back, static_cast<int32>(v),
if (static_cast<int64_t>(static_cast<int32>(v)) != v) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has value " << v
<< " out of range for an int32";
}
return false;
})
DEFINE_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(float, f, "float", emplace_back, v, ;)
DEFINE_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_TRY_GET_ATTR(bool, b, "bool", emplace_back, v, ;)
DEFINE_GET_ATTR(DataType, type, "type", emplace_back, static_cast<DataType>(v),
;)
DEFINE_TRY_GET_ATTR(DataType, type, "type", emplace_back,
static_cast<DataType>(v),
;)
DEFINE_GET_ATTR(TensorShapeProto, shape, "shape", emplace_back, v, ;)
DEFINE_GET_ATTR(TensorShape, shape, "shape", emplace_back, TensorShape(v),
TF_RETURN_IF_ERROR(TensorShape::IsValidShape(v));)
DEFINE_TRY_GET_ATTR(
TensorShape, shape, "shape", emplace_back, TensorShape(v),
if (!TensorShape::IsValidShape(v).ok()) {
static int log_counter = 0;
if (log_counter < 10) {
log_counter++;
LOG(WARNING) << "Attr " << attr_name << " has invalid shape value "
<< v.DebugString();
}
return false;
})
DEFINE_GET_ATTR(PartialTensorShape, shape, "shape", emplace_back,
PartialTensorShape(v),
TF_RETURN_IF_ERROR(PartialTensorShape::IsValidShape(v));)
DEFINE_GET_ATTR(
Tensor, tensor, "tensor", emplace_back, t, Tensor t; if (!t.FromProto(v)) {
return errors::InvalidArgument("Attr ", attr_name, " has value ",
v.ShortDebugString(),
" that can't be converted to a Tensor");
})
DEFINE_GET_ATTR(NameAttrList, func, "func", emplace_back, v, ;);
#undef DEFINE_GET_ATTR
bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name) {
return node_def.attr().find(string(attr_name)) != node_def.attr().end();
}
static const string& kEmptyString = *new string();
const string& GetNodeAttrString(const AttrSlice& attrs, StringPiece attr_name) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return kEmptyString;
}
Status s = AttrValueHasType(*attr_value, "string");
if (!s.ok()) {
return kEmptyString;
}
return attr_value->s();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const string*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(string)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().s().size());
for (const auto& v : attr_value->list().s()) {
value->push_back(&v);
}
return true;
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
std::vector<const TensorShapeProto*>* value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "list(shape)");
if (!s.ok()) {
return false;
}
value->reserve(attr_value->list().shape().size());
for (const auto& v : attr_value->list().shape()) {
value->push_back(&v);
}
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
DataTypeVector* value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "list(type)"));
for (const auto& v : attr_value->list().type()) {
value->push_back(static_cast<DataType>(v));
}
return absl::OkStatus();
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const TensorProto** value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "tensor"));
*value = &attr_value->tensor();
return absl::OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const TensorProto** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "tensor");
if (!s.ok()) {
return false;
}
*value = &attr_value->tensor();
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const NameAttrList** value) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(attr_name, &attr_value));
TF_RETURN_IF_ERROR(AttrValueHasType(*attr_value, "func"));
*value = &attr_value->func();
return absl::OkStatus();
}
bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
const NameAttrList** value) {
const AttrValue* attr_value = attrs.Find(attr_name);
if (attr_value == nullptr) {
return false;
}
Status s = AttrValueHasType(*attr_value, "func");
if (!s.ok()) {
return false;
}
*value = &attr_value->func();
return true;
}
Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
Padding* value) {
string str_value;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, attr_name, &str_value));
return GetPaddingFromString(str_value, value);
}
namespace {
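// Expands one OpDef::ArgDef into the concrete DataTypes it contributes to an
// input or output signature: number_attr repeats a single type, type_attr and
// type_list_attr are resolved from the node's attrs, and is_ref wraps each
// appended type in a ref type.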
template <class NodeDefOrAttrSlice>
Status AddArgToSig(const NodeDefOrAttrSlice& node_or_attrs,
const OpDef::ArgDef& arg_def, DataTypeVector* sig) {
const int original_size = sig->size();
if (!arg_def.number_attr().empty()) {
int64_t repeats = -1;
TF_RETURN_IF_ERROR(
GetNodeAttr(node_or_attrs, arg_def.number_attr(), &repeats));
if (static_cast<int64_t>(static_cast<int32>(repeats)) != repeats) {
return errors::InvalidArgument("Number of outputs is too big: ", repeats);
}
if (repeats < 0) {
return errors::InvalidArgument("Value for number_attr() ", repeats,
" < 0");
}
if (!arg_def.type_attr().empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(node_or_attrs, arg_def.type_attr(), &dtype));
for (int i = 0; i < repeats; ++i) {
sig->push_back(dtype);
}
} else if (arg_def.type() != DT_INVALID) {
for (int i = 0; i < repeats; ++i) {
sig->push_back(arg_def.type());
}
} else {
return errors::InvalidArgument("Missing type or type_attr field in ",
arg_def.ShortDebugString());
}
} else if (!arg_def.type_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(AttrSlice(node_or_attrs)
.FindByString(arg_def.type_attr(), &attr_value));
sig->push_back(attr_value->type());
} else if (!arg_def.type_list_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(
AttrSlice(node_or_attrs)
.FindByString(arg_def.type_list_attr(), &attr_value));
for (int dtype : attr_value->list().type()) {
sig->push_back(static_cast<DataType>(dtype));
}
} else if (arg_def.type() != DT_INVALID) {
sig->push_back(arg_def.type());
} else {
return errors::InvalidArgument("No type fields in ",
arg_def.ShortDebugString());
}
if (arg_def.is_ref()) {
for (size_t i = original_size; i < sig->size(); ++i) {
if (IsRefType((*sig)[i])) {
return errors::InvalidArgument(
"Requested reference to a reference type: ",
arg_def.ShortDebugString());
}
(*sig)[i] = MakeRefType((*sig)[i]);
}
}
return absl::OkStatus();
}
}
Status InputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
int input_port, DataType* input_type) {
DataTypeVector input_types;
for (const auto& arg : op_def.input_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &input_types));
int input_types_size = input_types.size();
if (input_types_size > input_port) {
const DataType dtype = input_types[input_port];
*input_type = dtype;
return absl::OkStatus();
}
}
return errors::InvalidArgument("Input ", input_port, " not found for node ",
node_def.name());
}
Status InputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* inputs) {
for (const auto& arg : op_def.input_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, inputs));
}
return absl::OkStatus();
}
Status OutputTypeForNode(const NodeDef& node_def, const OpDef& op_def,
int output_port, DataType* output_type) {
DataTypeVector output_types;
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, &output_types));
int output_types_size = output_types.size();
if (output_types_size > output_port) {
const DataType dtype = output_types[output_port];
*output_type = dtype;
return absl::OkStatus();
}
}
return errors::InvalidArgument("Output ", output_port, " not found for node ",
node_def.name());
}
Status OutputTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* outputs) {
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(node_def, arg, outputs));
}
return absl::OkStatus();
}
Status OutputTypesForNode(const AttrSlice& attrs, const OpDef& op_def,
DataTypeVector* outputs) {
for (const auto& arg : op_def.output_arg()) {
TF_RETURN_IF_ERROR(AddArgToSig(attrs, arg, outputs));
}
return absl::OkStatus();
}
Status InOutTypesForNode(const NodeDef& node_def, const OpDef& op_def,
DataTypeVector* inputs, DataTypeVector* outputs) {
TF_RETURN_IF_ERROR(InputTypesForNode(node_def, op_def, inputs));
return OutputTypesForNode(node_def, op_def, outputs);
}
Status NumOutputsForNode(const NodeDef& node_def, const OpDef& op_def,
int* num_outputs) {
DataTypeVector outputs;
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, op_def, &outputs));
*num_outputs = outputs.size();
return absl::OkStatus();
}
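// Maps a flat input/output port id on `node` to the index of the ArgDef that
// covers it, accounting for args widened by number_attr or type_list_attr;
// returns -1 if the port id is out of range.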
int OpPortIdToArgId(const NodeDef& node,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
int port_id) {
for (int arg_id = 0; arg_id < args.size(); ++arg_id) {
if (port_id < 0) {
return -1;
} else if (port_id == 0) {
return arg_id;
}
int n = 1;
const auto& arg = args.Get(arg_id);
if (!arg.number_attr().empty()) {
n = node.attr().at(arg.number_attr()).i();
} else if (!arg.type_list_attr().empty()) {
n = node.attr().at(arg.type_list_attr()).list().type_size();
}
if (n < 0) {
DCHECK_GE(n, 0);
return -1;
} else if (port_id < n) {
return arg_id;
}
port_id -= n;
}
return -1;
}
Status ValidateNodeDef(const NodeDef& node_def, const OpDef& op_def) {
if (node_def.op() != op_def.name()) {
return errors::InvalidArgument(
"NodeDef op '", node_def.op(), "' does not match ",
SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
}
bool seen_control = false;
size_t num_inputs = 0;
for (const string& input : node_def.input()) {
if (absl::StartsWith(input, "^")) {
seen_control = true;
if (input.find(':') != string::npos) {
return errors::InvalidArgument("Control input '", input,
"' must not have ':' in NodeDef: ",
FormatNodeDefForError(node_def));
}
} else if (seen_control) {
return errors::InvalidArgument("Non-control input '", input,
"' after control input in NodeDef: ",
FormatNodeDefForError(node_def));
} else {
++num_inputs;
}
}
std::unordered_map<string, const OpDef::AttrDef*> op_attrs;
for (const auto& attr : op_def.attr()) {
if (!gtl::InsertIfNotPresent(&op_attrs, attr.name(), &attr)) {
return errors::InvalidArgument("OpDef has duplicate attr name '",
attr.name(),
"': ", SummarizeOpDef(op_def));
}
}
for (const auto& attr : node_def.attr()) {
if (absl::StartsWith(attr.first, "_")) {
continue;
}
auto iter = op_attrs.find(attr.first);
if (iter == op_attrs.end()) {
LOG_EVERY_N_SEC(ERROR, 5)
<< "NodeDef mentions attribute " << attr.first
<< " which is not in the op definition: " << SummarizeOpDef(op_def)
<< " This may be expected if your graph generating binary is newer "
<< " than this binary. Unknown attributes will be ignored."
<< " NodeDef: " << FormatNodeDefForError(node_def);
continue;
}
if (attr.second.placeholder().empty()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
ValidateAttrValue(attr.second, *iter->second),
"; NodeDef: ", FormatNodeDefForError(node_def), "; ",
SummarizeOpDef(op_def));
}
op_attrs.erase(iter);
}
if (!op_attrs.empty()) {
string attrs;
for (const auto& attr_pair : op_attrs) {
if (!attrs.empty()) strings::StrAppend(&attrs, "', '");
strings::StrAppend(&attrs, attr_pair.first);
}
return errors::InvalidArgument(
"NodeDef missing attr", op_attrs.size() == 1 ? " '" : "s '", attrs,
"' from ", SummarizeOpDef(op_def),
"; NodeDef: ", FormatNodeDefForError(node_def));
}
DataTypeVector inputs, outputs;
TF_RETURN_IF_ERROR(InOutTypesForNode(node_def, op_def, &inputs, &outputs));
if (num_inputs != inputs.size()) {
return errors::InvalidArgument(
"NodeDef expected inputs '", DataTypeVectorString(inputs),
"' do not match ", num_inputs, " inputs specified; ",
SummarizeOpDef(op_def), "; NodeDef: ", FormatNodeDefForError(node_def));
}
return absl::OkStatus();
}
namespace {
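// Computes how many ports a single ArgDef occupies: the value of its
// number_attr, the length of its type_list_attr, or 1 for a plain typed arg.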
Status ComputeArgRange(const AttrSlice& attrs, const OpDef::ArgDef& arg_def,
const OpDef& op_def, int* num) {
if (!arg_def.number_attr().empty()) {
return GetNodeAttr(attrs, arg_def.number_attr(), num);
} else if (!arg_def.type_list_attr().empty()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(attrs.Find(arg_def.type_list_attr(), &attr_value));
*num = attr_value->list().type_size();
} else if (!arg_def.type_attr().empty() || arg_def.type() != DT_INVALID) {
*num = 1;
} else {
return errors::InvalidArgument(
"Argument '", arg_def.name(),
"' incorrectly specified in op definition: ", SummarizeOpDef(op_def));
}
return absl::OkStatus();
}
Status NameRangesHelper(const AttrSlice& attrs,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
const OpDef& op_def, NameRangeMap* result) {
int start = 0;
int num;
for (const auto& arg : args) {
TF_RETURN_IF_ERROR(ComputeArgRange(attrs, arg, op_def, &num));
(*result)[arg.name()] = std::make_pair(start, start + num);
start += num;
}
return absl::OkStatus();
}
}
Status NameRangesForNode(const AttrSlice& attrs, const OpDef& op_def,
NameRangeMap* inputs, NameRangeMap* outputs) {
if (inputs != nullptr) {
TF_RETURN_IF_ERROR(
NameRangesHelper(attrs, op_def.input_arg(), op_def, inputs));
}
if (outputs != nullptr) {
return NameRangesHelper(attrs, op_def.output_arg(), op_def, outputs);
}
return absl::OkStatus();
}
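// A minimal usage sketch (names are illustrative only), assuming `node_def`
// and `op_def` describe the same registered op:
//
//   NameRangeMap inputs, outputs;
//   TF_RETURN_IF_ERROR(
//       NameRangesForNode(AttrSlice(node_def), op_def, &inputs, &outputs));
//   std::pair<int, int> range = inputs["values"];  // [start, stop) ports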
void AddDefaultsToNodeDef(const OpDef& op_def, NodeDef* node_def) {
for (const auto& attr_def : op_def.attr()) {
AttrSlice attrs(*node_def);
if (attr_def.has_default_value() && !attrs.Find(attr_def.name())) {
AddNodeAttr(attr_def.name(), attr_def.default_value(), node_def);
}
}
}
void StripDefaultsFromNodeDef(const OpDef& op_def, NodeDef* node_def) {
AttrSlice attrs(*node_def);
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value()) {
const AttrValue* attr = attrs.Find(attr_def.name());
if (attr && AreAttrValuesEqual(*attr, attr_def.default_value()))
node_def->mutable_attr()->erase(attr_def.name());
}
}
}
namespace {
using ::tensorflow::tstring;
using ::tensorflow::strings::Scanner;
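// Grammar enforced by the scanners below: names start with [A-Za-z0-9.] and
// continue with [A-Za-z0-9\-._/]; '>'-separated segments allow namespaced ops
// such as "Project>AnyIn". Data inputs may carry a ":<port>" suffix with no
// leading zeros, and control inputs are prefixed with '^'.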
bool IsValidNodeName(StringPiece sp) {
Scanner scanner(sp);
scanner.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scanner.GetResult())
return false;
if (scanner.empty())
return true;
scanner.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
bool IsValidDataInputName(StringPiece sp) {
Scanner scan(sp);
scan.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scan.GetResult())
return false;
if (scan.empty())
return true;
if (scan.Peek() == ':') {
scan.OneLiteral(":");
if (scan.Peek() == '0') {
scan.OneLiteral("0");
} else {
scan.Many(Scanner::DIGIT);
}
} else {
scan.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
}
bool IsValidControlInputName(StringPiece sp) {
Scanner scan(sp);
scan.OneLiteral("^")
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
while (true) {
if (!scan.GetResult())
return false;
if (scan.empty())
return true;
scan.One(Scanner::RANGLE)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
}
}
const StringPiece kColocationGroupPrefixStringPiece(kColocationGroupPrefix);
}
Status ValidateOpInput(const string& input_name, bool* is_control_input) {
*is_control_input = false;
if (IsValidDataInputName(input_name)) {
return absl::OkStatus();
} else if (IsValidControlInputName(input_name)) {
*is_control_input = true;
return absl::OkStatus();
} else {
return errors::InvalidArgument("Illegal op input name '", input_name, "'");
}
}
Status ValidateNodeName(const string& node_name) {
if (IsValidNodeName(node_name)) {
return absl::OkStatus();
} else {
return errors::InvalidArgument("Illegal op name '", node_name, "'");
}
}
Status ValidateExternalNodeDefSyntax(const NodeDef& node_def) {
Status s = ValidateNodeName(node_def.name());
if (!s.ok()) {
return AttachDef(s, node_def);
}
bool in_control_inputs = false;
for (const string& input_name : node_def.input()) {
bool is_control_input;
s = ValidateOpInput(input_name, &is_control_input);
if (!s.ok()) {
return AttachDef(s, node_def);
}
if (in_control_inputs && !is_control_input) {
return AttachDef(errors::InvalidArgument(
"All control inputs must follow all data inputs"),
node_def);
}
in_control_inputs = is_control_input;
}
return absl::OkStatus();
}
Status AttachDef(const Status& status, const NodeDef& node_def,
bool allow_multiple_formatted_node) {
string node_error;
if (!allow_multiple_formatted_node &&
absl::StrContains(status.message(), "{{node ")) {
node_error = node_def.name();
} else {
node_error = FormatNodeDefForError(node_def);
}
return errors::CreateWithUpdatedMessage(
status,
strings::StrCat(status.message(), "\n\t", " [[", node_error, "]]"));
}
void AddNodeAttr(StringPiece name, const AttrValue& value, NodeDef* node_def) {
node_def->mutable_attr()->insert(
AttrValueMap::value_type(string(name), value));
}
void AddNodeAttr(StringPiece name, AttrValue&& value, NodeDef* node_def) {
(*node_def->mutable_attr())[string(name)] = std::move(value);
}
#define ADD_NODE_ATTR(T) \
void AddNodeAttr(StringPiece name, T value, NodeDef* node_def) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
AddNodeAttr(name, attr_value, node_def); \
}
ADD_NODE_ATTR(StringPiece)
ADD_NODE_ATTR(const char*)
ADD_NODE_ATTR(int32_t)
ADD_NODE_ATTR(int64_t)
ADD_NODE_ATTR(float)
ADD_NODE_ATTR(double)
ADD_NODE_ATTR(bool)
ADD_NODE_ATTR(DataType)
ADD_NODE_ATTR(const PartialTensorShape&)
ADD_NODE_ATTR(const Tensor&)
ADD_NODE_ATTR(const TensorProto&)
ADD_NODE_ATTR(const NameAttrList&)
ADD_NODE_ATTR(absl::Span<const StringPiece>)
ADD_NODE_ATTR(absl::Span<const char* const>)
ADD_NODE_ATTR(absl::Span<const string>)
ADD_NODE_ATTR(absl::Span<const int32>)
ADD_NODE_ATTR(absl::Span<const int64_t>)
ADD_NODE_ATTR(absl::Span<const float>)
ADD_NODE_ATTR(absl::Span<const bool>)
ADD_NODE_ATTR(const std::vector<bool>&)
ADD_NODE_ATTR(absl::Span<const DataType>)
ADD_NODE_ATTR(absl::Span<const TensorShape>)
ADD_NODE_ATTR(absl::Span<const PartialTensorShape>)
ADD_NODE_ATTR(absl::Span<const TensorShapeProto>)
ADD_NODE_ATTR(absl::Span<const Tensor>)
ADD_NODE_ATTR(absl::Span<const NameAttrList>)
#undef ADD_NODE_ATTR
void AddAttr(StringPiece name, const AttrValue& value, AttrValueMap* map) {
map->insert(AttrValueMap::value_type(string(name), value));
}
#define ADD_ATTR(T) \
void AddAttr(StringPiece name, T value, AttrValueMap* map) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
AddAttr(name, attr_value, map); \
}
ADD_ATTR(bool)
#undef ADD_ATTR
Status AddPrefixAndSuffixToNode(StringPiece prefix, StringPiece suffix,
NodeDef* node_def, bool uniquify_frame_name) {
node_def->set_name(strings::StrCat(prefix, node_def->name(), suffix));
if (uniquify_frame_name &&
(node_def->op() == "Enter" || node_def->op() == "RefEnter")) {
string frame_name;
TF_RETURN_IF_ERROR(GetNodeAttr(*node_def, "frame_name", &frame_name));
AttrValue& attr = (*node_def->mutable_attr())["frame_name"];
frame_name = strings::StrCat(prefix, frame_name, suffix);
attr.set_s(frame_name);
}
return absl::OkStatus();
}
Status MaybeAddPrefixToColocationConstraints(
const std::unordered_set<string>& match, StringPiece prefix,
NodeDef* node_def) {
auto attr = node_def->mutable_attr()->find(kColocationAttrName);
if (attr == node_def->mutable_attr()->end()) {
return absl::OkStatus();
}
auto constraints_list = attr->second.mutable_list();
auto constraints_size = constraints_list->s_size();
for (size_t i = 0; i < constraints_size; ++i) {
StringPiece original(constraints_list->s(i));
if (absl::ConsumePrefix(&original, kColocationGroupPrefixStringPiece)) {
if (match.find(string(original)) != match.end()) {
(*constraints_list->mutable_s(i)) =
strings::StrCat(kColocationGroupPrefix, prefix, original);
}
}
}
return absl::OkStatus();
}
Status MaybeUpdateColocationConstraintsWithMap(
const std::map<absl::string_view, absl::string_view>& node_name_map,
NodeDef* node_def) {
auto attr = node_def->mutable_attr()->find(kColocationAttrName);
if (attr == node_def->mutable_attr()->end()) {
return absl::OkStatus();
}
auto constraints_list = attr->second.mutable_list();
auto constraints_size = constraints_list->s_size();
for (size_t i = 0; i < constraints_size; ++i) {
StringPiece original(constraints_list->s(i));
if (absl::ConsumePrefix(&original, kColocationGroupPrefixStringPiece)) {
if (node_name_map.find(original) != node_name_map.end()) {
(*constraints_list->mutable_s(i)) =
strings::StrCat(kColocationGroupPrefix, node_name_map.at(original));
}
}
}
return absl::OkStatus();
}
void ChangeToNoOp(NodeDef* node_def) {
node_def->set_op("NoOp");
node_def->clear_experimental_type();
}
} | #include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
OpDef ToOpDef(const OpDefBuilder& builder) {
OpRegistrationData op_reg_data;
TF_EXPECT_OK(builder.Finalize(&op_reg_data));
return op_reg_data.op_def;
}
NodeDef ToNodeDef(const string& text) {
NodeDef node_def;
EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def));
return node_def;
}
NodeDef ToNodeDef(NodeDefBuilder&& builder) {
NodeDef node_def;
TF_EXPECT_OK(builder.Finalize(&node_def));
return node_def;
}
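// ExpectSuccess/ExpectFailure wrap ValidateNodeDef, asserting the status
// outcome and (for failures) that the message contains the expected
// substring.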
void ExpectSuccess(const NodeDef& good, const OpDef& op_def) {
EXPECT_EQ(absl::OkStatus(), ValidateNodeDef(good, op_def))
<< "NodeDef: " << SummarizeNodeDef(good)
<< "; OpDef: " << SummarizeOpDef(op_def);
}
void ExpectFailure(const NodeDef& bad, const OpDef& op_def,
const string& message) {
Status status = ValidateNodeDef(bad, op_def);
EXPECT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def);
if (status.ok()) return;
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< status << "; NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def);
LOG(INFO) << "Message: " << status.message();
EXPECT_TRUE(absl::StrContains(status.ToString(), message))
<< "NodeDef: " << SummarizeNodeDef(bad)
<< "; OpDef: " << SummarizeOpDef(op_def) << "\nActual error: " << status
<< "\nDoes not contain: " << message;
}
TEST(NodeDefUtilTest, In) {
const OpDef op = ToOpDef(OpDefBuilder("In").Input("i: T").Attr("T: type"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'In'
input: 'a'
attr {
key: 'T'
value { type: DT_FLOAT }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = In[T=DT_FLOAT](a)", SummarizeNodeDef(node_def));
NodeDef bad = node_def;
bad.set_op("Wrong");
ExpectFailure(bad, op, "NodeDef op 'Wrong' does not match Op<name=In;");
bad = node_def;
bad.clear_attr();
ExpectFailure(bad, op, "NodeDef missing attr 'T' from Op<name=In;");
bad = node_def;
bad.clear_attr();
AddNodeAttr("T", 17, &bad);
ExpectFailure(
bad, op,
"AttrValue had value with type 'int' when 'type' expected\n\t for attr "
"'T'\n\t; NodeDef: ");
bad = node_def;
bad.add_input("b");
ExpectFailure(
bad, op,
"NodeDef expected inputs 'float' do not match 2 inputs specified;");
bad = node_def;
bad.clear_input();
ExpectFailure(
bad, op,
"NodeDef expected inputs 'float' do not match 0 inputs specified;");
NodeDef good = node_def;
good.add_input("^b");
ExpectSuccess(node_def, op);
bad = node_def;
bad.clear_input();
bad.add_input("^b");
bad.add_input("a");
ExpectFailure(bad, op,
"Non-control input 'a' after control input "
"in NodeDef:");
bad = node_def;
bad.add_input("^b:0");
ExpectFailure(bad, op, "Control input '^b:0' must not have ':' in NodeDef:");
}
TEST(NodeDefUtilTest, Out) {
const OpDef op =
ToOpDef(OpDefBuilder("Out").Output("o: T").Attr("T: numbertype"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'Out'
attr {
key: 'T'
value { type: DT_INT32 }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = Out[T=DT_INT32]()", SummarizeNodeDef(node_def));
NodeDef bad = node_def;
bad.clear_attr();
AddNodeAttr("T", DT_STRING, &bad);
ExpectFailure(bad, op,
"Value for attr 'T' of string is not in the list of allowed "
"values: float, double, int32, uint8, int16, int8, complex64, "
"int64, qint8, quint8, qint32, bfloat16, qint16, quint16, "
"uint16, complex128, "
"half, uint32, uint64");
}
TEST(NodeDefUtilTest, Enum) {
const OpDef op = ToOpDef(OpDefBuilder("Enum").Attr("e: {'apple','orange'}"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'Enum'
attr {
key: 'e'
value { s: 'apple' }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = Enum[e=\"apple\"]()", SummarizeNodeDef(node_def));
NodeDef good = node_def;
good.clear_attr();
AddNodeAttr("e", "orange", &good);
ExpectSuccess(good, op);
NodeDef bad = node_def;
bad.clear_attr();
AddNodeAttr("e", "foo", &bad);
ExpectFailure(bad, op,
"Value for attr 'e' of \"foo\" is not in the list of allowed "
"values: \"apple\", \"orange\"");
}
TEST(NodeDefUtilTest, SameIn) {
const OpDef op = ToOpDef(OpDefBuilder("SameIn")
.Input("i: N * T")
.Attr("N: int >= 2")
.Attr("T: {float,double}"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 2 }
}
attr {
key: 'T'
value { type: DT_DOUBLE }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = SameIn[N=2, T=DT_DOUBLE](a, b)",
SummarizeNodeDef(node_def));
NodeDef bad = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 2 }
}
attr {
key: 'T'
value { type: DT_STRING }
}
)pb");
ExpectFailure(bad, op,
"Value for attr 'T' of string is not in the list of allowed "
"values: float, double");
bad = ToNodeDef(R"pb(
name: 'n'
op: 'SameIn'
input: 'a'
input: 'b'
attr {
key: 'N'
value { i: 1 }
}
attr {
key: 'T'
value { type: DT_FLOAT }
}
)pb");
ExpectFailure(bad, op, "Value for attr 'N' of 1 must be at least minimum 2");
}
TEST(NodeDefUtilTest, AnyIn) {
const OpDef op =
ToOpDef(OpDefBuilder("AnyIn").Input("i: T").Attr("T: list(type) >= 1"));
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectSuccess(node_def, op);
EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a, b)",
SummarizeNodeDef(node_def));
const NodeDef bad = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
attr {
key: 'T'
value { list {} }
}
)pb");
ExpectFailure(bad, op, "Length for attr 'T' of 0 must be at least minimum 1");
const NodeDef bad2 = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
attr {
key: 'T'
value {}
}
)pb");
ExpectFailure(bad2, op,
"Length for attr 'T' of 0 must be at least minimum 1");
}
TEST(NodeDefUtilTest, Device) {
const OpDef op_def1 = ToOpDef(OpDefBuilder("None"));
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("d", &op_def1).Device("/cpu:17")));
ExpectSuccess(node_def1, op_def1);
EXPECT_EQ("{{node d}} = None[_device=\"/cpu:17\"]()",
SummarizeNodeDef(node_def1));
const OpDef op_def2 = ToOpDef(OpDefBuilder("WithAttr").Attr("v: int"));
const NodeDef node_def2 = ToNodeDef(
std::move(NodeDefBuilder("d", &op_def2).Attr("v", 7).Device("/cpu:5")));
ExpectSuccess(node_def2, op_def2);
EXPECT_EQ("{{node d}} = WithAttr[v=7, _device=\"/cpu:5\"]()",
SummarizeNodeDef(node_def2));
}
void ExpectValidSyntax(const NodeDef& good) {
EXPECT_EQ(absl::OkStatus(), ValidateExternalNodeDefSyntax(good))
<< "NodeDef: " << SummarizeNodeDef(good);
}
void ExpectInvalidSyntax(const NodeDef& bad, const string& message) {
Status status = ValidateExternalNodeDefSyntax(bad);
ASSERT_FALSE(status.ok()) << "NodeDef: " << SummarizeNodeDef(bad);
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< status << "; NodeDef: " << SummarizeNodeDef(bad);
EXPECT_TRUE(absl::StrContains(StringPiece(status.ToString()), message))
<< "NodeDef: " << SummarizeNodeDef(bad) << ", " << status << ", "
<< message;
}
TEST(NodeDefUtilTest, ValidSyntax) {
const NodeDef node_def = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def);
const NodeDef node_def_namespace = ToNodeDef(R"pb(
name: 'n'
op: 'Project>AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_namespace);
const NodeDef node_def_explicit_inputs = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:0'
input: 'b:123'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_explicit_inputs);
EXPECT_EQ("{{node n}} = AnyIn[T=[DT_INT32, DT_STRING]](a:0, b:123)",
SummarizeNodeDef(node_def_explicit_inputs));
const NodeDef node_def_explicit_inputs_namespace = ToNodeDef(R"pb(
name: 'Project>n'
op: 'Project>AnyIn'
input: 'Project>a:0'
input: 'Project>b:123'
input: '^Project>c'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_explicit_inputs_namespace);
EXPECT_EQ(
"{{node Project>n}} = Project>AnyIn[T=[DT_INT32, DT_STRING]]"
"(Project>a:0, Project>b:123, ^Project>c)",
SummarizeNodeDef(node_def_explicit_inputs_namespace));
const NodeDef node_def_partial_shape = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
attr {
key: 'shp'
value {
shape {
dim { size: -1 }
dim { size: 0 }
}
}
}
)pb");
ExpectValidSyntax(node_def_partial_shape);
const NodeDef node_def_control_input = ToNodeDef(R"pb(
name: 'n-'
op: 'AnyIn'
input: 'a'
input: '^b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectValidSyntax(node_def_control_input);
const NodeDef node_def_invalid_name = ToNodeDef(R"pb(
name: 'n:0'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_invalid_name, "Illegal op name 'n:0'");
const NodeDef node_def_internal_name = ToNodeDef(R"pb(
name: '_n'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_internal_name, "Illegal op name '_n'");
const NodeDef node_def_slash_in_name = ToNodeDef(R"pb(
name: 'n\\'
op: 'AnyIn'
input: 'a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_slash_in_name, "Illegal op name 'n\\'");
const NodeDef node_def_internal_input_name = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: '_a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_internal_input_name,
"Illegal op input name '_a'");
const NodeDef node_def_input_name_slash = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a\\'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_input_name_slash, "Illegal op input name 'a\\'");
const NodeDef node_def_invalid_control_input_name = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: '^b:0'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_invalid_control_input_name,
"Illegal op input name '^b:0'");
const NodeDef node_def_control_input_name_slash = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a'
input: '^b\\'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_control_input_name_slash,
"Illegal op input name '^b\\'");
const NodeDef node_def_data_input_after_control = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: '^a'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_after_control,
"All control inputs must follow all data inputs");
const NodeDef node_def_data_input_invalid_port = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:b'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_invalid_port,
"Illegal op input name 'a:b");
const NodeDef node_def_data_input_invalid_port2 = ToNodeDef(R"pb(
name: 'n'
op: 'AnyIn'
input: 'a:00'
input: 'b'
attr {
key: 'T'
value { list { type: [ DT_INT32, DT_STRING ] } }
}
)pb");
ExpectInvalidSyntax(node_def_data_input_invalid_port2,
"Illegal op input name 'a:00");
}
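// Illustrative summary of the syntax rules exercised above (added note, not
// part of the original test file): node and op names may be namespaced with
// '>', data inputs may carry an explicit output port ("a:0", "b:123"),
// control inputs are prefixed with '^' and may not carry a port, all control
// inputs must follow all data inputs, names beginning with '_' are rejected
// (the tests label them internal names), and numeric ports may not use
// leading zeros ("a:00" is rejected).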
TEST(InputTypesForNode, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
DataTypeVector types;
EXPECT_TRUE(InputTypesForNode(node_def, op_def, &types).ok());
EXPECT_EQ(types[0], DT_FLOAT);
EXPECT_EQ(types[1], DT_INT32);
DataType type;
EXPECT_TRUE(InputTypeForNode(node_def, op_def, 0, &type).ok());
EXPECT_EQ(type, DT_FLOAT);
EXPECT_TRUE(InputTypeForNode(node_def, op_def, 1, &type).ok());
EXPECT_EQ(type, DT_INT32);
EXPECT_FALSE(InputTypeForNode(node_def, op_def, 2, &type).ok());
}
TEST(OutputTypesForNode, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
DataTypeVector types;
EXPECT_TRUE(OutputTypesForNode(node_def, op_def, &types).ok());
EXPECT_EQ(types[0], DT_STRING);
EXPECT_EQ(types[1], DT_BOOL);
DataType type;
EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 0, &type).ok());
EXPECT_EQ(type, DT_STRING);
EXPECT_TRUE(OutputTypeForNode(node_def, op_def, 1, &type).ok());
EXPECT_EQ(type, DT_BOOL);
EXPECT_FALSE(OutputTypeForNode(node_def, op_def, 2, &type).ok());
}
TEST(OutputTypesForNode, LargeOutput) {
const OpDef op_def = ToOpDef(OpDefBuilder("TestSplitOp")
.Input("value: int64")
.Output("output: num_split * int64")
.Attr("num_split: int >= 1"));
int64_t num_split = 1000000000000;
const NodeDef node_def =
ToNodeDef(std::move(NodeDefBuilder("test_split_op", &op_def)
.Input(FakeInput())
.Attr("num_split", num_split)));
DataTypeVector types;
EXPECT_FALSE(OutputTypesForNode(node_def, op_def, &types).ok());
}
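// Added note (not in the original test file): a num_split of 10^12 would
// declare a trillion int64 outputs, so OutputTypesForNode is expected to fail
// rather than attempt to materialize a type vector of that size.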
TEST(OutputTypesForNode_AttrSliceOverload, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
const AttrSlice attr_slice =
AttrSlice(ToNodeDef(std::move(NodeDefBuilder("simple", &op_def)
.Input(FakeInput())
.Input(FakeInput()))));
DataTypeVector types;
EXPECT_TRUE(OutputTypesForNode(attr_slice, op_def, &types).ok());
EXPECT_EQ(types[0], DT_STRING);
EXPECT_EQ(types[1], DT_BOOL);
}
TEST(NameRangesForNodeTest, Simple) {
const OpDef op_def = ToOpDef(OpDefBuilder("Simple")
.Input("a: float")
.Input("b: int32")
.Output("c: string")
.Output("d: bool"));
NameRangeMap inputs, outputs;
const NodeDef node_def = ToNodeDef(std::move(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput())));
TF_EXPECT_OK(NameRangesForNode(node_def, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 2}}}), outputs);
EXPECT_EQ("{{node simple}} = Simple[](a, b)", SummarizeNodeDef(node_def));
OpDef bad_op_def = op_def;
bad_op_def.mutable_input_arg(0)->clear_type();
EXPECT_FALSE(NameRangesForNode(node_def, bad_op_def, &inputs, &outputs).ok());
}
TEST(NameRangesForNodeTest, Polymorphic) {
const OpDef op_def = ToOpDef(OpDefBuilder("Polymorphic")
.Input("a: T")
.Input("b: T")
.Output("c: T")
.Attr("T: type"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_INT32](a, b)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_BOOL))
.Input(FakeInput(DT_BOOL))));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("{{node poly}} = Polymorphic[T=DT_BOOL](a, b)",
SummarizeNodeDef(node_def2));
}
TEST(NameRangesForNodeTest, NRepeats) {
const OpDef op_def = ToOpDef(OpDefBuilder("NRepeats")
.Input("a: N * int32")
.Input("b: N * T")
.Output("c: T")
.Output("d: N * string")
.Output("e: M * bool")
.Attr("N: int")
.Attr("M: int")
.Attr("T: type"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
.Input(FakeInput(4, DT_INT32))
.Input(FakeInput(4, DT_FLOAT))
.Attr("M", 3)));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 4}}, {"b", {4, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 5}}, {"e", {5, 8}}}),
outputs);
EXPECT_EQ(
"{{node nr}} = NRepeats[M=3, N=4, T=DT_FLOAT](a, a:1, a:2, a:3, b, b:1, "
"b:2, b:3)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("nr", &op_def)
.Input(FakeInput(2, DT_INT32))
.Input(FakeInput(2, DT_DOUBLE))
.Attr("M", 7)));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 4}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);
EXPECT_EQ("{{node nr}} = NRepeats[M=7, N=2, T=DT_DOUBLE](a, a:1, b, b:1)",
SummarizeNodeDef(node_def2));
NodeDef bad_node_def = node_def2;
bad_node_def.clear_attr();
EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
TEST(NameRangesForNodeTest, TypeList) {
const OpDef op_def = ToOpDef(OpDefBuilder("TypeList")
.Input("a: T1")
.Input("b: T2")
.Output("c: T2")
.Output("d: T3")
.Output("e: T1")
.Attr("T1: list(type)")
.Attr("T2: list(type)")
.Attr("T3: list(type)"));
NameRangeMap inputs, outputs;
const NodeDef node_def1 =
ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
.Input(FakeInput({DT_BOOL, DT_FLOAT}))
.Input(FakeInput(4, DT_FLOAT))
.Attr("T3", {DT_INT32, DT_DOUBLE, DT_STRING})));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 6}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 4}}, {"d", {4, 7}}, {"e", {7, 9}}}),
outputs);
EXPECT_EQ(
"{{node tl}} = TypeList[T1=[DT_BOOL, DT_FLOAT],"
" T2=[DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT],"
" T3=[DT_INT32, DT_DOUBLE, DT_STRING]](a, a:1, b, b:1, b:2, b:3)",
SummarizeNodeDef(node_def1));
const NodeDef node_def2 =
ToNodeDef(std::move(NodeDefBuilder("tl", &op_def)
.Input(FakeInput(7, DT_INT32))
.Input(FakeInput({DT_DOUBLE}))
.Attr("T3", {DT_DOUBLE, DT_STRING})));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 7}}, {"b", {7, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);
EXPECT_EQ(
"{{node tl}} = TypeList[T1=[DT_INT32, DT_INT32, DT_INT32, DT_INT32, "
"DT_INT32,"
" DT_INT32, DT_INT32], T2=[DT_DOUBLE], T3=[DT_DOUBLE, DT_STRING]]"
"(a, a:1, a:2, a:3, a:4, a:5, a:6, b)",
SummarizeNodeDef(node_def2));
NodeDef bad_node_def = node_def2;
bad_node_def.clear_attr();
EXPECT_FALSE(NameRangesForNode(bad_node_def, op_def, &inputs, &outputs).ok());
}
TEST(AddPrefixAndSuffixToNode, Enter) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
const string prefix = "prefix/";
const string suffix = "/suffix";
TF_ASSERT_OK(AddPrefixAndSuffixToNode(prefix, suffix, &node_def));
EXPECT_EQ("prefix/enter/suffix", node_def.name());
string frame_name;
TF_ASSERT_OK(GetNodeAttr(node_def, "frame_name", &frame_name));
EXPECT_EQ("prefix/test_frame/suffix", frame_name);
}
TEST(MaybeAddPrefixToColocationConstraints, Basic) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
AddNodeAttr(kColocationAttrName,
{strings::StrCat(kColocationGroupPrefix, "Node1"),
strings::StrCat(kColocationGroupPrefix, "Node2"),
strings::StrCat(kColocationGroupPrefix, "Node3")},
&node_def);
std::unordered_set<string> match;
match.insert("Node1");
match.insert("Node3");
TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
std::vector<string> coloc_constraints;
TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
EXPECT_EQ(
coloc_constraints,
std::vector<string>({"loc:@fn/Node1", "loc:@Node2", "loc:@fn/Node3"}));
}
TEST(MaybeAddPrefixToColocationConstraints, NoConstraints) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
std::unordered_set<string> match;
match.insert("Node1");
match.insert("Node3");
TF_ASSERT_OK(MaybeAddPrefixToColocationConstraints(match, "fn/", &node_def));
EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
TEST(MaybeUpdateColocationConstraintsWithMap, Basic) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
AddNodeAttr(kColocationAttrName,
{strings::StrCat(kColocationGroupPrefix, "Node1"),
strings::StrCat(kColocationGroupPrefix, "Node2"),
strings::StrCat(kColocationGroupPrefix, "Node3")},
&node_def);
std::map<absl::string_view, absl::string_view> node_map;
node_map["Node1"] = "Node4";
node_map["Invalid"] = "Node5";
TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
std::vector<string> coloc_constraints;
TF_ASSERT_OK(GetNodeAttr(node_def, kColocationAttrName, &coloc_constraints));
EXPECT_EQ(coloc_constraints,
std::vector<string>({"loc:@Node4", "loc:@Node2", "loc:@Node3"}));
}
TEST(MaybeUpdateColocationConstraintsWithMap, NoConstraints) {
NodeDef node_def;
node_def.set_name("Identity");
node_def.set_op("Identity");
std::map<absl::string_view, absl::string_view> node_map;
node_map["Node1"] = "Node4";
node_map["Invalid"] = "Node5";
TF_ASSERT_OK(MaybeUpdateColocationConstraintsWithMap(node_map, &node_def));
EXPECT_FALSE(HasNodeAttr(node_def, kColocationAttrName));
}
TEST(FormatNodeForErrorTest, Node) {
Graph g(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("enter", "NoOp").Finalize(&g, &node));
EXPECT_EQ("{{node enter}}", FormatNodeForError(*node));
}
TEST(FormatNodeForErrorTest, NodeDef) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
EXPECT_EQ("{{node enter}}", FormatNodeDefForError(node_def));
}
TEST(FormatNodeForErrorTest, NodeDefWithOriginalNames) {
NodeDef node_def;
node_def.set_name("enter");
node_def.set_op("Enter");
AddNodeAttr("frame_name", "test_frame", &node_def);
*(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
"node_name";
*(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
"func_name";
EXPECT_EQ("{{function_node func_name}}{{node node_name}}",
FormatNodeDefForError(node_def));
*(node_def.mutable_experimental_debug_info()->add_original_node_names()) =
"node_name2";
*(node_def.mutable_experimental_debug_info()->add_original_func_names()) =
"func_name2";
EXPECT_EQ(
"{{function_node func_name}}{{node node_name}}, "
"{{function_node func_name2}}{{node node_name2}}",
FormatNodeDefForError(node_def));
}
TEST(AttachDef, AllowMultipleFormattedNode) {
NodeDef a;
a.set_name("a");
NodeDef b;
b.set_name("b");
Status s = Status(absl::StatusCode::kCancelled, "Error");
Status s2 = AttachDef(s, a, true);
EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
Status s3 = AttachDef(s2, b, true);
EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[{{node b}}]]", s3.message());
}
TEST(AttachDef, DisallowMultipleFormattedNode) {
NodeDef a;
a.set_name("a");
NodeDef b;
b.set_name("b");
Status s = Status(absl::StatusCode::kCancelled, "Error");
Status s2 = AttachDef(s, a, false);
EXPECT_EQ("Error\n\t [[{{node a}}]]", s2.message());
Status s3 = AttachDef(s2, b, false);
EXPECT_EQ("Error\n\t [[{{node a}}]]\n\t [[b]]", s3.message());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef4420c7-b3f9-4685-bc2a-8478f1719c21 | cpp | tensorflow/tensorflow | memory_types | tensorflow/core/common_runtime/memory_types.cc | tensorflow/core/common_runtime/memory_types_test.cc | #include "tensorflow/core/common_runtime/memory_types.h"
#include <utility>
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
struct Endpoint {
int node_id;
int output_index;
};
struct EndpointHash {
uint32 operator()(const Endpoint& x) const {
return Hash32(reinterpret_cast<const char*>(&x.node_id), sizeof(int),
x.output_index);
}
};
struct EndpointEq {
bool operator()(const Endpoint& x, const Endpoint& y) const {
return (x.node_id == y.node_id) && (x.output_index == y.output_index);
}
};
static Status ProcessMemoryTypes(
const DeviceType& device_type, const Graph* g,
const std::function<Status(const Edge*, MemoryType, MemoryType)>& fn) {
if (device_type != DEVICE_GPU &&
!DeviceFactory::IsPluggableDevice(device_type.type_string())) {
return absl::OkStatus();
}
typedef std::unordered_map<Endpoint, MemoryType, EndpointHash, EndpointEq>
MemTypeMap;
MemTypeMap inp;
MemTypeMap out;
MemoryTypeVector inp_mvec;
MemoryTypeVector out_mvec;
for (const Node* n : g->nodes()) {
TF_RETURN_IF_ERROR(MemoryTypesForNode(g->op_registry(), device_type,
n->def(), &inp_mvec, &out_mvec));
for (size_t i = 0; i < inp_mvec.size(); ++i) {
VLOG(2) << "inp mvec " << n->id() << " " << i << " " << inp_mvec[i];
inp[{n->id(), static_cast<int>(i)}] = inp_mvec[i];
}
for (size_t i = 0; i < out_mvec.size(); ++i) {
VLOG(2) << "out mvec " << n->id() << " " << i << " " << out_mvec[i];
out[{n->id(), static_cast<int>(i)}] = out_mvec[i];
}
}
for (const Edge* e : g->edges()) {
if (e->IsControlEdge()) {
continue;
}
MemoryType sm = gtl::FindWithDefault(out, {e->src()->id(), e->src_output()},
DEVICE_MEMORY);
MemoryType dm = gtl::FindWithDefault(inp, {e->dst()->id(), e->dst_input()},
DEVICE_MEMORY);
VLOG(1) << e->src()->id() << ":" << e->src_output() << " -> "
<< e->dst()->id() << ":" << e->dst_input() << ": " << sm << " -> "
<< dm;
TF_RETURN_IF_ERROR(fn(e, sm, dm));
}
return absl::OkStatus();
}
Status ValidateMemoryTypes(const DeviceType& device_type, const Graph* g) {
return ProcessMemoryTypes(
device_type, g, [](const Edge* e, MemoryType sm, MemoryType dm) {
if (sm == dm) {
return absl::OkStatus();
}
return errors::Internal("Memory type mismatch (", sm, " ", dm,
") between :", e->src()->id(), ":",
e->src_output(), " and ", e->dst()->id(), ":",
e->dst_input(), " : from ",
FormatNodeForError(*e->src()), " to ",
FormatNodeForError(*e->dst()));
});
}
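// Added note (not in the original file): on GPU, int32 tensors commonly live
// in HOST_MEMORY while float tensors live in DEVICE_MEMORY (see the
// MemoryTypeForOutput unit test below), so an edge such as
// Cast(int32 -> float) can connect a HOST_MEMORY output to a DEVICE_MEMORY
// input. ValidateMemoryTypes reports that as errors::Internal;
// EnsureMemoryTypes below repairs it by splicing in a send/recv pair.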
static string GetTensorName(const Edge* edge) {
static std::atomic<int64_t> counter(0);
return strings::StrCat("memtype_", counter.fetch_add(1), "_",
edge->src()->name());
}
static Node* Send(Graph* g, const string& tensor_name,
const string& device_name, bool host, const Edge* edge) {
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), host ? "_HostSend" : "_Send")
.Input(edge->src(), edge->src_output())
.Attr("tensor_name", tensor_name)
.Attr("send_device", device_name)
.Attr("send_device_incarnation", 0)
.Attr("recv_device", device_name)
.Attr("_hostmem_sendrecv", true)
.Attr("_src", edge->src()->name())
.Attr("_dst", edge->dst()->name())
.Finalize(g, &ret));
return ret;
}
static Node* Recv(Graph* g, const string& tensor_name,
const string& device_name, bool host, const Edge* edge) {
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("n"), host ? "_HostRecv" : "_Recv")
.Attr("tensor_type", edge->src()->output_type(edge->src_output()))
.Attr("tensor_name", tensor_name)
.Attr("send_device", device_name)
.Attr("send_device_incarnation", 0)
.Attr("recv_device", device_name)
.Attr("_hostmem_sendrecv", true)
.Attr("_src", edge->src()->name())
.Attr("_dst", edge->dst()->name())
.Finalize(g, &ret));
return ret;
}
Status EnsureMemoryTypes(const DeviceType& device_type,
const string& device_name, Graph* g) {
struct Item {
const Edge* edge;
MemoryType sm;
MemoryType dm;
};
std::vector<Item> edges;
TF_RETURN_IF_ERROR(ProcessMemoryTypes(
device_type, g, [&edges](const Edge* e, MemoryType sm, MemoryType dm) {
if (sm == dm) {
return absl::OkStatus();
}
if (((sm == HOST_MEMORY) && (dm == DEVICE_MEMORY)) ||
((sm == DEVICE_MEMORY) && (dm == HOST_MEMORY))) {
edges.push_back({e, sm, dm});
return absl::OkStatus();
}
return errors::Internal("Unexpected memory type pair on an edge: ", sm,
" vs. ", dm);
}));
if (!edges.empty()) {
std::unordered_map<Endpoint, Node*, EndpointHash, EndpointEq> recv_nodes;
for (const auto& item : edges) {
const Edge* e = item.edge;
const bool has_ref = IsRefType(e->src()->output_type(e->src_output()));
Node* recv = nullptr;
Endpoint key{e->src()->id(), e->src_output()};
auto iter = recv_nodes.find(key);
if (iter == recv_nodes.end()) {
const string tensor_name = GetTensorName(e);
Node* send =
Send(g, tensor_name, device_name, (item.sm == HOST_MEMORY), e);
recv = Recv(g, tensor_name, device_name, (item.dm == HOST_MEMORY), e);
if (!has_ref) {
recv_nodes[key] = recv;
}
g->AddControlEdge(send, recv);
} else {
recv = iter->second;
}
g->AddEdge(recv, 0, e->dst(), e->dst_input());
g->RemoveEdge(e);
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Dumped graph after EnsureMemoryTypes to "
<< DumpGraphToFile("EnsureMemoryTypes", *g);
}
return ValidateMemoryTypes(device_type, g);
}
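// Sketch of the rewrite performed by EnsureMemoryTypes (illustration only,
// not part of the original file):
//
//   before:  src:out ------------------------------------------> dst:in
//   after:   src:out --> _Send/_HostSend ..ctrl.. _Recv/_HostRecv --> dst:in
//
// The recv node is cached per (src id, src output) endpoint so several
// consumers of the same non-ref output share one _Recv; ref-typed outputs
// are deliberately not cached and get a fresh send/recv pair per edge.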
Status MemoryTypeForOutput(const DeviceType& device_type, const Graph* g,
const Node* n, int index, MemoryType* memory_type) {
MemoryTypeVector inp_mvec;
MemoryTypeVector out_mvec;
TF_RETURN_IF_ERROR(MemoryTypesForNode(g->op_registry(), device_type, n->def(),
&inp_mvec, &out_mvec));
if (out_mvec.size() <= index) {
return errors::Internal("Trying to get the memory type for ", index,
"'th output of node ", FormatNodeForError(*n),
" that has only ", out_mvec.size(), " outputs");
}
*memory_type = out_mvec[index];
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/memory_types.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MemoryTypeChecker, Int32OK) {
Graph* g = new Graph(OpRegistry::Global());
Tensor v(DT_INT32, {});
v.scalar<int32>().setZero();
auto in0 = test::graph::Constant(g, v);
auto in1 = test::graph::Constant(g, v);
test::graph::Add(g, in0, in1);
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_CPU, g));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_GPU, g));
#endif
delete g;
}
TEST(MemoryTypeChecker, Int32NotOk) {
Graph* g = new Graph(OpRegistry::Global());
Tensor v(DT_INT32, {});
v.scalar<int32>().setZero();
auto x = test::graph::Constant(g, v);
test::graph::Cast(g, x, DT_FLOAT);
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_CPU, g));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_TRUE(errors::IsInternal(ValidateMemoryTypes(DEVICE_GPU, g)));
TF_EXPECT_OK(EnsureMemoryTypes(DEVICE_GPU, "/device:GPU:0", g));
TF_EXPECT_OK(ValidateMemoryTypes(DEVICE_GPU, g));
#endif
delete g;
}
TEST(MemoryTypeChecker, MemoryTypeForOutput) {
Graph* g = new Graph(OpRegistry::Global());
Tensor vb(DT_BOOL);
Tensor vi(DT_INT32);
Tensor vf(DT_FLOAT);
auto pred = test::graph::Constant(g, vb);
auto sf = test::graph::Switch(g, test::graph::Constant(g, vf), pred);
MemoryType memory_type;
TF_EXPECT_OK(MemoryTypeForOutput(DEVICE_CPU, g, sf, 0, &memory_type));
EXPECT_EQ(memory_type, DEVICE_MEMORY);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto si = test::graph::Switch(g, test::graph::Constant(g, vi), pred);
TF_EXPECT_OK(MemoryTypeForOutput(DEVICE_GPU, g, si, 0, &memory_type));
EXPECT_EQ(memory_type, HOST_MEMORY);
#endif
delete g;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/memory_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/memory_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ff66a8f-ddcf-4b7d-80b1-e7448a66996e | cpp | tensorflow/tensorflow | kernel_def_builder | tensorflow/core/framework/kernel_def_builder.cc | tensorflow/core/framework/kernel_def_builder_test.cc | #include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
namespace tensorflow {
KernelDefBuilder::KernelDefBuilder(const char* op_name) {
kernel_def_ = new KernelDef;
kernel_def_->set_op(op_name);
}
KernelDefBuilder::~KernelDefBuilder() {
DCHECK(kernel_def_ == nullptr) << "Did not call Build()";
}
KernelDefBuilder& KernelDefBuilder::Device(const char* device_type) {
kernel_def_->set_device_type(device_type);
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<int64_t>(
const char* attr_name, absl::Span<const int64_t> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const int64_t integer : allowed) {
allowed_values->add_i(integer);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<int64_t>(
const char* attr_name, int64_t allowed) {
return AttrConstraint(
attr_name,
absl::Span<const int64_t>(std::initializer_list<int64_t>({allowed})));
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<string>(
const char* attr_name, absl::Span<const string> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const auto& str : allowed) {
allowed_values->add_s(str);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<string>(
const char* attr_name, string allowed) {
return AttrConstraint(
attr_name,
absl::Span<const string>(std::initializer_list<string>({allowed})));
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<const char*>(
const char* attr_name, absl::Span<const char* const> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const auto& str : allowed) {
allowed_values->add_s(str);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<const char*>(
const char* attr_name, const char* allowed) {
return AttrConstraint(attr_name,
absl::Span<const char* const>(
std::initializer_list<const char*>({allowed})));
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<bool>(const char* attr_name,
bool allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
allowed_values->add_b(allowed);
return *this;
}
KernelDefBuilder& KernelDefBuilder::TypeConstraint(
const char* attr_name, absl::Span<const DataType> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (DataType dt : allowed) {
allowed_values->add_type(dt);
}
return *this;
}
KernelDefBuilder& KernelDefBuilder::TypeConstraint(const char* attr_name,
DataType allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
constraint->mutable_allowed_values()->mutable_list()->add_type(allowed);
return *this;
}
KernelDefBuilder& KernelDefBuilder::HostMemory(const char* arg_name) {
kernel_def_->add_host_memory_arg(arg_name);
return *this;
}
KernelDefBuilder& KernelDefBuilder::Label(const char* label) {
CHECK_EQ(kernel_def_->label(), "")
<< "Trying to set a kernel's label a second time: '" << label
<< "' in: " << kernel_def_->DebugString();
kernel_def_->set_label(label);
return *this;
}
KernelDefBuilder& KernelDefBuilder::Priority(int32_t priority) {
kernel_def_->set_priority(priority);
return *this;
}
const KernelDef* KernelDefBuilder::Build() {
KernelDef* r = kernel_def_;
kernel_def_ = nullptr;
return r;
}
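// Hedged usage sketch mirroring the unit tests below (added comment, not part
// of the original file; "MyOp" and the "shape" argument are hypothetical):
//
//   const KernelDef* def = KernelDefBuilder("MyOp")
//                              .Device(DEVICE_GPU)
//                              .TypeConstraint<float>("T")
//                              .HostMemory("shape")
//                              .Build();  // caller takes ownership
//   ...
//   delete def;
//
// Build() hands back the KernelDef and clears the builder; the destructor
// DCHECKs that Build() was called, so every builder must be finished.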
} | #include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(KernelDefBuilderTest, Basic) {
const KernelDef* def = KernelDefBuilder("A").Device(DEVICE_CPU).Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString("op: 'A' device_type: 'CPU'",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, TypeConstraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.TypeConstraint<float>("T")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B' device_type: 'GPU'
constraint { name: 'T' allowed_values { list { type: DT_FLOAT } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("U")
.TypeConstraint<bool>("V")
.Build();
protobuf::TextFormat::ParseFromString(R"proto(
op: 'C' device_type: 'GPU'
constraint { name: 'U' allowed_values { list { type: DT_INT32 } } }
constraint { name: 'V' allowed_values { list { type: DT_BOOL } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("D")
.Device(DEVICE_CPU)
.TypeConstraint("W", {DT_DOUBLE, DT_STRING})
.Build();
protobuf::TextFormat::ParseFromString(R"proto(
op: 'D' device_type: 'CPU'
constraint { name: 'W'
allowed_values { list { type: [DT_DOUBLE, DT_STRING] } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, Int64Constraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.AttrConstraint("T", int64_t{5})
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B'
device_type: 'GPU'
constraint {
name: 'T'
allowed_values { list { i: 5 } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.AttrConstraint("U",
absl::Span<const int64_t>{int64_t{5}, int64_t{17}})
.AttrConstraint("V", string("proto"))
.Build();
protobuf::TextFormat::ParseFromString(
R"proto(
op: 'C'
device_type: 'GPU'
constraint {
name: 'U'
allowed_values { list { i: [ 5, 17 ] } }
}
constraint {
name: 'V'
allowed_values { list { s: 'proto' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, StringConstraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.AttrConstraint("T", "hi")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B'
device_type: 'GPU'
constraint {
name: 'T'
allowed_values { list { s: 'hi' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.AttrConstraint("U", absl::Span<const char* const>{"boo", "ya"})
.AttrConstraint("V", string("proto"))
.Build();
protobuf::TextFormat::ParseFromString(
R"proto(
op: 'C'
device_type: 'GPU'
constraint {
name: 'U'
allowed_values { list { s: [ 'boo', 'ya' ] } }
}
constraint {
name: 'V'
allowed_values { list { s: 'proto' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, HostMemory) {
const KernelDef* def = KernelDefBuilder("E")
.Device(DEVICE_GPU)
.HostMemory("in")
.HostMemory("out")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(
"op: 'E' device_type: 'GPU' "
"host_memory_arg: ['in', 'out']",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18a5dc02-a8ff-4ba0-826b-14f5980d794c | cpp | tensorflow/tensorflow | resource_mgr | tensorflow/core/framework/resource_mgr.cc | tensorflow/core/framework/resource_mgr_test.cc | #include "tensorflow/core/framework/resource_mgr.h"
#include <atomic>
#include <memory>
#include <variant>
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/demangle.h"
#include "tensorflow/core/platform/stacktrace.h"
namespace tensorflow {
ResourceHandle MakeResourceHandle(
const string& container, const string& name, const DeviceBase& device,
const TypeIndex& type_index,
const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes,
const absl::optional<ManagedStackTrace>& definition_stack_trace) {
ResourceHandle result;
result.set_device(device.name());
result.set_container(container);
result.set_definition_stack_trace(definition_stack_trace);
if (name == ResourceHandle::ANONYMOUS_NAME) {
result.set_name(
strings::StrCat("_AnonymousVar", ResourceHandle::GenerateUniqueId()));
} else {
result.set_name(name);
}
result.set_hash_code(type_index.hash_code());
result.set_maybe_type_name(type_index.name());
result.set_dtypes_and_shapes(dtypes_and_shapes);
return result;
}
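// Added note (not in the original file): passing
// ResourceHandle::ANONYMOUS_NAME as `name` produces a unique
// "_AnonymousVar<id>" name per call, so anonymous resources never collide
// within a container.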
Status MakeResourceHandleToOutput(OpKernelContext* context, int output_index,
const string& container, const string& name,
const TypeIndex& type_index) {
Tensor* handle;
TF_RETURN_IF_ERROR(
context->allocate_output(output_index, TensorShape({}), &handle));
handle->scalar<ResourceHandle>()() =
MakeResourceHandle(container, name, *context->device(), type_index);
return absl::OkStatus();
}
namespace internal {
Status ValidateDevice(OpKernelContext* ctx, const ResourceHandle& p) {
if (ctx->device()->attributes().name() != p.device()) {
return errors::InvalidArgument(
"Trying to access resource ", p.name(), " located in device ",
p.device(), " from device ", ctx->device()->attributes().name());
}
return absl::OkStatus();
}
}
Status ResourceMgr::InsertDebugTypeName(uint64 hash_code,
const string& type_name) {
auto iter = debug_type_names_.emplace(hash_code, type_name);
if (iter.first->second != type_name) {
return errors::AlreadyExists("Duplicate hash code found for type ",
type_name);
}
return absl::OkStatus();
}
const char* ResourceMgr::DebugTypeName(uint64 hash_code) const {
auto type_name_iter = debug_type_names_.find(hash_code);
if (type_name_iter == debug_type_names_.end()) {
return "<unknown>";
} else {
return type_name_iter->second.c_str();
}
}
ResourceMgr::ResourceAndName::ResourceAndName() : name(nullptr) {}
ResourceMgr::ResourceAndName::ResourceAndName(const string& name)
: name(std::make_unique<string>(name)) {}
core::RefCountPtr<ResourceBase> ResourceMgr::ResourceAndName::GetResource()
const {
if (std::holds_alternative<core::RefCountPtr<ResourceBase>>(resource)) {
ResourceBase* ptr =
std::get<core::RefCountPtr<ResourceBase>>(resource).get();
ptr->Ref();
return core::RefCountPtr<ResourceBase>(ptr);
} else if (std::holds_alternative<core::WeakPtr<ResourceBase>>(resource)) {
return std::get<core::WeakPtr<ResourceBase>>(resource).GetNewRef();
} else {
return nullptr;
}
}
ResourceMgr::ResourceAndName::ResourceAndName(
ResourceAndName&& other) noexcept {
name = std::move(other.name);
resource = std::move(other.resource);
}
ResourceMgr::ResourceAndName::~ResourceAndName() {}
ResourceMgr::ResourceAndName& ResourceMgr::ResourceAndName::operator=(
ResourceAndName&& other) noexcept {
name = std::move(other.name);
resource = std::move(other.resource);
return *this;
}
ResourceMgr::ResourceMgr() : default_container_("localhost") {}
ResourceMgr::ResourceMgr(const string& default_container)
: default_container_(default_container) {}
ResourceMgr::~ResourceMgr() { Clear(); }
void ResourceMgr::Clear() {
absl::flat_hash_map<string, Container*> tmp_containers;
{
mutex_lock l(mu_);
tmp_containers = std::move(containers_);
containers_.clear();
}
for (const auto& p : tmp_containers) {
delete p.second;
}
}
string ResourceMgr::DebugString() const {
mutex_lock l(mu_);
struct Line {
const string* container;
const string type;
const string* resource;
const string detail;
};
std::vector<Line> lines;
for (const auto& p : containers_) {
const string& container = p.first;
for (const auto& q : *p.second) {
const Key& key = q.first;
const char* type = DebugTypeName(key.first);
const core::RefCountPtr<ResourceBase> resource = q.second.GetResource();
Line l{&container, port::Demangle(type), q.second.name.get(),
resource ? resource->DebugString() : "<nullptr>"};
lines.push_back(l);
}
}
std::vector<string> text;
text.reserve(lines.size());
for (const Line& line : lines) {
text.push_back(strings::Printf(
"%-20s | %-40s | %-40s | %-s", line.container->c_str(),
line.type.c_str(), line.resource->c_str(), line.detail.c_str()));
}
std::sort(text.begin(), text.end());
return absl::StrJoin(text, "\n");
}
Status ResourceMgr::DoCreate(const string& container_name, TypeIndex type,
const string& name, ResourceBase* resource,
bool owns_resource) {
Container* container = [&]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
Container** ptr = &containers_[container_name];
if (*ptr == nullptr) {
*ptr = new Container;
}
return *ptr;
}();
ResourceAndName resource_and_name(name);
StringPiece borrowed_name(*resource_and_name.name);
if (owns_resource) {
resource_and_name.resource = core::RefCountPtr<ResourceBase>(resource);
} else {
auto cleanup_fn = [this, container, type, borrowed_name]() {
mutex_lock l(mu_);
auto iter = container->find({type.hash_code(), borrowed_name});
if (iter != container->end()) {
container->erase(iter);
}
};
resource_and_name.resource =
core::WeakPtr<ResourceBase>(resource, cleanup_fn);
}
Container::value_type key_and_value(Key(type.hash_code(), borrowed_name),
std::move(resource_and_name));
auto st = container->insert(std::move(key_and_value));
if (st.second) {
TF_RETURN_IF_ERROR(InsertDebugTypeName(type.hash_code(), type.name()));
return absl::OkStatus();
}
return errors::AlreadyExists("Resource ", container_name, "/", name, "/",
type.name());
}
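// Added note (not in the original file): with owns_resource == true the
// manager keeps a strong RefCountPtr and the resource lives until
// Delete()/Cleanup(); with owns_resource == false it keeps only a WeakPtr
// whose cleanup callback erases the map entry when the last external
// reference dies, which is why unowned entries vanish on their own (see the
// CreateUnowned unit test below) and DoDelete refuses to remove them
// explicitly.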
Status ResourceMgr::Lookup(const ResourceHandle& handle,
ResourceBase** resource) const {
tf_shared_lock l(mu_);
return DoLookup(handle.container(), handle.hash_code(),
"ResourceBase", handle.name(), resource);
}
Status ResourceMgr::DoLookup(const string& container, TypeIndex type,
const string& name,
ResourceBase** resource) const {
return DoLookup(container, type.hash_code(), type.name(), name, resource);
}
Status ResourceMgr::DoLookup(const string& container, uint64 type_hash_code,
const string& type_name,
const string& resource_name,
ResourceBase** resource) const {
const Container* b = gtl::FindPtrOrNull(containers_, container);
if (b == nullptr) {
return errors::NotFound("Container ", container,
" does not exist. (Could not find resource: ",
container, "/", resource_name, ")");
}
auto iter = b->find({type_hash_code, resource_name});
if (iter == b->end()) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " does not exist.");
}
ResourceBase* ptr = iter->second.GetResource().release();
if (ptr == nullptr) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " has been destroyed.");
}
*resource = ptr;
return absl::OkStatus();
}
Status ResourceMgr::PopResourceAndName(const string& container,
uint64 type_hash_code,
const string& resource_name,
const string& type_name,
ResourceAndName& resource_and_name) {
mutex_lock l(mu_);
Container* b = gtl::FindPtrOrNull(containers_, container);
if (b == nullptr) {
return errors::NotFound("Container ", container, " does not exist.");
}
auto iter = b->find({type_hash_code, resource_name});
if (iter == b->end()) {
return errors::NotFound("Resource ", container, "/", resource_name, "/",
type_name, " does not exist.");
}
std::swap(resource_and_name, iter->second);
b->erase(iter);
return absl::OkStatus();
}
Status ResourceMgr::DoDelete(const string& container, uint64 type_hash_code,
const string& resource_name,
const string& type_name) {
ResourceAndName resource_and_name;
TF_RETURN_IF_ERROR(PopResourceAndName(
container, type_hash_code, resource_name, type_name, resource_and_name));
if (std::holds_alternative<core::WeakPtr<ResourceBase>>(
resource_and_name.resource)) {
return errors::Internal(
"Cannot delete an unowned Resource ", container, "/", resource_name,
"/", type_name, " from ResourceMgr. ",
"This indicates ref-counting ResourceHandle is exposed to weak "
"ResourceHandle code paths.");
}
return absl::OkStatus();
}
Status ResourceMgr::DoDelete(const string& container, TypeIndex type,
const string& resource_name) {
return DoDelete(container, type.hash_code(), resource_name, type.name());
}
Status ResourceMgr::Delete(const ResourceHandle& handle) {
return DoDelete(handle.container(), handle.hash_code(), handle.name(),
"<unknown>");
}
Status ResourceMgr::Cleanup(const string& container) {
{
tf_shared_lock l(mu_);
if (!gtl::FindOrNull(containers_, container)) {
return absl::OkStatus();
}
}
Container* b = nullptr;
{
mutex_lock l(mu_);
auto iter = containers_.find(container);
if (iter == containers_.end()) {
return absl::OkStatus();
}
b = iter->second;
containers_.erase(iter);
}
CHECK(b != nullptr);
delete b;
return absl::OkStatus();
}
static bool IsValidContainerName(StringPiece s) {
using ::tensorflow::strings::Scanner;
return Scanner(s)
.One(Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH)
.Eos()
.GetResult();
}
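// Examples of the accepted container-name grammar, drawn from the unit tests
// below (added comment, not part of the original file):
//   valid:   "cat", "cat.0-dog", ".cat"   -- first char letter/digit/'.',
//            then letters, digits, '-', '.', '/'
//   invalid: "12$%" ('$' not allowed), "-cat" (cannot start with '-')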
Status ContainerInfo::Init(ResourceMgr* rmgr, const NodeDef& ndef,
bool use_node_name_as_default) {
CHECK(rmgr);
rmgr_ = rmgr;
string attr_container;
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "container", &attr_container));
if (!attr_container.empty() && !IsValidContainerName(attr_container)) {
return errors::InvalidArgument("container contains invalid characters: ",
attr_container);
}
string attr_shared_name;
TF_RETURN_IF_ERROR(GetNodeAttr(ndef, "shared_name", &attr_shared_name));
if (!attr_shared_name.empty() && (attr_shared_name[0] == '_')) {
return errors::InvalidArgument("shared_name cannot start with '_':",
attr_shared_name);
}
if (!attr_container.empty()) {
container_ = attr_container;
} else {
container_ = rmgr_->default_container();
}
if (!attr_shared_name.empty()) {
name_ = attr_shared_name;
} else if (use_node_name_as_default) {
name_ = ndef.name();
} else {
resource_is_private_to_kernel_ = true;
static std::atomic<int64_t> counter(0);
name_ = strings::StrCat("_", counter.fetch_add(1), "_", ndef.name());
}
return absl::OkStatus();
}
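// Resulting policy, as exercised by the ContainerInfo.Basic unit test below
// (illustrative summary, not part of the original file; node name "foo"):
//
//   container attr | shared_name attr | use_node_name | result
//   ---------------+------------------+---------------+-----------------------------
//   ""             | ""               | false         | [localhost,_<N>_foo,private]
//   ""             | ""               | true          | [localhost,foo,public]
//   ""             | "bar"            | either        | [localhost,bar,public]
//   "cat"          | "bar"            | either        | [cat,bar,public]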
string ContainerInfo::DebugString() const {
return strings::StrCat("[", container(), ",", name(), ",",
resource_is_private_to_kernel() ? "private" : "public",
"]");
}
const ResourceHandle& HandleFromInput(OpKernelContext* ctx, int input) {
return ctx->input(input).flat<ResourceHandle>()(0);
}
Status HandleFromInput(OpKernelContext* ctx, int input,
ResourceHandle* handle) {
TF_ASSIGN_OR_RETURN(const Tensor* tensor, ctx->get_input(input));
if (tensor->NumElements() == 0) {
return absl::InvalidArgumentError("Empty resource handle");
}
*handle = tensor->flat<ResourceHandle>()(0);
return absl::OkStatus();
}
Status HandleFromInput(OpKernelContext* ctx, StringPiece input,
ResourceHandle* handle) {
const Tensor* tensor;
TF_RETURN_IF_ERROR(ctx->input(input, &tensor));
if (tensor->NumElements() == 0) {
return absl::InvalidArgumentError("Empty resource handle");
}
*handle = tensor->flat<ResourceHandle>()(0);
return absl::OkStatus();
}
Status LookupResource(OpKernelContext* ctx, const ResourceHandle& p,
ResourceBase** value) {
TF_RETURN_IF_ERROR(internal::ValidateDevice(ctx, p));
if (p.IsRefCounting()) {
TF_ASSIGN_OR_RETURN(*value, p.GetResource<ResourceBase>());
(*value)->Ref();
return absl::OkStatus();
}
return ctx->resource_manager()->Lookup(p, value);
}
Status DeleteResource(OpKernelContext* ctx, const ResourceHandle& p) {
TF_RETURN_IF_ERROR(internal::ValidateDevice(ctx, p));
if (p.IsRefCounting()) {
return absl::OkStatus();
}
return ctx->resource_manager()->Delete(p);
}
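// Added note (not in the original file): ref-counting handles
// (p.IsRefCounting()) carry their resource inside the handle itself, so
// LookupResource resolves them without consulting the ResourceMgr and
// DeleteResource is a no-op for them; only named handles go through the
// manager's container map.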
} | #include "tensorflow/core/framework/resource_mgr.h"
#include <memory>
#include <vector>
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/resource_handle.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
class Resource : public ResourceBase {
public:
explicit Resource(const string& label) : label_(label) {}
~Resource() override {}
string DebugString() const override { return strings::StrCat("R/", label_); }
private:
string label_;
};
class Other : public ResourceBase {
public:
explicit Other(const string& label) : label_(label) {}
~Other() override {}
string DebugString() const override { return strings::StrCat("O/", label_); }
private:
string label_;
};
template <typename T>
string Find(const ResourceMgr& rm, const string& container,
const string& name) {
T* r;
TF_CHECK_OK(rm.Lookup(container, name, &r));
const string ret = r->DebugString();
r->Unref();
return ret;
}
template <typename T>
string LookupOrCreate(ResourceMgr* rm, const string& container,
const string& name, const string& label) {
T* r;
TF_CHECK_OK(rm->LookupOrCreate<T>(container, name, &r, [&label](T** ret) {
*ret = new T(label);
return absl::OkStatus();
}));
const string ret = r->DebugString();
r->Unref();
return ret;
}
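// Added note (not in the original test file): LookupOrCreate is expected to
// run the factory lambda at most once even under concurrent callers; the
// CreateOrLookupRaceCondition test below races two threads against a slow
// factory and asserts that the creation counter ends at exactly 1.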
static void HasError(const Status& s, const error::Code code,
const string& substr) {
EXPECT_EQ(s.code(), code);
EXPECT_TRUE(absl::StrContains(s.message(), substr))
<< s << ", expected substring " << substr;
}
template <typename T>
Status FindErr(const ResourceMgr& rm, const string& container,
const string& name) {
T* r;
Status s = rm.Lookup(container, name, &r);
CHECK(!s.ok());
return s;
}
TEST(ResourceMgrTest, Basic) {
ResourceMgr rm;
TF_CHECK_OK(rm.Create("foo", "bar", new Resource("cat")));
TF_CHECK_OK(rm.Create("foo", "baz", new Resource("dog")));
TF_CHECK_OK(rm.Create("foo", "bar", new Other("tiger")));
HasError(rm.Create("foo", "bar", new Resource("kitty")),
error::ALREADY_EXISTS, "Resource foo/bar");
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
EXPECT_EQ("R/dog", Find<Resource>(rm, "foo", "baz"));
EXPECT_EQ("O/tiger", Find<Other>(rm, "foo", "bar"));
HasError(FindErr<Resource>(rm, "bar", "foo"), error::NOT_FOUND,
"Container bar");
HasError(FindErr<Resource>(rm, "foo", "xxx"), error::NOT_FOUND,
"Resource foo/xxx");
HasError(FindErr<Other>(rm, "foo", "baz"), error::NOT_FOUND,
"Resource foo/baz");
TF_CHECK_OK(rm.Delete<Resource>("foo", "bar"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
HasError(rm.Delete<Resource>("foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
TF_CHECK_OK(rm.Create("foo", "bar", new Resource("kitty")));
EXPECT_EQ("R/kitty", Find<Resource>(rm, "foo", "bar"));
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("bar"));
}
TEST(ResourceMgrTest, CreateUnowned) {
core::RefCountPtr<Resource> cat{new Resource("cat")};
core::RefCountPtr<Resource> kitty{new Resource("kitty")};
ASSERT_TRUE(cat->RefCountIsOne());
ASSERT_TRUE(kitty->RefCountIsOne());
ResourceMgr rm;
TF_CHECK_OK(rm.CreateUnowned("foo", "bar", cat.get()));
EXPECT_TRUE(cat->RefCountIsOne());
HasError(rm.CreateUnowned("foo", "bar", kitty.get()), error::ALREADY_EXISTS,
"Resource foo/bar");
EXPECT_TRUE(kitty->RefCountIsOne());
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
HasError(FindErr<Resource>(rm, "bar", "foo"), error::NOT_FOUND,
"Container bar");
HasError(FindErr<Resource>(rm, "foo", "xxx"), error::NOT_FOUND,
"Resource foo/xxx");
HasError(rm.Delete<Resource>("foo", "bar"), error::INTERNAL,
"Cannot delete an unowned Resource foo/bar");
TF_CHECK_OK(rm.CreateUnowned("foo", "bar", kitty.get()));
EXPECT_TRUE(kitty->RefCountIsOne());
EXPECT_EQ("R/kitty", Find<Resource>(rm, "foo", "bar"));
{
core::RefCountPtr<Resource> dog{new Resource("dog")};
TF_CHECK_OK(rm.CreateUnowned("foo", "bark", dog.get()));
EXPECT_EQ("R/dog", Find<Resource>(rm, "foo", "bark"));
EXPECT_EQ(1, dog->WeakRefCount());
{
ResourceMgr rm1;
TF_CHECK_OK(rm1.CreateUnowned("foo", "bark", dog.get()));
EXPECT_EQ("R/dog", Find<Resource>(rm1, "foo", "bark"));
EXPECT_EQ(2, dog->WeakRefCount());
}
EXPECT_EQ(1, dog->WeakRefCount());
}
HasError(FindErr<Resource>(rm, "foo", "bark"), error::NOT_FOUND,
"Resource foo/bark");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("foo"));
HasError(FindErr<Resource>(rm, "foo", "bar"), error::NOT_FOUND,
"Container foo");
TF_CHECK_OK(rm.Cleanup("bar"));
EXPECT_TRUE(cat->RefCountIsOne());
EXPECT_TRUE(kitty->RefCountIsOne());
}
TEST(ResourceMgrTest, CreateOrLookup) {
ResourceMgr rm;
EXPECT_EQ("R/cat", LookupOrCreate<Resource>(&rm, "foo", "bar", "cat"));
EXPECT_EQ("R/cat", LookupOrCreate<Resource>(&rm, "foo", "bar", "dog"));
EXPECT_EQ("R/cat", Find<Resource>(rm, "foo", "bar"));
EXPECT_EQ("O/tiger", LookupOrCreate<Other>(&rm, "foo", "bar", "tiger"));
EXPECT_EQ("O/tiger", LookupOrCreate<Other>(&rm, "foo", "bar", "lion"));
TF_CHECK_OK(rm.Delete<Other>("foo", "bar"));
HasError(FindErr<Other>(rm, "foo", "bar"), error::NOT_FOUND,
"Resource foo/bar");
}
TEST(ResourceMgrTest, CreateOrLookupRaceCondition) {
ResourceMgr rm;
std::atomic<int> atomic_int(0);
{
thread::ThreadPool threads(Env::Default(), "racing_creates", 2);
for (int i = 0; i < 2; i++) {
threads.Schedule([&rm, &atomic_int] {
Resource* r;
TF_CHECK_OK(rm.LookupOrCreate<Resource>(
"container", "resource-name", &r, [&atomic_int](Resource** ret) {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
atomic_int += 1;
*ret = new Resource("label");
return absl::OkStatus();
}));
r->Unref();
});
}
}
EXPECT_EQ(1, atomic_int);
}
Status ComputePolicy(const string& attr_container,
const string& attr_shared_name,
bool use_node_name_as_default, string* result) {
ContainerInfo cinfo;
ResourceMgr rmgr;
NodeDef ndef;
ndef.set_name("foo");
if (attr_container != "none") {
AddNodeAttr("container", attr_container, &ndef);
}
if (attr_shared_name != "none") {
AddNodeAttr("shared_name", attr_shared_name, &ndef);
}
TF_RETURN_IF_ERROR(cinfo.Init(&rmgr, ndef, use_node_name_as_default));
*result = cinfo.DebugString();
return absl::OkStatus();
}
string Policy(const string& attr_container, const string& attr_shared_name,
bool use_node_name_as_default) {
string ret;
TF_CHECK_OK(ComputePolicy(attr_container, attr_shared_name,
use_node_name_as_default, &ret));
return ret;
}
TEST(ContainerInfo, Basic) {
EXPECT_TRUE(RE2::FullMatch(Policy("", "", false),
"\\[localhost,_\\d+_foo,private\\]"));
EXPECT_EQ(Policy("", "", true), "[localhost,foo,public]");
EXPECT_EQ(Policy("", "bar", false), "[localhost,bar,public]");
EXPECT_EQ(Policy("", "bar", true), "[localhost,bar,public]");
EXPECT_TRUE(
RE2::FullMatch(Policy("cat", "", false), "\\[cat,_\\d+_foo,private\\]"));
EXPECT_EQ(Policy("cat", "", true), "[cat,foo,public]");
EXPECT_EQ(Policy("cat", "bar", false), "[cat,bar,public]");
EXPECT_EQ(Policy("cat", "bar", true), "[cat,bar,public]");
EXPECT_EQ(Policy("cat.0-dog", "bar", true), "[cat.0-dog,bar,public]");
EXPECT_EQ(Policy(".cat", "bar", true), "[.cat,bar,public]");
}
Status WrongPolicy(const string& attr_container, const string& attr_shared_name,
bool use_node_name_as_default) {
string dbg;
auto s = ComputePolicy(attr_container, attr_shared_name,
use_node_name_as_default, &dbg);
CHECK(!s.ok());
return s;
}
TEST(ContainerInfo, Error) {
HasError(WrongPolicy("none", "", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("", "none", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("none", "none", false), error::NOT_FOUND, "No attr");
HasError(WrongPolicy("12$%", "", false), error::INVALID_ARGUMENT,
"container contains invalid char");
HasError(WrongPolicy("-cat", "", false), error::INVALID_ARGUMENT,
"container contains invalid char");
HasError(WrongPolicy("", "_foo", false), error::INVALID_ARGUMENT,
"shared_name cannot start with '_'");
}
class StubDevice : public DeviceBase {
public:
explicit StubDevice(const string& name) : DeviceBase(nullptr) {
attr_.set_name(name);
}
Allocator* GetAllocator(AllocatorAttributes) override {
return cpu_allocator();
}
const DeviceAttributes& attributes() const override { return attr_; }
const string& name() const override { return attr_.name(); }
private:
DeviceAttributes attr_;
};
class StubResource : public ResourceBase {
public:
string DebugString() const override { return ""; }
int value_{0};
};
TEST(ResourceHandleTest, CRUD) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
{
auto* r = new StubResource();
r->value_ = 42;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
}
{
core::RefCountPtr<StubResource> r;
TF_ASSERT_OK(LookupResource(&ctx, p, &r));
ASSERT_TRUE(r != nullptr);
EXPECT_EQ(r->value_, 42);
}
{
TF_EXPECT_OK(DeleteResource<StubResource>(&ctx, p));
core::RefCountPtr<StubResource> unused;
EXPECT_FALSE(LookupResource(&ctx, p, &unused).ok());
}
}
TEST(ResourceHandleTest, ResourceFromValidIntInput) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 1);
ResourceHandleProto proto;
proto.set_device("cpu:0");
proto.set_container("test_container");
proto.set_name("test_var");
auto handle = std::make_unique<ResourceHandle>(proto);
auto expected_summary =
"ResourceHandle(name=\"test_var\", device=\"cpu:0\", "
"container=\"test_container\", type=\"\", dtype and shapes : \"[ ]\")";
EXPECT_EQ(handle->SummarizeValue(), expected_summary);
Tensor arg0(DT_RESOURCE, TensorShape({2}));
arg0.flat<ResourceHandle>()(0) = *handle;
std::vector<tensorflow::TensorValue> inputs{TensorValue(new Tensor(arg0))};
params.inputs = inputs;
ResourceHandle get_int_handle;
TF_ASSERT_OK(HandleFromInput(&ctx, 0, &get_int_handle));
EXPECT_EQ(get_int_handle.SummarizeValue(), expected_summary);
delete inputs.at(0).tensor;
}
TEST(ResourceHandleTest, ResourceFromInvalidIntInput) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle get_int_handle;
EXPECT_FALSE(HandleFromInput(&ctx, 0, &get_int_handle).ok());
}
TEST(ResourceHandleTest, ResourceFromIntInputWithoutResource) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 1);
std::vector<tensorflow::TensorValue> inputs{TensorValue(new Tensor())};
params.inputs = inputs;
ResourceHandle get_int_handle;
EXPECT_FALSE(HandleFromInput(&ctx, 0, &get_int_handle).ok());
delete inputs.at(0).tensor;
}
TEST(ResourceHandleTest, LookupDeleteGenericResource) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
{
auto* r = new StubResource();
r->value_ = 42;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
}
{
ResourceBase* r;
TF_ASSERT_OK(LookupResource(&ctx, p, &r));
ASSERT_TRUE(r != nullptr);
core::ScopedUnref unref(r);
EXPECT_EQ(static_cast<StubResource*>(r)->value_, 42);
}
{
TF_EXPECT_OK(DeleteResource(&ctx, p));
ResourceBase* unused;
EXPECT_FALSE(LookupResource(&ctx, p, &unused).ok());
}
}
TEST(ResourceHandleTest, DifferentDevice) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(¶ms, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
ResourceMgr other_resource_mgr("");
OpKernelContext::Params other_params;
other_params.resource_manager = &other_resource_mgr;
StubDevice other_device("other_device_name");
other_params.device = &other_device;
OpKernelContext other_ctx(&other_params, 0);
auto* r = new StubResource();
ASSERT_FALSE(CreateResource(&other_ctx, p, r).ok());
r->Unref();
}
class OtherStubResource : public ResourceBase {
public:
string DebugString() const override { return ""; }
};
TEST(ResourceHandleTest, DifferentType) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(&params, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
auto* r = new OtherStubResource;
ASSERT_FALSE(CreateResource(&ctx, p, r).ok());
r->Unref();
}
TEST(ResourceHandleTest, DeleteUsingResourceHandle) {
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
StubDevice device("device_name");
params.device = &device;
OpKernelContext ctx(&params, 0);
ResourceHandle p =
MakeResourceHandle<StubResource>(&ctx, "container", "name");
StubResource* r = new StubResource;
TF_EXPECT_OK(CreateResource(&ctx, p, r));
core::RefCountPtr<StubResource> lookup_r;
TF_EXPECT_OK(LookupResource<StubResource>(&ctx, p, &lookup_r));
EXPECT_EQ(lookup_r.get(), r);
TF_EXPECT_OK(DeleteResource(&ctx, p));
EXPECT_FALSE(LookupResource<StubResource>(&ctx, p, &lookup_r).ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/resource_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
78b40b32-1ec5-4330-b01f-7f1832369dd2 | cpp | tensorflow/tensorflow | tensor_slice | tensorflow/core/framework/tensor_slice.cc | tensorflow/core/framework/tensor_slice_test.cc | #include "tensorflow/core/framework/tensor_slice.h"
#include <limits>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
TensorSlice::TensorSlice(int dim) { SetFullSlice(dim); }
TensorSlice::TensorSlice(const TensorSliceProto& proto) {
starts_.reserve(proto.extent_size());
lengths_.reserve(proto.extent_size());
for (const auto& e : proto.extent()) {
starts_.push_back(e.start());
lengths_.push_back(GetExtentLength(e));
}
}
TensorSlice::TensorSlice(
std::initializer_list<std::pair<int64_t, int64_t>> extents) {
starts_.reserve(extents.size());
lengths_.reserve(extents.size());
for (const auto& e : extents) {
starts_.push_back(e.first);
lengths_.push_back(e.second);
}
}
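// Like the TensorSliceProto constructor, but validates each non-full extent
// instead of silently accepting it: starts must be non-negative, lengths
// positive, and start + length must not exceed the int64 maximum.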
Status TensorSlice::BuildTensorSlice(const TensorSliceProto& proto,
TensorSlice* output) {
output->Clear();
output->starts_.reserve(proto.extent_size());
output->lengths_.reserve(proto.extent_size());
for (const auto& e : proto.extent()) {
int64_t l = GetExtentLength(e);
if (e.start() != 0 || l != kFullExtent) {
if (e.start() < 0 || l <= 0) {
return errors::InvalidArgument(
"Expected non-negative start and positive length but got start = ",
e.start(), ", length = ", l, ": extent = ", e.ShortDebugString());
}
if (static_cast<uint64_t>(e.start()) + static_cast<uint64_t>(e.length()) >
std::numeric_limits<int64_t>::max()) {
return errors::InvalidArgument(
"Extent end exceeds the maximum possible size: extent = ",
e.ShortDebugString());
}
}
output->starts_.push_back(e.start());
output->lengths_.push_back(l);
}
return absl::OkStatus();
}
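// Parses the colon-separated string form of a slice, e.g. "-:0,10:14,1",
// where "-" denotes a full extent and "start,length" a bounded one.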
Status TensorSlice::Parse(const string& str, TensorSlice* slice) {
std::vector<string> items = str_util::Split(str, ':', str_util::SkipEmpty());
slice->starts_.reserve(items.size());
slice->lengths_.reserve(items.size());
for (const string& x : items) {
int64_t s, l;
if (x == "-") {
s = 0;
l = kFullExtent;
} else {
std::vector<string> sl = str_util::Split(x, ',', str_util::SkipEmpty());
if (sl.size() != 2 || !strings::safe_strto64(sl[0], &s) ||
!strings::safe_strto64(sl[1], &l)) {
return errors::InvalidArgument(
"Expected a pair of numbers or '-' "
"but got '",
x, "': string = ", str);
}
if (s < 0 || l <= 0) {
return errors::InvalidArgument(
"Expected non-negative start and "
"positive length but got start = ",
s, ", length = ", l, ": string = ", str);
}
}
slice->starts_.push_back(s);
slice->lengths_.push_back(l);
}
return absl::OkStatus();
}
void TensorSlice::Clear() {
starts_.clear();
lengths_.clear();
}
bool TensorSlice::IsFull() const {
for (int d = 0; d < dims(); ++d) {
if (!IsFullAt(d)) return false;
}
return true;
}
void TensorSlice::SetFullSlice(int dim) {
Clear();
starts_.reserve(dim);
lengths_.reserve(dim);
for (int d = 0; d < dim; ++d) {
starts_.push_back(0);
lengths_.push_back(kFullExtent);
}
}
void TensorSlice::Extend(int dim) {
int old_dim = dims();
DCHECK_LE(old_dim, dim);
starts_.resize(dim);
lengths_.resize(dim);
for (int d = old_dim; d < dim; ++d) {
starts_[d] = 0;
lengths_[d] = kFullExtent;
}
}
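// Full extents are encoded as empty Extent messages, so a full slice
// round-trips as a proto with no start/length fields set.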
void TensorSlice::AsProto(TensorSliceProto* proto) const {
for (int d = 0; d < dims(); ++d) {
TensorSliceProto::Extent* e = proto->add_extent();
if (!IsFullAt(d)) {
e->set_start(starts_[d]);
e->set_length(lengths_[d]);
}
}
}
string TensorSlice::DebugString() const {
string buffer;
bool first = true;
for (int d = 0; d < dims(); ++d) {
if (!first) {
buffer.append(":");
}
if (IsFullAt(d)) {
buffer.append("-");
} else {
strings::StrAppend(&buffer, starts_[d], ",", lengths_[d]);
}
first = false;
}
return buffer;
}
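// Computes the per-dimension overlap of two slices of equal rank. A full
// extent intersected with anything yields the other slice's extent. Returns
// false if the ranks differ, or if any dimension's overlap is empty (in
// which case *result is cleared).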
bool TensorSlice::Intersect(const TensorSlice& other,
TensorSlice* result) const {
if (dims() != other.dims()) {
return false;
}
if (result) {
result->SetFullSlice(dims());
}
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
if (result) {
result->set_start(d, other.start(d));
result->set_length(d, other.length(d));
}
} else if (other.IsFullAt(d)) {
if (result) {
result->set_start(d, start(d));
result->set_length(d, length(d));
}
} else {
int64_t s = std::max(start(d), other.start(d));
int64_t l = std::min(end(d), other.end(d)) - s;
if (l > 0) {
if (result) {
result->set_start(d, s);
result->set_length(d, l);
}
} else {
if (result) {
result->Clear();
}
return false;
}
}
}
return true;
}
bool TensorSlice::operator==(const TensorSlice& other) const {
return dims() == other.dims() && starts_ == other.starts_ &&
lengths_ == other.lengths_;
}
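// Expresses 'sub' in coordinates relative to *this: bounded dimensions
// subtract this slice's start, while full dimensions keep sub's absolute
// coordinates.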
void TensorSlice::ComputeRelative(const TensorSlice& sub,
TensorSlice* relative) const {
DCHECK_EQ(dims(), sub.dims());
relative->SetFullSlice(dims());
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
relative->set_start(d, sub.start(d));
relative->set_length(d, sub.length(d));
} else {
relative->set_start(d, sub.start(d) - start(d));
relative->set_length(d, sub.length(d));
}
}
}
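// Grows each dimension just enough to also cover 'other': a full extent on
// either side stays or becomes full, otherwise the extent becomes the
// bounding interval of the two.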
void TensorSlice::UpdateToCover(const TensorSlice& other) {
DCHECK_EQ(dims(), other.dims());
for (int d = 0; d < dims(); ++d) {
if (!IsFullAt(d)) {
if (other.IsFullAt(d)) {
starts_[d] = 0;
lengths_[d] = kFullExtent;
} else {
const auto new_end = std::max(end(d), other.end(d));
set_start(d, std::min(start(d), other.start(d)));
set_length(d, new_end - start(d));
}
}
}
}
bool TensorSlice::HasExtentLength(const TensorSliceProto::Extent& extent) {
return extent.has_length_case() == TensorSliceProto::Extent::kLength;
}
int64_t TensorSlice::GetExtentLength(const TensorSliceProto::Extent& extent) {
if (!HasExtentLength(extent)) return -1;
return extent.length();
}
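// Computes the shape of the sub-tensor selected by this slice from a tensor
// of shape 'shape', failing if the ranks mismatch or an extent runs past the
// end of its dimension.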
Status TensorSlice::SliceTensorShape(const TensorShape& shape,
TensorShape* result_shape) const {
result_shape->Clear();
if (shape.dims() != dims()) {
return errors::Internal("Mismatching ranks: shape = ", shape.DebugString(),
", slice = ", DebugString());
}
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
result_shape->AddDim(shape.dim_size(d));
} else {
if (end(d) <= shape.dim_size(d)) {
result_shape->AddDim(length(d));
} else {
result_shape->Clear();
return errors::Internal("Extent in dimension ", d,
" out of bounds: shape = ", shape.DebugString(),
", slice = ", DebugString());
}
}
}
return absl::OkStatus();
}
const int64_t TensorSlice::kFullExtent = -1;
} | #include "tensorflow/core/framework/tensor_slice.h"
#include <limits>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(TensorSliceTest, Basic) {
{
TensorSlice s(3);
EXPECT_EQ("-:-:-", s.DebugString());
EXPECT_TRUE(s.IsFull());
s.SetFullSlice(4);
EXPECT_EQ("-:-:-:-", s.DebugString());
EXPECT_TRUE(s.IsFull());
}
}
TEST(TensorSliceTest, Serialization) {
{
TensorSlice s({{0, -1}, {0, 10}, {14, 1}, {0, -1}});
EXPECT_EQ("-:0,10:14,1:-", s.DebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSliceProto proto;
const char* ptxt = R"pb(
extent {}
extent { start: 0 length: 10 }
extent { start: 14 length: 1 }
extent {}
)pb";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(ptxt, &proto));
TensorSlice s(proto);
EXPECT_EQ("-:0,10:14,1:-", s.DebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
TensorSliceProto proto;
s.AsProto(&proto);
TensorSliceProto expected_slice_proto;
protobuf::TextFormat::ParseFromString(
"extent { } "
"extent { } "
"extent { start: 1 length: 3 } "
"extent { start: 4 length: 5 }",
&expected_slice_proto);
EXPECT_EQ(proto.ShortDebugString(),
expected_slice_proto.ShortDebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice slice;
Status s = TensorSlice::Parse("-:-:1,3:4:5", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(),
"Expected a pair of numbers or '-' but got '4': "
"string = -:-:1,3:4:5"));
}
{
TensorSlice slice;
Status s = TensorSlice::Parse("-:-1,3", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(),
"Expected non-negative start and positive length but got "
"start = -1, length = 3: string = -:-1,3"));
}
{
TensorSlice s =
TensorSlice::ParseOrDie("9223372036854775807,9223372036854775807");
TensorSliceProto proto;
s.AsProto(&proto);
TensorSliceProto expected_slice_proto;
protobuf::TextFormat::ParseFromString(
"extent { start: 9223372036854775807 length: 9223372036854775807 }",
&expected_slice_proto);
EXPECT_EQ(proto.ShortDebugString(),
expected_slice_proto.ShortDebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice slice;
Status s =
TensorSlice::Parse("19223372036854775808,19223372036854775808", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(),
"Expected a pair of numbers or '-' but got "
"'19223372036854775808,19223372036854775808': string = "
"19223372036854775808,19223372036854775808"));
}
}
TEST(TensorSliceTest, BuildTensorSlice) {
TensorSliceProto proto;
TensorSlice({{0, -1}, {0, 10}, {14, 1}}).AsProto(&proto);
TensorSlice s;
{
TF_ASSERT_OK(TensorSlice::BuildTensorSlice(proto, &s));
EXPECT_EQ("-:0,10:14,1", s.DebugString());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(0)->set_start(-1);
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->set_length(-1);
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->clear_length();
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->set_length(
std::numeric_limits<int64_t>::max());
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
}
TEST(TensorSliceTest, Intersection) {
{
TensorSlice a = TensorSlice::ParseOrDie("-:-");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("1,2:3,4", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("-:-");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4");
TensorSlice c;
EXPECT_TRUE(b.Intersect(a, &c));
EXPECT_EQ("1,2:3,4", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,5:2,6:3,7:5,10");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4:9,10:12,1");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("1,2:3,4:9,1:12,1", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("-:1,1");
TensorSlice b = TensorSlice::ParseOrDie("-:0,2");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("-:1,1", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,2:3,1:5,6");
TensorSlice b = TensorSlice::ParseOrDie("1,3:4,5:1,6");
TensorSlice c;
EXPECT_FALSE(a.Intersect(b, &c));
EXPECT_EQ("", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,2:3,1:-");
TensorSlice b = TensorSlice::ParseOrDie("-:-");
TensorSlice c;
EXPECT_FALSE(a.Intersect(b, &c));
EXPECT_EQ("", c.DebugString());
}
}
TEST(TensorSliceTest, SliceTensorShape) {
{
TensorSlice a = TensorSlice::ParseOrDie("1,1:-:4,1:2,6");
TensorShape x({2, 4, 5, 8});
TensorShape y;
TF_EXPECT_OK(a.SliceTensorShape(x, &y));
EXPECT_EQ("[1,4,1,6]", y.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,1:1,4:-:-");
TensorShape x({2, 4, 5, 8});
TensorShape y;
Status s = a.SliceTensorShape(x, &y);
EXPECT_EQ(s.code(), error::INTERNAL);
EXPECT_TRUE(absl::StrContains(s.message(),
"Extent in dimension 1 out of bounds: "
"shape = [2,4,5,8], slice = 1,1:1,4:-:-"));
EXPECT_EQ("[]", y.DebugString());
}
}
TEST(TensorSliceTest, ComputeRelative) {
{
TensorSlice base = TensorSlice::ParseOrDie("-:-:-:-");
TensorSlice sub = TensorSlice::ParseOrDie("-:1,2:-:3,4");
TensorSlice relative;
base.ComputeRelative(sub, &relative);
EXPECT_EQ("-:1,2:-:3,4", relative.DebugString());
}
{
TensorSlice base = TensorSlice::ParseOrDie("1,2:3,4:-:5,1");
TensorSlice sub = TensorSlice::ParseOrDie("1,1:4,2:3,3:5,1");
TensorSlice relative;
base.ComputeRelative(sub, &relative);
EXPECT_EQ("0,1:1,2:3,3:0,1", relative.DebugString());
}
}
TEST(TensorSliceTest, ExtentLength) {
TensorSliceProto proto;
const char* ptxt = R"pb(
extent {}
extent { start: 0 length: 10 }
extent { start: 14 length: 1 }
extent {}
)pb";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(ptxt, &proto));
EXPECT_FALSE(TensorSlice::HasExtentLength(proto.extent(0)));
EXPECT_TRUE(TensorSlice::HasExtentLength(proto.extent(1)));
EXPECT_TRUE(TensorSlice::HasExtentLength(proto.extent(2)));
EXPECT_FALSE(TensorSlice::HasExtentLength(proto.extent(3)));
EXPECT_EQ(-1, TensorSlice::GetExtentLength(proto.extent(0)));
EXPECT_EQ(10, TensorSlice::GetExtentLength(proto.extent(1)));
EXPECT_EQ(1, TensorSlice::GetExtentLength(proto.extent(2)));
EXPECT_EQ(-1, TensorSlice::GetExtentLength(proto.extent(3)));
}
TEST(TensorSliceTest, Deserialization) {
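// pb2 and pb3 are proto2- and proto3-style wire encodings of the same
// five-extent slice: proto2 writes the explicit zero start of the second
// extent, proto3 omits it. Both must deserialize to identical slices.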
const char pb2[] =
"\x0A\x02\x10\x05\x0A\x04\x08\x00"
"\x10\x0A\x0A\x04\x08\x0E\x10\x01\x0A\x02\x08\x01\x0A\x00";
const char pb3[] =
"\x0A\x02\x10\x05\x0A\x02"
"\x10\x0A\x0A\x04\x08\x0E\x10\x01\x0A\x02\x08\x01\x0A\x00";
TensorSliceProto proto2;
ASSERT_TRUE(proto2.ParseFromArray(pb2, sizeof(pb2) - 1));
TensorSlice ts2(proto2);
TensorSliceProto proto3;
ASSERT_TRUE(proto3.ParseFromArray(pb3, sizeof(pb3) - 1));
TensorSlice ts3(proto3);
EXPECT_EQ("0,5:0,10:14,1:1,-1:-", ts2.DebugString());
EXPECT_EQ("0,5:0,10:14,1:1,-1:-", ts3.DebugString());
}
TEST(TensorSliceTest, UpdateToCover) {
TensorSlice s({{2, 2}, {0, -1}, {3, 7}});
TensorSlice other({{0, -1}, {1, 3}, {2, 2}});
s.UpdateToCover(other);
EXPECT_EQ("-:-:2,8", s.DebugString());
}
TEST(TensorSliceTest, IsFull) {
TensorSlice slice(3);
EXPECT_TRUE(slice.IsFull());
TensorSlice slice2({{0, -1}});
EXPECT_TRUE(slice2.IsFull());
TensorSlice slice3({{0, -1}, {0, -1}, {14, 1}});
EXPECT_TRUE(!slice3.IsFull());
}
TEST(TensorSliceTest, Equality) {
{
TensorSlice slice1(3);
TensorSlice slice2(2);
EXPECT_TRUE(slice1 != slice2);
EXPECT_TRUE(slice2 != slice1);
}
{
TensorSlice slice1(3);
TensorSlice slice2({{0, -1}, {0, -1}, {0, -1}});
EXPECT_TRUE(slice1 == slice2);
EXPECT_TRUE(slice2 == slice1);
}
{
TensorSlice slice1(3);
TensorSlice slice2({{0, -1}, {0, 1}, {0, -1}});
EXPECT_TRUE(slice1 != slice2);
EXPECT_TRUE(slice2 != slice1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b6ab60b-4ff4-4235-aa67-57e09a1db005 | cpp | tensorflow/tensorflow | device_base | tensorflow/core/framework/device_base.cc | tensorflow/core/framework/device_base_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/device_base.h"
#include <algorithm>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/notification.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
DeviceBase::~DeviceBase() {
for (auto& temp : eigen_cpu_devices_) {
delete temp;
}
eigen_cpu_devices_.clear();
}
Status DeviceContext::CopyDeviceTensorToCPUSync(const Tensor* device_tensor,
StringPiece tensor_name,
Device* device,
Tensor* cpu_tensor) {
absl::Notification n;
Status status;
CopyDeviceTensorToCPU(device_tensor, tensor_name, device, cpu_tensor,
[&](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
Status DeviceContext::CopyCPUTensorToDeviceSync(const Tensor* cpu_tensor,
Device* device,
Tensor* device_tensor) const {
absl::Notification n;
Status status;
CopyCPUTensorToDevice(cpu_tensor, device, device_tensor,
[&](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
const DeviceAttributes& DeviceBase::attributes() const {
LOG(FATAL) << "DeviceBase does not implement attributes()";
std::abort();
}
const string& DeviceBase::name() const {
LOG(FATAL) << "DeviceBase does not implement name()";
std::abort();
}
const DeviceNameUtils::ParsedName& DeviceBase::parsed_name() const {
LOG(FATAL) << "DeviceBase does not implement parsed_name()";
std::abort();
}
const std::string& DeviceBase::device_type() const {
LOG(FATAL) << "DeviceBase does not implement device_type()";
std::abort();
}
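// Pre-builds one ThreadPoolDevice per parallelism level (1..numThreads), all
// sharing the underlying pool and allocator, so eigen_cpu_device() can hand
// out a device with the right concurrency without locking or allocation.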
void DeviceBase::set_eigen_cpu_device(Eigen::ThreadPoolDevice* d) {
for (int i = 1; i <= d->numThreads(); ++i) {
eigen_cpu_devices_.push_back(new Eigen::ThreadPoolDevice(
d->getPool(), i /* number of threads to use */, d->allocator()));
}
}
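// Picks the pre-built device whose parallelism matches the caller's
// per-thread limit, clamped to [1, numThreads].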
const Eigen::ThreadPoolDevice* DeviceBase::eigen_cpu_device() {
const int parallelism = std::max<int>(
1,
std::min<int>(GetPerThreadMaxParallelism(), eigen_cpu_devices_.size()));
return eigen_cpu_devices_[parallelism - 1];
}
namespace {
absl::flat_hash_set<std::string>* GetSymbolicDeviceList() {
static absl::flat_hash_set<std::string>* symbolic_device_list =
new absl::flat_hash_set<std::string>();
return symbolic_device_list;
}
}
void AddSymbolicExecutionDevice(const absl::string_view device_name) {
GetSymbolicDeviceList()->insert(std::string(device_name));
}
bool IsSymbolicExecutionDevice(const absl::string_view device_name) {
absl::flat_hash_set<std::string>* symbolic_devices = GetSymbolicDeviceList();
return symbolic_devices->contains(device_name);
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/device_base.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
TEST(DeviceBaseTest, CpuDevice) {
DeviceBase dbase(Env::Default());
thread::ThreadPool pool(Env::Default(), "test", 16);
Eigen::ThreadPoolDevice eigen_device(pool.AsEigenThreadPool(),
pool.NumThreads());
ASSERT_FALSE(dbase.has_eigen_cpu_device());
dbase.set_eigen_cpu_device(&eigen_device);
ASSERT_TRUE(dbase.has_eigen_cpu_device());
{
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 16);
}
{
ScopedPerThreadMaxParallelism maxp(4);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 4);
}
{
ScopedPerThreadMaxParallelism maxp(1);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 1);
}
{
ScopedPerThreadMaxParallelism maxp(1000);
auto d = dbase.eigen_cpu_device();
EXPECT_EQ(d->numThreads(), 16);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/device_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/device_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
73030787-d952-461e-beaf-51cb58b76e6a | cpp | tensorflow/tensorflow | node_def_builder | tensorflow/core/framework/node_def_builder.cc | tensorflow/core/framework/node_def_builder_test.cc | #include "tensorflow/core/framework/node_def_builder.h"
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
NodeDefBuilder::NodeOut::NodeOut(StringPiece n, int i, DataType dt)
: node(n), index(i), data_type(dt) {}
NodeDefBuilder::NodeOut::NodeOut() {}
void NodeDefBuilder::NodeOut::Reset(StringPiece n, int i, DataType dt) {
node = string(n);
index = i;
data_type = dt;
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug) {
node_def_.set_name(string(name));
const Status status = op_registry->LookUpOpDef(string(op_name), &op_def_);
if (status.ok()) {
Initialize();
} else {
errors_.push_back(std::string(status.message()));
inputs_specified_ = 0;
}
if (debug != nullptr) MergeDebugInfo(*debug, &node_def_);
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, StringPiece op_name,
const NodeDebugInfo& debug)
: NodeDefBuilder(name, op_name) {
MergeDebugInfo(debug, &node_def_);
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, const OpDef* op_def)
: op_def_(op_def) {
node_def_.set_name(string(name));
Initialize();
}
void NodeDefBuilder::Initialize() {
inputs_specified_ = 0;
node_def_.set_op(op_def_->name());
}
const OpDef::ArgDef* NodeDefBuilder::NextArgDef() {
if (!NextArgAvailable()) return nullptr;
return &op_def_->input_arg(inputs_specified_++);
}
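// Returns true while declared input args remain to be consumed. Returns
// false silently when the op lookup failed, or records a "More Input()
// calls" error when all declared args have already been used.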
bool NodeDefBuilder::NextArgAvailable() {
if (op_def_ == nullptr) {
return false;
} else if (inputs_specified_ >= op_def_->input_arg_size()) {
errors_.push_back(strings::StrCat("More Input() calls than the ",
op_def_->input_arg_size(),
" input_args"));
return false;
}
return true;
}
NodeDefBuilder& NodeDefBuilder::Input(FakeInputFunctor fake_input) {
if (NextArgAvailable()) {
Status status = fake_input(*op_def_, inputs_specified_, node_def_, this);
if (!status.ok()) errors_.push_back(std::string(status.message()));
}
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(StringPiece src_node, int src_index,
DataType dt) {
const OpDef::ArgDef* arg = NextArgDef();
if (arg != nullptr) SingleInput(arg, src_node, src_index, dt);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(const NodeOut& src) {
Input(src.node, src.index, src.data_type);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(absl::Span<const NodeOut> src_list) {
const OpDef::ArgDef* arg = NextArgDef();
if (arg != nullptr) ListInput(arg, src_list);
return *this;
}
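// Handles an input arg declared as a single tensor: records the input,
// rejects list-typed args, and either checks the given DataType against the
// OpDef or infers the op's type attr from it.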
void NodeDefBuilder::SingleInput(const OpDef::ArgDef* input_arg,
StringPiece src_node, int src_index,
DataType dt) {
AddInput(src_node, src_index);
if (!input_arg->number_attr().empty() ||
!input_arg->type_list_attr().empty()) {
errors_.push_back(strings::StrCat("Single tensor passed to '",
input_arg->name(), "', expected list"));
return;
}
if (input_arg->type() != DT_INVALID) {
const DataType expected = MaybeAddRef(input_arg, input_arg->type());
VerifyInputType(input_arg, expected, dt);
} else {
VerifyInputRef(input_arg, dt);
Attr(input_arg->type_attr(), BaseType(dt));
}
}
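// Handles an input arg declared as a list: sets the length attr (N) for
// number_attr lists, infers or checks the element type for homogeneous
// lists, collects per-element types for type_list_attr args, and records an
// error if the arg actually expects a single tensor.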
void NodeDefBuilder::ListInput(const OpDef::ArgDef* input_arg,
absl::Span<const NodeOut> src_list) {
for (const auto& node_out : src_list) {
AddInput(node_out.node, node_out.index);
}
if (!input_arg->number_attr().empty()) {
Attr(input_arg->number_attr(), static_cast<int64_t>(src_list.size()));
if (input_arg->type() != DT_INVALID) {
const DataType expected = MaybeAddRef(input_arg, input_arg->type());
for (const auto& node_out : src_list) {
VerifyInputType(input_arg, expected, node_out.data_type);
}
} else if (!src_list.empty()) {
const DataType base = BaseType(src_list[0].data_type);
Attr(input_arg->type_attr(), base);
const DataType expected = MaybeAddRef(input_arg, base);
for (const auto& node_out : src_list) {
VerifyInputType(input_arg, expected, node_out.data_type);
}
}
} else if (!input_arg->type_list_attr().empty()) {
DataTypeVector type_vec;
type_vec.reserve(src_list.size());
for (const auto& node_out : src_list) {
const DataType dt = node_out.data_type;
VerifyInputRef(input_arg, dt);
type_vec.push_back(BaseType(dt));
}
Attr(input_arg->type_list_attr(), type_vec);
} else {
errors_.push_back(strings::StrCat("List provided to input '",
input_arg->name(),
"' when single Tensor expected"));
}
}
void NodeDefBuilder::AddInput(StringPiece src_node, int src_index) {
if (src_node.empty()) {
errors_.push_back("Empty input node name");
} else if (src_node[0] == '^') {
errors_.push_back(
strings::StrCat("Non-control input starting with ^: ", src_node));
} else if (src_index > 0) {
node_def_.add_input(strings::StrCat(src_node, ":", src_index));
} else {
node_def_.add_input(string(src_node));
}
}
void NodeDefBuilder::VerifyInputType(const OpDef::ArgDef* input_arg,
DataType expected, DataType dt) {
if (!TypesCompatible(expected, dt)) {
errors_.push_back(strings::StrCat("Input '", input_arg->name(), "' passed ",
DataTypeString(dt), " expected ",
DataTypeString(expected)));
}
}
void NodeDefBuilder::VerifyInputRef(const OpDef::ArgDef* input_arg,
DataType dt) {
if (input_arg->is_ref() && !IsRefType(dt)) {
errors_.push_back(strings::StrCat("Input '", input_arg->name(), "' passed ",
DataTypeString(dt),
" expected ref type"));
}
}
NodeDefBuilder& NodeDefBuilder::ControlInput(StringPiece src_node) {
control_inputs_.emplace_back(src_node);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Device(StringPiece device_spec) {
node_def_.set_device(string(device_spec));
return *this;
}
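// Errors accumulated by earlier calls (plus an under-specified-inputs check)
// are reported here as one InvalidArgument status; on success, control
// inputs are appended after the data inputs and unset attrs receive their
// OpDef defaults.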
Status NodeDefBuilder::Finalize(NodeDef* node_def, bool consume) {
const std::vector<string>* errors_ptr = &errors_;
std::vector<string> errors_storage;
if (op_def_ != nullptr && inputs_specified_ < op_def_->input_arg_size()) {
errors_storage = errors_;
errors_storage.push_back(
strings::StrCat(inputs_specified_, " inputs specified of ",
op_def_->input_arg_size(), " inputs in Op"));
errors_ptr = &errors_storage;
}
if (!errors_ptr->empty()) {
if (errors_ptr->size() == 1) {
if (op_def_ == nullptr) {
return errors::InvalidArgument((*errors_ptr)[0],
" while building NodeDef '",
node_def_.name(), "'");
}
return errors::InvalidArgument(
(*errors_ptr)[0], " while building NodeDef '", node_def_.name(),
"' using ", SummarizeOpDef(*op_def_));
} else {
if (op_def_ == nullptr) {
return errors::InvalidArgument(
errors_ptr->size(), " errors while building NodeDef '",
node_def_.name(), "':\n", absl::StrJoin(*errors_ptr, "\n"));
}
return errors::InvalidArgument(
errors_ptr->size(), " errors while building NodeDef '",
node_def_.name(), "' using ", SummarizeOpDef(*op_def_), ":\n",
absl::StrJoin(*errors_ptr, "\n"));
}
} else {
NodeDef node_def_backup;
if (node_def == nullptr) node_def = &node_def_backup;
if (consume) {
*node_def = std::move(node_def_);
} else {
*node_def = node_def_;
}
for (const auto& control_input : control_inputs_) {
node_def->add_input(strings::StrCat("^", control_input));
}
AddDefaultsToNodeDef(*op_def_, node_def);
return absl::OkStatus();
}
}
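// Returns true if the attr was already set. Setting the same value twice is
// a no-op; conflicting values record an "Inconsistent values" error.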
bool NodeDefBuilder::AttrValueAlreadyPresent(StringPiece name,
const AttrValue& value) {
if (const AttrValue* found = AttrSlice(node_def_).Find(name)) {
if (!AreAttrValuesEqual(*found, value)) {
errors_.push_back(strings::StrCat("Inconsistent values for attr '", name,
"' ", SummarizeAttrValue(*found),
" vs. ", SummarizeAttrValue(value)));
}
return true;
}
return false;
}
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, const AttrValue& value) {
if (!AttrValueAlreadyPresent(name, value)) {
AddNodeAttr(name, value, &node_def_);
}
return *this;
}
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, AttrValue&& value) {
if (!AttrValueAlreadyPresent(name, value)) {
AddNodeAttr(name, std::move(value), &node_def_);
}
return *this;
}
#define ATTR(T) \
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, T value) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
return Attr(name, attr_value); \
}
ATTR(StringPiece)
ATTR(const char*)
ATTR(int32_t)
ATTR(int64_t)
ATTR(float)
ATTR(double)
ATTR(bool)
ATTR(DataType)
ATTR(const PartialTensorShape&)
ATTR(const Tensor&)
ATTR(const TensorProto&)
ATTR(const NameAttrList&)
ATTR(absl::Span<const StringPiece>)
ATTR(absl::Span<const char* const>)
ATTR(absl::Span<const string>)
ATTR(absl::Span<const tstring>)
ATTR(absl::Span<const int32>)
ATTR(absl::Span<const int64_t>)
ATTR(absl::Span<const float>)
ATTR(absl::Span<const bool>)
ATTR(const std::vector<bool>&)
ATTR(absl::Span<const DataType>)
ATTR(absl::Span<const TensorShape>)
ATTR(absl::Span<const PartialTensorShape>)
ATTR(absl::Span<const TensorShapeProto>)
ATTR(absl::Span<const Tensor>)
ATTR(absl::Span<const NameAttrList>)
#undef ATTR
} | #include "tensorflow/core/framework/node_def_builder.h"
#include <memory>
#include <vector>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class NodeDefBuilderTest : public ::testing::Test {
protected:
void Op(const OpDefBuilder& op_def_builder) {
OpRegistrationData op_reg_data;
TF_EXPECT_OK(op_def_builder.Finalize(&op_reg_data));
op_def_ = op_reg_data.op_def;
}
NodeDefBuilder& Builder() {
EXPECT_FALSE(op_def_.name().empty()) << "Must call Op() before Builder()";
builder_ = std::make_unique<NodeDefBuilder>("n", &op_def_);
return *builder_;
}
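// Finalizes the builder into a NodeDef, compares it against the expected
// proto text, checks the inferred input/output types, and validates the
// NodeDef against the OpDef.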
void ExpectSuccess(NodeDefBuilder& builder,
DataTypeSlice expected_in_types,
DataTypeSlice expected_out_types, StringPiece proto) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
TF_EXPECT_OK(status);
if (!status.ok()) return;
NodeDef expected;
protobuf::TextFormat::ParseFromString(strings::StrCat("name: 'n' ", proto),
&expected);
EXPECT_EQ(node_def.DebugString(), expected.DebugString());
DataTypeVector in_types, out_types;
status =
InOutTypesForNode(node_def, builder.op_def(), &in_types, &out_types);
TF_EXPECT_OK(status);
if (!status.ok()) return;
EXPECT_EQ(DataTypeSliceString(expected_in_types),
DataTypeVectorString(in_types));
EXPECT_EQ(DataTypeSliceString(expected_out_types),
DataTypeVectorString(out_types));
status = ValidateNodeDef(node_def, op_def_);
TF_EXPECT_OK(status);
}
void ExpectFailures(NodeDefBuilder& builder,
const std::vector<string>& messages) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
EXPECT_FALSE(status.ok()) << SummarizeNodeDef(node_def);
if (status.ok()) return;
for (const string& message : messages) {
EXPECT_TRUE(absl::StrContains(status.message(), message))
<< status << ", " << message;
}
}
void ExpectFailure(NodeDefBuilder& builder,
const string& message) {
ExpectFailures(builder, {message});
}
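// Unlike ExpectFailures, this also covers the case where Finalize()
// succeeds but the resulting NodeDef fails ValidateNodeDef().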
void ExpectInvalid(NodeDefBuilder& builder,
const string& message) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
if (status.ok()) {
status = ValidateNodeDef(node_def, op_def_);
}
EXPECT_FALSE(status.ok()) << SummarizeNodeDef(node_def);
if (status.ok()) return;
EXPECT_TRUE(absl::StrContains(status.message(), message))
<< "Actual error: " << status.message()
<< "\nDoes not contain: " << message;
}
OpDef op_def_;
std::unique_ptr<NodeDefBuilder> builder_;
};
TEST_F(NodeDefBuilderTest, Simple) {
Op(OpDefBuilder("Simple").Input("a: int32").Output("out: float"));
ExpectSuccess(Builder().Input("x", 0, DT_INT32), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "x")proto");
ExpectSuccess(Builder().Input("y", 2, DT_INT32), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "y:2")proto");
ExpectSuccess(Builder().Input(FakeInput()), {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "a")proto");
ExpectSuccess(Builder().Input(FakeInput(DT_INT32_REF)), {DT_INT32},
{DT_FLOAT}, R"proto(op: "Simple" input: "a")proto");
ExpectSuccess(
Builder().ControlInput("x").Input(FakeInput()).ControlInput("y"),
{DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: [ "a", "^x", "^y" ])proto");
ExpectSuccess(Builder().Input(FakeInput()).Device("ddd"), {DT_INT32},
{DT_FLOAT}, R"proto(
op: "Simple" input: "a" device: "ddd")proto");
ExpectFailure(Builder().Input("x", 0, DT_INT32).Input("y", 0, DT_INT32),
"More Input() calls than the 1 input_args while building "
"NodeDef 'n' using Op<name=Simple; signature=a:int32 -> "
"out:float>");
ExpectFailure(Builder(), "0 inputs specified of 1 inputs in Op while");
{
NodeDefBuilder& builder = Builder();
TF_EXPECT_OK(builder.Input(FakeInput()).Finalize(nullptr));
ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
}
{
NodeDefBuilder& builder = Builder();
ExpectFailure(builder, "0 inputs specified of 1 inputs in Op while");
builder.Input(FakeInput());
ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
builder.Input(FakeInput(DT_INT32));
ExpectFailure(builder, "More Input() calls than the 1 input_args while");
}
ExpectFailure(Builder().Input("x", 0, DT_FLOAT),
"Input 'a' passed float expected int32 ");
ExpectFailure(Builder().Input("x", 0, DT_FLOAT_REF),
"Input 'a' passed float_ref expected int32 ");
ExpectFailure(Builder().Input(FakeInput(3, DT_FLOAT)),
"List provided to input 'a' when single Tensor expected while");
ExpectFailure(Builder().Input(FakeInput(3)),
"List provided to input 'a' when single Tensor expected while");
ExpectInvalid(Builder().Input(FakeInput()).ControlInput("z:2"),
"Control input '^z:2' must not have ':' in NodeDef:");
ExpectFailure(Builder().Input("", 0, DT_INT32),
"Empty input node name while");
ExpectFailure(Builder().Input("^x", 0, DT_INT32),
"Non-control input starting with ^: ^x while");
}
TEST_F(NodeDefBuilderTest, OpDoesNotExist) {
NodeDefBuilder builder("n", "Op Does Not Exist");
builder.Input(FakeInput())
.Input(FakeInput(12))
.ControlInput("y")
.Attr("foo", 12)
.Device("device");
ExpectFailures(builder, {"Op type not registered 'Op Does Not Exist'",
"while building NodeDef 'n'"});
}
TEST_F(NodeDefBuilderTest, Polymorphic) {
Op(OpDefBuilder("Polymorphic")
.Input("v: T")
.Output("out: T")
.Attr("T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)), {DT_INT32}, {DT_INT32},
R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_FLOAT)), {DT_FLOAT}, {DT_FLOAT},
R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_BOOL),
{DT_BOOL}, {DT_BOOL}, R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_STRING),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
ExpectFailure(Builder().Attr("T", DT_STRING).Input(FakeInput(DT_BOOL)),
"Inconsistent values for attr 'T' DT_STRING vs. DT_BOOL while");
ExpectFailure(Builder().Attr("T", 12).Input(FakeInput(DT_BOOL)),
"Inconsistent values for attr 'T' 12 vs. DT_BOOL while");
}
TEST_F(NodeDefBuilderTest, PolymorphicOut) {
Op(OpDefBuilder("PolymorphicOut").Output("out: T").Attr("T: type"));
ExpectSuccess(Builder().Attr("T", DT_INT32), {}, {DT_INT32}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Attr("T", DT_FLOAT), {}, {DT_FLOAT}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectSuccess(Builder().Attr("T", DT_FLOAT).Attr("T", DT_FLOAT), {},
{DT_FLOAT}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectFailure(Builder().Attr("T", DT_BOOL).Attr("T", DT_FLOAT),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_FLOAT while");
ExpectInvalid(Builder(), "NodeDef missing attr 'T' from");
ExpectInvalid(
Builder().Attr("T", {DT_INT32, DT_BOOL}),
"AttrValue had value with type 'list(type)' when 'type' expected");
ExpectInvalid(Builder().Attr("T", 12),
"AttrValue had value with type 'int' when 'type' expected");
}
TEST_F(NodeDefBuilderTest, PolymorphicDefaultOut) {
Op(OpDefBuilder("PolymorphicDefaultOut")
.Output("out: T")
.Attr("T: type = DT_STRING"));
ExpectSuccess(Builder(), {}, {DT_STRING}, R"proto(
op: "PolymorphicDefaultOut"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectSuccess(Builder().Attr("T", DT_BOOL), {}, {DT_BOOL}, R"proto(
op: "PolymorphicDefaultOut"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
}
TEST_F(NodeDefBuilderTest, Binary) {
Op(OpDefBuilder("Binary").Input("a: T").Input("b: T").Output("out: T").Attr(
"T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)).Input(FakeInput(DT_INT32)),
{DT_INT32, DT_INT32}, {DT_INT32}, R"proto(
op: "Binary"
input: "a"
input: "b"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_STRING)).Input(FakeInput()),
{DT_STRING, DT_STRING}, {DT_STRING}, R"proto(
op: "Binary"
input: "a"
input: "b"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Input(FakeInput(DT_STRING)),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
}
TEST_F(NodeDefBuilderTest, Restrict) {
Op(OpDefBuilder("Restrict")
.Input("a: T")
.Output("out: T")
.Attr("T: {string, bool}"));
ExpectSuccess(Builder().Input(FakeInput(DT_STRING)), {DT_STRING}, {DT_STRING},
R"proto(
op: "Restrict"
input: "a"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Input(FakeInput(DT_INT32)),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, TypeList) {
Op(OpDefBuilder("TypeList").Input("a: T").Attr("T: list(type)"));
ExpectSuccess(Builder().Input(FakeInput({DT_STRING, DT_INT32})),
{DT_STRING, DT_INT32}, {}, R"proto(
op: "TypeList"
input: [ "a", "a:1" ]
attr {
key: "T"
value { list { type: [ DT_STRING, DT_INT32 ] } }
}
)proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_BOOL)),
{DT_BOOL, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "TypeList"
input: [ "a", "a:1", "a:2" ]
attr {
key: "T"
value { list { type: [ DT_BOOL, DT_BOOL, DT_BOOL ] } }
}
)proto");
ExpectInvalid(Builder().Input(FakeInput(0)),
"Length for attr 'T' of 0 must be at least minimum 1");
ExpectInvalid(Builder().Input(FakeInput({})),
"Length for attr 'T' of 0 must be at least minimum 1");
ExpectInvalid(Builder().Input(FakeInput(DT_BOOL)),
"Single tensor passed to 'a', expected list while");
ExpectFailures(Builder().Input(FakeInput()),
{"2 errors while building NodeDef",
"Could not infer list of types for input 'a': "
"No attr named 'T' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
}
TEST_F(NodeDefBuilderTest, TypeListNoMin) {
Op(OpDefBuilder("TypeListNoMin").Input("a: T").Attr("T: list(type) >= 0"));
ExpectSuccess(Builder().Input(FakeInput(0)), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DataTypeVector())), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput({})), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput({DT_BOOL})), {DT_BOOL}, {}, R"proto(
op: "TypeListNoMin"
input: "a"
attr {
key: "T"
value { list { type: DT_BOOL } }
})proto");
}
TEST_F(NodeDefBuilderTest, TypeListTwice) {
Op(OpDefBuilder("TypeListTwice")
.Input("a: T")
.Input("b: T")
.Attr("T: list(type) >= 0"));
ExpectSuccess(Builder()
.Input(FakeInput({DT_INT32, DT_BOOL}))
.Input(FakeInput({DT_INT32, DT_BOOL})),
{DT_INT32, DT_BOOL, DT_INT32, DT_BOOL}, {}, R"proto(
op: "TypeListTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "T"
value { list { type: [ DT_INT32, DT_BOOL ] } }
})proto");
ExpectSuccess(
Builder().Input(FakeInput({DT_INT32, DT_BOOL})).Input(FakeInput()),
{DT_INT32, DT_BOOL, DT_INT32, DT_BOOL}, {}, R"proto(
op: "TypeListTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "T"
value { list { type: [ DT_INT32, DT_BOOL ] } }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput(0)), {}, {},
R"proto(
op: "TypeListTwice"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput()), {}, {},
R"proto(
op: "TypeListTwice"
attr {
key: "T"
value { list {} }
})proto");
ExpectFailure(Builder()
.Input(FakeInput({DT_INT32, DT_BOOL}))
.Input(FakeInput({DT_INT32, DT_STRING})),
"Inconsistent values for attr 'T' [DT_INT32, DT_BOOL] vs. "
"[DT_INT32, DT_STRING] while");
}
TEST_F(NodeDefBuilderTest, OutTypeList) {
Op(OpDefBuilder("OutTypeList").Output("out: T").Attr("T: list(type) >= 0"));
ExpectSuccess(Builder().Attr("T", {DT_FLOAT}), {}, {DT_FLOAT}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list { type: DT_FLOAT } }
})proto");
ExpectSuccess(Builder().Attr("T", {DT_STRING, DT_BOOL}), {},
{DT_STRING, DT_BOOL}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list { type: [ DT_STRING, DT_BOOL ] } }
})proto");
ExpectSuccess(Builder().Attr("T", DataTypeVector()), {}, {}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list {} }
})proto");
ExpectInvalid(
Builder().Attr("T", DT_FLOAT),
"AttrValue had value with type 'type' when 'list(type)' expected");
}
TEST_F(NodeDefBuilderTest, TypeListRestrict) {
Op(OpDefBuilder("TypeListRestrict")
.Input("a: T")
.Attr("T: list({string, bool}) >= 0"));
ExpectSuccess(Builder().Input(FakeInput({DT_STRING, DT_BOOL})),
{DT_STRING, DT_BOOL}, {}, R"proto(
op: "TypeListRestrict"
input: [ "a", "a:1" ]
attr {
key: "T"
value { list { type: [ DT_STRING, DT_BOOL ] } }
})proto");
ExpectInvalid(Builder().Input(FakeInput({DT_STRING, DT_INT32})),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, OutTypeListRestrict) {
Op(OpDefBuilder("OutTypeListRestrict")
.Output("out: t")
.Attr("t: list({string, bool}) >= 0"));
ExpectSuccess(Builder().Attr("t", {DT_BOOL, DT_STRING}), {},
{DT_BOOL, DT_STRING}, R"proto(
op: "OutTypeListRestrict"
attr {
key: "t"
value { list { type: [ DT_BOOL, DT_STRING ] } }
})proto");
ExpectInvalid(Builder().Attr("t", {DT_STRING, DT_INT32}),
"Value for attr 't' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, Attr) {
Op(OpDefBuilder("Attr").Attr("a: int"));
ExpectSuccess(Builder().Attr("a", 12), {}, {}, R"proto(
op: "Attr"
attr {
key: "a"
value { i: 12 }
})proto");
ExpectInvalid(Builder().Attr("a", "bad"),
"AttrValue had value with type 'string' when 'int' expected");
ExpectInvalid(
Builder().Attr("a", {12}),
"AttrValue had value with type 'list(int)' when 'int' expected");
ExpectInvalid(Builder(), "NodeDef missing attr 'a' from Op<");
ExpectSuccess(Builder().Attr("a", 10).Attr("b", 12), {}, {},
R"proto(
op: "Attr"
attr {
key: "a"
value { i: 10 }
}
attr {
key: "b"
value { i: 12 }
}
)proto");
}
TEST_F(NodeDefBuilderTest, AttrFloat) {
Op(OpDefBuilder("AttrFloat").Attr("a: float"));
ExpectSuccess(Builder().Attr("a", 1.2f ), {}, {}, R"proto(
op: "AttrFloat"
attr {
key: "a"
value { f: 1.2 }
}
)proto");
ExpectSuccess(Builder().Attr("a", 1.2 ), {}, {}, R"proto(
op: "AttrFloat"
attr {
key: "a"
value { f: 1.2 }
}
)proto");
ExpectInvalid(Builder().Attr("a", 12),
"AttrValue had value with type 'int' when 'float' expected");
}
TEST_F(NodeDefBuilderTest, AttrBoolList) {
Op(OpDefBuilder("AttrBoolList").Attr("a: list(bool)"));
ExpectSuccess(Builder().Attr("a", {true, false, true}), {}, {}, R"proto(
op: "AttrBoolList"
attr {
key: "a"
value { list { b: [ true, false, true ] } }
}
)proto");
ExpectSuccess(Builder().Attr("a", std::vector<bool>()), {}, {}, R"proto(
op: "AttrBoolList"
attr {
key: "a"
value { list {} }
}
)proto");
ExpectInvalid(Builder().Attr("a", {0}),
"AttrValue had value with type 'list(int)' when 'list(bool)' "
"expected");
}
TEST_F(NodeDefBuilderTest, AttrMin) {
Op(OpDefBuilder("AttrMin").Attr("a: int >= 5"));
ExpectSuccess(Builder().Attr("a", 12), {}, {}, R"proto(
op: "AttrMin"
attr {
key: "a"
value { i: 12 }
})proto");
ExpectInvalid(Builder().Attr("a", 2),
"Value for attr 'a' of 2 must be at least minimum 5");
}
TEST_F(NodeDefBuilderTest, AttrListMin) {
Op(OpDefBuilder("AttrListMin").Attr("a: list(int) >= 2"));
ExpectSuccess(Builder().Attr("a", {1, 2}), {}, {}, R"proto(
op: "AttrListMin"
attr {
key: "a"
value { list { i: [ 1, 2 ] } }
})proto");
ExpectInvalid(Builder().Attr("a", {17}),
"Length for attr 'a' of 1 must be at least minimum 2");
}
TEST_F(NodeDefBuilderTest, AttrEnum) {
Op(OpDefBuilder("AttrEnum").Attr("a: {'apples', 'oranges'}"));
ExpectSuccess(Builder().Attr("a", "oranges"), {}, {}, R"proto(
op: "AttrEnum"
attr {
key: "a"
value { s: "oranges" }
})proto");
ExpectInvalid(
Builder().Attr("a", "invalid"),
"Value for attr 'a' of \"invalid\" is not in the list of allowed values: "
"\"apples\", \"oranges\"");
}
TEST_F(NodeDefBuilderTest, AttrEnumList) {
Op(OpDefBuilder("AttrEnumList").Attr("a: list({'apples', 'oranges'})"));
ExpectSuccess(Builder().Attr("a", {"oranges", "apples"}), {}, {}, R"proto(
op: "AttrEnumList"
attr {
key: "a"
value { list { s: [ "oranges", "apples" ] } }
})proto");
ExpectInvalid(
Builder().Attr("a", {"apples", "invalid", "oranges"}),
"Value for attr 'a' of \"invalid\" is not in the list of allowed values: "
"\"apples\", \"oranges\"");
}
TEST_F(NodeDefBuilderTest, AttrShape) {
Op(OpDefBuilder("AttrShape").Attr("a: shape"));
ExpectSuccess(Builder().Attr("a", TensorShape({5})), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value { shape { dim { size: 5 } } }
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape({4, 3, 2})), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value {
shape {
dim { size: 4 }
dim { size: 3 }
dim { size: 2 }
}
}
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape({3, 2})), {}, {},
R"proto(
op: "AttrShape"
attr {
key: "a"
value {
shape {
dim { size: 3 }
dim { size: 2 }
}
}
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape()), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value { shape {} }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrDefault) {
Op(OpDefBuilder("AttrDefault").Attr("a: string = 'banana'"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrDefault"
attr {
key: "a"
value { s: "banana" }
})proto");
ExpectSuccess(Builder().Attr("a", "kiwi"), {}, {}, R"proto(
op: "AttrDefault"
attr {
key: "a"
value { s: "kiwi" }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrManyDefault) {
Op(OpDefBuilder("AttrManyDefault")
.Attr("a: string = 'banana'")
.Attr("b: string = 'kiwi'"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrManyDefault"
attr {
key: "a"
value { s: "banana" }
}
attr {
key: "b"
value { s: "kiwi" }
})proto");
Op(OpDefBuilder("AttrManyDefaultWithMandatory")
.Attr("a: string = 'banana'")
.Attr("b: string = 'kiwi'")
.Attr("c: string"));
ExpectSuccess(Builder().Attr("c", "strawberry"), {}, {}, R"proto(
op: "AttrManyDefaultWithMandatory"
attr {
key: "c"
value { s: "strawberry" }
}
attr {
key: "a"
value { s: "banana" }
}
attr {
key: "b"
value { s: "kiwi" }
})proto");
Op(OpDefBuilder("AttrManyDefaultAndInferred")
.Input("input: T")
.Attr("T: {float, double}")
.Attr("a: string")
.Attr("b: list(string) >= 1")
.Attr("c: bool = true")
.Attr("d: float = 0.3")
.Attr("e: string")
.Attr("f: float = 0.25"));
ExpectSuccess(Builder()
.Input(FakeInput(DT_FLOAT))
.Attr("a", "foo")
.Attr("e", "foo")
.Attr("b", std::vector<string>({"bar", "baz"}))
.Attr("f", 1.0f),
{DT_FLOAT}, {}, R"proto(
op: "AttrManyDefaultAndInferred"
input: "a"
attr {
key: "T"
value { type: DT_FLOAT }
}
attr {
key: "a"
value { s: "foo" }
}
attr {
key: "e"
value { s: "foo" }
}
attr {
key: "b"
value { list { s: "bar" s: "baz" } }
}
attr {
key: "f"
value { f: 1.0 }
}
attr {
key: "c"
value { b: true }
}
attr {
key: "d"
value { f: 0.3 }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrListDefault) {
Op(OpDefBuilder("AttrListDefault").Attr("a: list(int) = [5, 15]"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list { i: [ 5, 15 ] } }
})proto");
ExpectSuccess(Builder().Attr("a", {3}), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list { i: 3 } }
})proto");
ExpectSuccess(Builder().Attr("a", std::vector<int>()), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list {} }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrEmptyListDefault) {
Op(OpDefBuilder("AttrEmptyListDefault").Attr("a: list(int) = []"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list {} }
})proto");
ExpectSuccess(Builder().Attr("a", {3}), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list { i: 3 } }
})proto");
ExpectSuccess(Builder().Attr("a", std::vector<int>()), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list {} }
})proto");
}
TEST_F(NodeDefBuilderTest, NIntsIn) {
Op(OpDefBuilder("NIntsIn").Input("a: N*int32").Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2)), {DT_INT32, DT_INT32}, {},
R"proto(
op: "NIntsIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(5, DT_INT32)),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "NIntsIn"
input: [ "a", "a:1", "a:2", "a:3", "a:4" ]
attr {
key: "N"
value { i: 5 }
})proto");
ExpectFailures(Builder().Input(FakeInput(2, DT_STRING)),
{"2 errors while building NodeDef",
"Input 'a' passed string expected int32"});
ExpectInvalid(Builder().Input(FakeInput(1)),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectFailures(
Builder().Input(FakeInput(DT_INT32)),
{"2 errors while building NodeDef",
"Could not infer length of input 'a': No attr named 'N' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
ExpectFailure(Builder().Input({{"in", 0, DT_INT32}, {"in", 1, DT_STRING}}),
"Input 'a' passed string expected int32 while");
ExpectFailures(
Builder().Input(FakeInput()),
{"2 errors while building NodeDef",
"Could not infer length of input 'a': No attr named 'N' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
}
TEST_F(NodeDefBuilderTest, NPolymorphicIn) {
Op(OpDefBuilder("NPolymorphicIn")
.Input("a: N*T")
.Attr("T: type")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_INT32)), {DT_INT32, DT_INT32},
{}, R"proto(
op: "NPolymorphicIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_STRING)),
{DT_STRING, DT_STRING, DT_STRING}, {}, R"proto(
op: "NPolymorphicIn"
input: [ "a", "a:1", "a:2" ]
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectFailures(
Builder().Input(FakeInput(2)),
{"2 errors while building NodeDef",
"Could not infer type for input 'a': No attr named 'T' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
ExpectFailure(Builder().Input(FakeInput({DT_INT32, DT_STRING})),
"Input 'a' passed string expected int32 while");
ExpectFailure(Builder().Input({{"in", 0, DT_INT32}, {"in", 1, DT_STRING}}),
"Input 'a' passed string expected int32 while");
ExpectInvalid(Builder().Input(FakeInput(1, DT_INT32)),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectFailure(Builder().Input("in", 0, DT_INT32),
"Single tensor passed to 'a', expected list while");
}
TEST_F(NodeDefBuilderTest, NPolymorphicRestrictIn) {
Op(OpDefBuilder("NPolymorphicRestrictIn")
.Input("a: N*T")
.Attr("T: {string, bool}")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_BOOL)), {DT_BOOL, DT_BOOL}, {},
R"proto(
op: "NPolymorphicRestrictIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_STRING)),
{DT_STRING, DT_STRING, DT_STRING}, {}, R"proto(
op: "NPolymorphicRestrictIn"
input: [ "a", "a:1", "a:2" ]
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Input(FakeInput(2, DT_INT32)),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, NInTwice) {
Op(OpDefBuilder("NInTwice")
.Input("a: N*int32")
.Input("b: N*string")
.Attr("N: int >= 0"));
ExpectSuccess(Builder().Input(FakeInput(2)).Input(FakeInput(2)),
{DT_INT32, DT_INT32, DT_STRING, DT_STRING}, {}, R"proto(
op: "NInTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput()), {}, {},
R"proto(
op: "NInTwice"
attr {
key: "N"
value { i: 0 }
})proto");
ExpectFailure(Builder().Input(FakeInput(3)).Input(FakeInput(1)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
}
TEST_F(NodeDefBuilderTest, NInPolymorphicTwice) {
Op(OpDefBuilder("NInPolymorphicTwice")
.Input("a: N*T")
.Input("b: N*T")
.Attr("T: type")
.Attr("N: int >= 0"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput()),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "NInPolymorphicTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectFailure(
Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1, DT_INT32)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
ExpectFailure(Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
}
TEST_F(NodeDefBuilderTest, NInTwoTypeVariables) {
Op(OpDefBuilder("NInTwoTypeVariables")
.Input("a: N*S")
.Input("b: N*T")
.Attr("S: type")
.Attr("T: type")
.Attr("N: int >= 0"));
ExpectSuccess(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_BOOL)),
{DT_INT32, DT_INT32, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "NInTwoTypeVariables"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "S"
value { type: DT_INT32 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(DT_BOOL)),
{DT_INT32, DT_INT32, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "NInTwoTypeVariables"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "S"
value { type: DT_INT32 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(
Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1, DT_STRING)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
}
TEST_F(NodeDefBuilderTest, InPolymorphicTwice) {
Op(OpDefBuilder("InPolymorphicTwice")
.Input("a: N*T")
.Input("b: M*T")
.Attr("T: type")
.Attr("N: int >= 0")
.Attr("M: int >= 0"));
ExpectSuccess(
Builder().Input(FakeInput(1, DT_INT32)).Input(FakeInput(3, DT_INT32)),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "InPolymorphicTwice"
input: [ "a", "b", "b:1", "b:2" ]
attr {
key: "N"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "M"
value { i: 3 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(1, DT_BOOL)).Input(FakeInput(0)),
{DT_BOOL}, {}, R"proto(
op: "InPolymorphicTwice"
input: "a"
attr {
key: "N"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_BOOL }
}
attr {
key: "M"
value { i: 0 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput(1, DT_BOOL)),
{DT_BOOL}, {}, R"proto(
op: "InPolymorphicTwice"
input: "b"
attr {
key: "N"
value { i: 0 }
}
attr {
key: "M"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
}
TEST_F(NodeDefBuilderTest, NIntsOut) {
Op(OpDefBuilder("NIntsOut").Output("a: N*int32").Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("N", 2), {}, {DT_INT32, DT_INT32}, R"proto(
op: "NIntsOut"
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3), {}, {DT_INT32, DT_INT32, DT_INT32},
R"proto(
op: "NIntsOut"
attr {
key: "N"
value { i: 3 }
})proto");
ExpectInvalid(Builder().Attr("N", 1),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectInvalid(
Builder().Attr("N", {3}),
"AttrValue had value with type 'list(int)' when 'int' expected");
ExpectInvalid(Builder(), "NodeDef missing attr 'N' from");
}
TEST_F(NodeDefBuilderTest, NIntsOutDefault) {
Op(OpDefBuilder("NIntsOutDefault")
.Output("a: N*int32")
.Attr("N: int >= 2 = 3"));
ExpectSuccess(Builder(), {}, {DT_INT32, DT_INT32, DT_INT32}, R"proto(
op: "NIntsOutDefault"
attr {
key: "N"
value { i: 3 }
})proto");
ExpectSuccess(Builder().Attr("N", 2), {}, {DT_INT32, DT_INT32}, R"proto(
op: "NIntsOutDefault"
attr {
key: "N"
value { i: 2 }
})proto");
}
TEST_F(NodeDefBuilderTest, NPolymorphicOut) {
Op(OpDefBuilder("NPolymorphicOut")
.Output("a: N*T")
.Attr("T: type")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("T", DT_INT32).Attr("N", 2), {},
{DT_INT32, DT_INT32}, R"proto(
op: "NPolymorphicOut"
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_STRING), {},
{DT_STRING, DT_STRING, DT_STRING}, R"proto(
op: "NPolymorphicOut"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Attr("N", 1).Attr("T", DT_STRING),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectInvalid(
Builder().Attr("N", 3).Attr("T", {DT_STRING}),
"AttrValue had value with type 'list(type)' when 'type' expected");
}
TEST_F(NodeDefBuilderTest, NPolymorphicOutDefault) {
Op(OpDefBuilder("NPolymorphicOutDefault")
.Output("a: N*T")
.Attr("T: type = DT_BOOL")
.Attr("N: int >= 2 = 2"));
ExpectSuccess(Builder(), {}, {DT_BOOL, DT_BOOL}, R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "T"
value { type: DT_BOOL }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3), {}, {DT_BOOL, DT_BOOL, DT_BOOL},
R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(Builder().Attr("T", DT_INT32), {}, {DT_INT32, DT_INT32},
R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_INT32), {},
{DT_INT32, DT_INT32, DT_INT32}, R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
}
TEST_F(NodeDefBuilderTest, NPolymorphicRestrictOut) {
Op(OpDefBuilder("NPolymorphicRestrictOut")
.Output("a: N*T")
.Attr("T: {string, bool}")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_BOOL), {},
{DT_BOOL, DT_BOOL, DT_BOOL}, R"proto(
op: "NPolymorphicRestrictOut"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectInvalid(Builder().Attr("N", 3).Attr("T", DT_INT32),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, RefIn) {
Op(OpDefBuilder("RefIn").Input("a: Ref(int32)"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32_REF)), {DT_INT32_REF}, {},
R"proto(
op: "RefIn" input: "a")proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL_REF)),
"Input 'a' passed bool_ref expected int32_ref while");
ExpectFailure(Builder().Input(FakeInput(DT_INT32)),
"Input 'a' passed int32 expected int32_ref while");
}
TEST_F(NodeDefBuilderTest, PolymorphicRefIn) {
Op(OpDefBuilder("PolymorphicRefIn").Input("a: Ref(T)").Attr("T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_BOOL_REF)), {DT_BOOL_REF}, {},
R"proto(
op: "PolymorphicRefIn"
input: "a"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)),
"Input 'a' passed bool expected ref type while");
}
TEST_F(NodeDefBuilderTest, RefOut) {
Op(OpDefBuilder("RefOut").Output("a: Ref(string)"));
ExpectSuccess(Builder(), {}, {DT_STRING_REF}, R"proto(
op: "RefOut")proto");
}
TEST_F(NodeDefBuilderTest, PolymorphicRefOut) {
Op(OpDefBuilder("PolymorphicRefOut").Output("a: Ref(t)").Attr("t: type"));
ExpectSuccess(Builder().Attr("t", DT_BOOL), {}, {DT_BOOL_REF}, R"proto(
op: "PolymorphicRefOut"
attr {
key: "t"
value { type: DT_BOOL }
})proto");
}
TEST_F(NodeDefBuilderTest, SpecifyDevice) {
Op(OpDefBuilder("SpecifyDevice"));
ExpectSuccess(Builder().Device("ADevice"), {}, {}, R"proto(
op: "SpecifyDevice"
device: "ADevice")proto");
}
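// A minimal sketch (not part of the original suite): finalizing a NodeDef
// directly instead of going through the ExpectSuccess() helper. The op and
// test names are hypothetical.
TEST_F(NodeDefBuilderTest, FinalizeDirectlySketch) {
  Op(OpDefBuilder("FinalizeDirectlySketch").Input("a: int32"));
  NodeDef node_def;
  EXPECT_TRUE(Builder().Input(FakeInput(DT_INT32)).Finalize(&node_def).ok());
  EXPECT_EQ(node_def.op(), "FinalizeDirectlySketch");
}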
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b1761777-ae28-417d-ac87-0c180d616599 | cpp | tensorflow/tensorflow | run_handler | tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_test.cc | #include <algorithm>
#include <atomic>
#include <functional>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#define EIGEN_USE_THREADS
#include <optional>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/lib/core/threadpool_interface.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/denormal.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/setround.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_util.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tsl/platform/tracing.h"
#include "tfrt/host_context/async_dispatch.h"
namespace tfrt {
namespace tf {
namespace {
typedef typename internal::RunHandlerEnvironment::Task Task;
typedef Eigen::RunQueue<Task, 1024> Queue;
}
namespace internal {
RunHandlerEnvironment::RunHandlerEnvironment(
tensorflow::Env* env, const tensorflow::ThreadOptions& thread_options,
const std::string& name)
: env_(env), thread_options_(thread_options), name_(name) {}
RunHandlerEnvironment::EnvThread* RunHandlerEnvironment::CreateThread(
std::function<void()> f) {
return env_->StartThread(thread_options_, name_, [=]() {
tensorflow::port::ScopedFlushDenormal flush;
tensorflow::port::ScopedSetRound round(FE_TONEAREST);
if (thread_options_.numa_node != tensorflow::port::kNUMANoAffinity) {
tensorflow::port::NUMASetThreadNodeAffinity(thread_options_.numa_node);
}
f();
});
}
RunHandlerEnvironment::Task RunHandlerEnvironment::CreateTask(TaskFunction f) {
uint64_t id = 0;
if (tsl::tracing::EventCollector::IsEnabled()) {
id = tsl::tracing::GetUniqueArg();
tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure,
id);
}
return Task{
std::unique_ptr<TaskImpl>(new TaskImpl{
std::move(f),
tensorflow::Context(tensorflow::ContextKind::kThread),
id,
}),
};
}
void RunHandlerEnvironment::ExecuteTask(const Task& t) {
tensorflow::WithContext wc(t.f->context);
tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure,
t.f->trace_id);
t.f->f();
}
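// Links `waiter` into the circular doubly-linked list rooted at `queue_head`
// and parks it on its condition variable for at most `sleep_micros`
// microseconds (scaled by the number of already-waiting threads when
// `adaptive_sleep_time` is set). On wake-up the waiter unlinks itself unless
// an enqueue already removed it from the list.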
void WaitOnWaiter(Waiter* waiter, Waiter* queue_head, tensorflow::mutex* mutex,
int sleep_micros, bool adaptive_sleep_time) {
{
tensorflow::mutex_lock l(*mutex);
CHECK_EQ(waiter->next, waiter);
CHECK_EQ(waiter->prev, waiter);
waiter->prev = queue_head;
waiter->next = queue_head->next;
waiter->next->prev = waiter;
waiter->prev->next = waiter;
}
{
tensorflow::mutex_lock l(waiter->mu);
waiter->num_waiting_threads++;
int max_sleep_micros = adaptive_sleep_time
? sleep_micros * waiter->num_waiting_threads
: sleep_micros;
waiter->cv.wait_for(l, std::chrono::microseconds(max_sleep_micros));
waiter->num_waiting_threads--;
}
tensorflow::mutex_lock l(*mutex);
if (waiter->next != waiter) {
CHECK(waiter->prev != waiter);
waiter->next->prev = waiter->prev;
waiter->prev->next = waiter->next;
waiter->next = waiter;
waiter->prev = waiter;
} else {
CHECK_EQ(waiter->prev, waiter);
}
}
ThreadWorkSource::ThreadWorkSource()
: non_blocking_work_sharding_factor_(
static_cast<int32_t>(ParamFromEnvWithDefault(
"TF_RUN_HANDLER_NUM_OF_NON_BLOCKING_QUEUES", 1))),
non_blocking_work_queues_(non_blocking_work_sharding_factor_),
blocking_inflight_(0),
non_blocking_inflight_(0),
pending_tasks_(0),
traceme_id_(0),
version_(0),
sub_thread_pool_waiter_(nullptr) {
queue_waiters_.next = &queue_waiters_;
queue_waiters_.prev = &queue_waiters_;
for (int i = 0; i < NonBlockingWorkShardingFactor(); ++i) {
non_blocking_work_queues_.emplace_back(new NonBlockingQueue());
}
}
ThreadWorkSource::~ThreadWorkSource() {
for (int i = 0; i < non_blocking_work_queues_.size(); ++i) {
delete non_blocking_work_queues_[i];
}
}
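// Pushes the task onto the blocking queue or one of the sharded non-blocking
// queues. Eigen::RunQueue::PushFront hands the task back when the queue is
// full, in which case the caller (AddWorkToQueue) runs it inline. When
// `enable_wake_up` is set, one parked waiter is unlinked and notified.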
Task ThreadWorkSource::EnqueueTask(Task t, bool is_blocking,
bool enable_wake_up) {
uint64_t id = t.f->trace_id;
tsl::profiler::TraceMe activity(
[id, is_blocking] {
return tsl::profiler::TraceMeEncode(
"Enqueue", {{"id", id}, {"is_blocking", is_blocking}});
},
tsl::profiler::TraceMeLevel::kInfo);
tensorflow::mutex* mu = nullptr;
Queue* task_queue = nullptr;
thread_local int64_t closure_counter = 0;
if (!is_blocking) {
int queue_index = ++closure_counter % non_blocking_work_sharding_factor_;
task_queue = &(non_blocking_work_queues_[queue_index]->queue);
mu = &non_blocking_work_queues_[queue_index]->queue_op_mu;
} else {
task_queue = &blocking_work_queue_;
mu = &blocking_queue_op_mu_;
}
{
tensorflow::mutex_lock l(*mu);
t = task_queue->PushFront(std::move(t));
}
IncrementPendingTaskCount();
if (enable_wake_up) {
Waiter* w = nullptr;
Waiter* waiter_queue;
tensorflow::mutex* waiter_queue_mu;
{
tensorflow::tf_shared_lock lock(run_handler_waiter_mu_);
waiter_queue = sub_thread_pool_waiter_;
waiter_queue_mu = sub_thread_pool_waiter_mu_;
}
{
tensorflow::mutex_lock l(*waiter_queue_mu);
if (waiter_queue->next != waiter_queue) {
w = waiter_queue->next;
CHECK(w->prev != w);
CHECK(w->next != w);
w->next->prev = w->prev;
w->prev->next = w->next;
w->next = w;
w->prev = w;
}
}
if (w != nullptr) {
w->cv.notify_one();
}
}
VLOG(3) << "Added " << (is_blocking ? "inter" : "intra") << " work from "
<< traceme_id_.load(std::memory_order_relaxed);
return t;
}
Task ThreadWorkSource::PopBlockingTask() {
return blocking_work_queue_.PopBack();
}
Task ThreadWorkSource::PopNonBlockingTask(int start_index,
bool search_from_all_queue) {
Task t;
unsigned sharding_factor = NonBlockingWorkShardingFactor();
for (unsigned j = 0; j < sharding_factor; ++j) {
t = non_blocking_work_queues_[(start_index + j) % sharding_factor]
->queue.PopBack();
if (t.f) {
return t;
}
if (!search_from_all_queue) {
break;
}
}
return t;
}
int ThreadWorkSource::TaskQueueSize(bool is_blocking) {
if (is_blocking) {
return blocking_work_queue_.Size();
} else {
unsigned total_size = 0;
for (int i = 0; i < non_blocking_work_sharding_factor_; ++i) {
total_size += non_blocking_work_queues_[i]->queue.Size();
}
return total_size;
}
}
int64_t ThreadWorkSource::GetTracemeId() {
return traceme_id_.load(std::memory_order_relaxed);
}
void ThreadWorkSource::SetTracemeId(int64_t value) { traceme_id_ = value; }
void ThreadWorkSource::SetWaiter(uint64_t version, Waiter* waiter,
tensorflow::mutex* mutex) {
{
tensorflow::tf_shared_lock lock(run_handler_waiter_mu_);
if (sub_thread_pool_waiter_ == waiter) {
return;
}
if (version_ > version) {
return;
}
}
tensorflow::mutex_lock l(run_handler_waiter_mu_);
sub_thread_pool_waiter_ = waiter;
sub_thread_pool_waiter_mu_ = mutex;
version_ = version;
}
int64_t ThreadWorkSource::GetInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
return counter->load(std::memory_order_relaxed);
}
void ThreadWorkSource::IncrementInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
counter->fetch_add(1, std::memory_order_relaxed);
}
void ThreadWorkSource::DecrementInflightTaskCount(bool is_blocking) {
std::atomic<int64_t>* counter =
is_blocking ? &blocking_inflight_ : &non_blocking_inflight_;
counter->fetch_sub(1, std::memory_order_relaxed);
}
int64_t ThreadWorkSource::GetPendingTaskCount() {
return pending_tasks_.load(std::memory_order_acquire);
}
void ThreadWorkSource::IncrementPendingTaskCount() {
pending_tasks_.fetch_add(1, std::memory_order_relaxed);
}
void ThreadWorkSource::DecrementPendingTaskCount() {
pending_tasks_.fetch_sub(1, std::memory_order_release);
}
unsigned ThreadWorkSource::NonBlockingWorkShardingFactor() {
return non_blocking_work_sharding_factor_;
}
std::string ThreadWorkSource::ToString() {
return tensorflow::strings::StrCat(
"traceme_id = ", GetTracemeId(),
", inter queue size = ", TaskQueueSize(true),
", inter inflight = ", GetInflightTaskCount(true),
", intra queue size = ", TaskQueueSize(false),
", intra inflight = ", GetInflightTaskCount(false));
}
RunHandlerThreadPool::RunHandlerThreadPool(
Options options, tensorflow::Env* env,
const tensorflow::ThreadOptions& thread_options, const std::string& name,
Eigen::MaxSizeVector<tensorflow::mutex>* waiters_mu,
Eigen::MaxSizeVector<Waiter>* queue_waiters)
: num_threads_(options.num_blocking_threads +
options.num_non_blocking_threads),
num_blocking_threads_(options.num_blocking_threads),
num_non_blocking_threads_(options.num_non_blocking_threads),
adaptive_sleep_time_(options.use_adaptive_waiting_time),
wait_if_no_active_request_(options.wait_if_no_active_request),
non_blocking_thread_sleep_time_(
options.non_blocking_threads_sleep_time_micro_sec),
blocking_thread_max_waiting_time_(
options.blocking_threads_max_sleep_time_micro_sec),
enable_wake_up_(options.enable_wake_up),
thread_data_(num_threads_),
env_(env, thread_options, name),
name_(name),
waiters_mu_(waiters_mu),
queue_waiters_(queue_waiters),
num_threads_in_sub_thread_pool_(options.num_threads_in_sub_thread_pool),
sub_thread_pool_end_request_percentage_(
options.sub_thread_request_percentage) {
thread_data_.resize(num_threads_);
for (int i = 0; i < num_threads_; ++i) {
thread_data_[i].new_thread_work_sources =
std::make_unique<Eigen::MaxSizeVector<ThreadWorkSource*>>(
options.max_concurrent_handler);
thread_data_[i].current_thread_work_sources =
std::make_unique<Eigen::MaxSizeVector<ThreadWorkSource*>>(
options.max_concurrent_handler);
}
VLOG(1) << "Creating RunHandlerThreadPool " << name << " with "
<< num_blocking_threads_ << " blocking threads and "
<< num_non_blocking_threads_ << " non-blocking threads.";
}
RunHandlerThreadPool::~RunHandlerThreadPool() {
VLOG(1) << "Exiting RunHandlerThreadPool " << name_;
cancelled_ = true;
for (size_t i = 0; i < thread_data_.size(); ++i) {
{
tensorflow::mutex_lock l(thread_data_[i].mu);
thread_data_[i].sources_not_empty.notify_all();
}
thread_data_[i].thread.reset();
}
}
void RunHandlerThreadPool::Start() {
cancelled_ = false;
int num_blocking_threads = num_blocking_threads_;
for (int i = 0; i < num_threads_; i++) {
int sub_thread_pool_id = num_threads_in_sub_thread_pool_.size() - 1;
for (int j = 0; j < num_threads_in_sub_thread_pool_.size(); ++j) {
if (i < num_threads_in_sub_thread_pool_[j]) {
sub_thread_pool_id = j;
break;
}
}
thread_data_[i].sub_thread_pool_id = sub_thread_pool_id;
thread_data_[i].thread.reset(
env_.CreateThread([this, i, num_blocking_threads]() {
WorkerLoop(i, i < num_blocking_threads);
}));
}
}
void RunHandlerThreadPool::StartOneThreadForTesting() {
cancelled_ = false;
thread_data_[0].sub_thread_pool_id = 0;
thread_data_[0].thread.reset(
env_.CreateThread([this]() { WorkerLoop(0, true); }));
}
void RunHandlerThreadPool::AddWorkToQueue(ThreadWorkSource* tws,
bool is_blocking, TaskFunction fn) {
Task t = env_.CreateTask(std::move(fn));
t = tws->EnqueueTask(std::move(t), is_blocking, enable_wake_up_);
if (t.f) {
VLOG(3) << "Running " << (is_blocking ? "inter" : "intra") << " work for "
<< tws->GetTracemeId();
env_.ExecuteTask(t);
}
}
void RunHandlerThreadPool::SetThreadWorkSources(
int tid, uint64_t version,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources) {
tensorflow::mutex_lock l(thread_data_[tid].mu);
if (version > thread_data_[tid].new_version) {
thread_data_[tid].new_version = version;
} else {
return;
}
auto original_size = thread_data_[tid].current_thread_work_sources->size();
thread_data_[tid].new_thread_work_sources->resize(0);
for (int i = 0; i < thread_work_sources.size(); ++i) {
thread_data_[tid].new_thread_work_sources->emplace_back(
thread_work_sources[i]);
}
if (original_size == 0) {
thread_data_[tid].sources_not_empty.notify_all();
}
}
RunHandlerThreadPool::PerThread* RunHandlerThreadPool::GetPerThread() {
thread_local RunHandlerThreadPool::PerThread per_thread_;
RunHandlerThreadPool::PerThread* pt = &per_thread_;
return pt;
}
int RunHandlerThreadPool::CurrentThreadId() const {
const PerThread* pt = const_cast<RunHandlerThreadPool*>(this)->GetPerThread();
if (pt->pool == this) {
return pt->thread_id;
} else {
return -1;
}
}
int RunHandlerThreadPool::NumThreads() const { return num_threads_; }
int RunHandlerThreadPool::NumBlockingThreads() const {
return num_blocking_threads_;
}
int RunHandlerThreadPool::NumNonBlockingThreads() const {
return num_non_blocking_threads_;
}
RunHandlerThreadPool::ThreadData::ThreadData()
: new_version(0), current_index(0), current_version(0) {}
Task RunHandlerThreadPool::FindTask(
int searching_range_start, int searching_range_end, int thread_id,
int sub_thread_pool_id, int max_blocking_inflight,
bool may_steal_blocking_work,
const Eigen::MaxSizeVector<ThreadWorkSource*>& thread_work_sources,
bool* task_from_blocking_queue, ThreadWorkSource** tws) {
Task t;
int current_index = thread_data_[thread_id].current_index;
*task_from_blocking_queue = false;
for (int i = 0; i < searching_range_end - searching_range_start; ++i) {
if (current_index >= searching_range_end ||
current_index < searching_range_start) {
current_index = searching_range_start;
}
*tws = thread_work_sources[current_index];
++current_index;
if (may_steal_blocking_work &&
(*tws)->GetInflightTaskCount(true) < max_blocking_inflight) {
t = (*tws)->PopBlockingTask();
if (t.f) {
*task_from_blocking_queue = true;
break;
}
}
t = (*tws)->PopNonBlockingTask(thread_id, true);
if (t.f) {
break;
}
}
thread_data_[thread_id].current_index = current_index;
return t;
}
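// Worker main loop. A blocking thread first searches the contiguous range of
// requests assigned to its sub thread pool (derived from
// sub_thread_pool_end_request_percentage_) and then falls back to stealing
// from all active requests; non-blocking threads always search the full
// range.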
void RunHandlerThreadPool::WorkerLoop(int thread_id,
bool may_steal_blocking_work) {
PerThread* pt = GetPerThread();
pt->pool = this;
pt->thread_id = thread_id;
static constexpr int32_t kMaxBlockingInflight = 10;
while (!cancelled_) {
Task t;
ThreadWorkSource* tws = nullptr;
bool task_from_blocking_queue = true;
int sub_thread_pool_id;
{
tensorflow::mutex_lock l(thread_data_[thread_id].mu);
if (thread_data_[thread_id].current_version <
thread_data_[thread_id].new_version) {
thread_data_[thread_id].current_version =
thread_data_[thread_id].new_version;
thread_data_[thread_id].current_thread_work_sources.swap(
thread_data_[thread_id].new_thread_work_sources);
}
}
Eigen::MaxSizeVector<ThreadWorkSource*>* thread_work_sources =
thread_data_[thread_id].current_thread_work_sources.get();
sub_thread_pool_id = thread_data_[thread_id].sub_thread_pool_id;
int active_requests = thread_work_sources->size();
if (may_steal_blocking_work) {
int search_range_start =
sub_thread_pool_id == 0
? 0
: active_requests *
sub_thread_pool_end_request_percentage_[sub_thread_pool_id -
1];
int search_range_end =
active_requests *
sub_thread_pool_end_request_percentage_[sub_thread_pool_id];
search_range_end = std::min(
active_requests, std::max(search_range_end, search_range_start + 1));
      t = FindTask(search_range_start, search_range_end, thread_id,
                   sub_thread_pool_id, kMaxBlockingInflight,
                   /*may_steal_blocking_work=*/true, *thread_work_sources,
                   &task_from_blocking_queue, &tws);
      if (!t.f) {
        t = FindTask(0, active_requests, thread_id, sub_thread_pool_id,
                     kMaxBlockingInflight,
                     /*may_steal_blocking_work=*/true, *thread_work_sources,
                     &task_from_blocking_queue, &tws);
      }
    } else {
      t = FindTask(0, active_requests, thread_id, sub_thread_pool_id,
                   kMaxBlockingInflight,
                   /*may_steal_blocking_work=*/false, *thread_work_sources,
                   &task_from_blocking_queue, &tws);
}
if (t.f) {
VLOG(2) << "Running " << (task_from_blocking_queue ? "inter" : "intra")
<< " work from " << tws->GetTracemeId();
tws->IncrementInflightTaskCount(task_from_blocking_queue);
env_.ExecuteTask(t);
tws->DecrementInflightTaskCount(task_from_blocking_queue);
tws->DecrementPendingTaskCount();
} else {
tsl::profiler::TraceMe activity(
[thread_id] {
return tsl::profiler::TraceMeEncode("Sleeping",
{{"thread_id", thread_id}});
},
tsl::profiler::TraceMeLevel::kInfo);
if (VLOG_IS_ON(4)) {
for (int i = 0; i < thread_work_sources->size(); ++i) {
VLOG(4) << "source id " << i << " "
<< (*thread_work_sources)[i]->ToString();
}
}
WaitForWorkInSubThreadPool(thread_id, may_steal_blocking_work,
sub_thread_pool_id);
}
}
}
void RunHandlerThreadPool::WaitForWorkInSubThreadPool(int thread_id,
bool is_blocking,
int sub_thread_pool_id) {
if (wait_if_no_active_request_) {
tensorflow::mutex_lock l(thread_data_[thread_id].mu);
if (thread_data_[thread_id].new_version >
thread_data_[thread_id].current_version) {
thread_data_[thread_id].current_thread_work_sources.swap(
thread_data_[thread_id].new_thread_work_sources);
thread_data_[thread_id].current_version =
thread_data_[thread_id].new_version;
}
Eigen::MaxSizeVector<ThreadWorkSource*>* thread_work_sources =
thread_data_[thread_id].current_thread_work_sources.get();
bool already_wait = false;
while (!cancelled_ && thread_work_sources->empty()) {
thread_data_[thread_id].sources_not_empty.wait(l);
if (thread_data_[thread_id].new_version >
thread_data_[thread_id].current_version) {
thread_data_[thread_id].current_thread_work_sources.swap(
thread_data_[thread_id].new_thread_work_sources);
thread_data_[thread_id].current_version =
thread_data_[thread_id].new_version;
thread_work_sources =
thread_data_[thread_id].current_thread_work_sources.get();
}
already_wait = true;
}
if (already_wait || cancelled_) {
return;
}
}
if (!is_blocking) {
tensorflow::Env::Default()->SleepForMicroseconds(
non_blocking_thread_sleep_time_);
return;
}
if (enable_wake_up_) {
thread_local Waiter waiter;
WaitOnWaiter(&waiter, &(*queue_waiters_)[sub_thread_pool_id],
&(*waiters_mu_)[sub_thread_pool_id],
blocking_thread_max_waiting_time_, adaptive_sleep_time_);
} else {
tensorflow::Env::Default()->SleepForMicroseconds(
blocking_thread_max_waiting_time_);
}
}
}
class RunHandler::Impl {
public:
explicit Impl(RunHandlerPool::Impl* pool_impl);
~Impl() = default;
uint64_t start_time_us() const { return start_time_us_; }
int64_t step_id() const { return step_id_; }
void ScheduleInterOpClosure(TaskFunction fn);
void ScheduleIntraOpClosure(TaskFunction fn);
void Reset(int64_t step_id, const RunHandlerOptions& options);
RunHandlerPool::Impl* pool_impl() { return pool_impl_; }
tensorflow::thread::ThreadPoolInterface* thread_pool_interface() {
return &eigen_thread_pool_;
}
internal::ThreadWorkSource* tws() { return &tws_; }
int64_t priority() const { return options_.priority; }
private:
class RunHandlerEigenThreadPool
: public tensorflow::thread::ThreadPoolInterface {
public:
explicit RunHandlerEigenThreadPool(RunHandler::Impl* run_handler)
: run_handler_(run_handler) {
DCHECK(run_handler);
}
void Schedule(std::function<void()> fn) override {
run_handler_->ScheduleIntraOpClosure(tensorflow::tfrt_stub::WrapWork(
run_handler_->tws()->GetTracemeId(), "intra", std::move(fn)));
}
int NumThreads() const override;
int CurrentThreadId() const override;
private:
RunHandler::Impl* run_handler_;
};
RunHandlerPool::Impl* pool_impl_;
RunHandlerEigenThreadPool eigen_thread_pool_;
uint64_t start_time_us_;
int64_t step_id_;
internal::ThreadWorkSource tws_;
RunHandlerOptions options_;
};
class RunHandlerPool::Impl {
public:
explicit Impl(Options options)
: max_handlers_(options.max_concurrent_handler),
waiters_mu_(options.num_sub_thread_pool),
queue_waiters_(options.num_sub_thread_pool),
run_handler_thread_pool_(new internal::RunHandlerThreadPool(
internal::RunHandlerThreadPool::Options(
options.num_inter_op_threads, options.num_intra_op_threads,
options.wait_if_no_active_request,
options.non_blocking_threads_sleep_time_micro_sec,
options.blocking_threads_max_sleep_time_micro_sec,
options.use_adaptive_waiting_time, options.enable_wake_up,
options.max_concurrent_handler,
options.num_threads_in_sub_thread_pool,
options.sub_thread_request_percentage),
tensorflow::Env::Default(), tensorflow::ThreadOptions(),
"tf_run_handler_pool", &waiters_mu_, &queue_waiters_)),
iterations_(0),
version_(0),
wait_if_no_active_request_(options.wait_if_no_active_request),
sub_thread_pool_end_request_percentage_(
options.sub_thread_request_percentage) {
VLOG(1) << "Creating a RunHandlerPool with max handlers: " << max_handlers_;
free_handlers_.reserve(max_handlers_);
handlers_.reserve(max_handlers_);
for (int i = 0; i < max_handlers_; ++i) {
handlers_.emplace_back(new RunHandler::Impl(this));
free_handlers_.push_back(handlers_.back().get());
}
queue_waiters_.resize(options.num_sub_thread_pool);
waiters_mu_.resize(options.num_sub_thread_pool);
for (auto& queue_waiter : queue_waiters_) {
queue_waiter.next = &queue_waiter;
queue_waiter.prev = &queue_waiter;
}
run_handler_thread_pool_->Start();
}
~Impl() {
DCHECK_EQ(handlers_.size(), max_handlers_);
DCHECK_EQ(free_handlers_.size(), handlers_.size());
DCHECK_EQ(sorted_active_handlers_.size(), 0);
run_handler_thread_pool_.reset();
}
internal::RunHandlerThreadPool* run_handler_thread_pool() {
return run_handler_thread_pool_.get();
}
bool has_free_handler() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return !free_handlers_.empty();
}
std::unique_ptr<RunHandler> Get(int64_t step_id, int64_t timeout_in_ms,
const RunHandlerOptions& options)
TF_LOCKS_EXCLUDED(mu_) {
thread_local auto thread_work_sources =
std::make_unique<Eigen::MaxSizeVector<internal::ThreadWorkSource*>>(
max_handlers_);
uint64_t version;
int num_active_requests;
RunHandler::Impl* handler_impl;
{
tensorflow::mutex_lock l(mu_);
if (!has_free_handler()) {
tsl::profiler::TraceMe activity(
[step_id] {
return tsl::profiler::TraceMeEncode("WaitingForHandler",
{{"step_id", step_id}});
},
tsl::profiler::TraceMeLevel::kInfo);
if (timeout_in_ms == 0) {
mu_.Await(tensorflow::Condition(this, &Impl::has_free_handler));
} else if (!mu_.AwaitWithDeadline(
tensorflow::Condition(this, &Impl::has_free_handler),
tensorflow::EnvTime::NowNanos() +
timeout_in_ms * 1000 * 1000)) {
return nullptr;
}
}
handler_impl = free_handlers_.back();
handler_impl->Reset(step_id, options);
free_handlers_.pop_back();
num_active_requests = sorted_active_handlers_.size() + 1;
thread_work_sources->resize(num_active_requests);
int priority = options.priority;
auto it = sorted_active_handlers_.cbegin();
bool new_handler_inserted = false;
for (int i = 0; i < num_active_requests; ++i) {
        if (!new_handler_inserted && (it == sorted_active_handlers_.cend() ||
                                      priority > (*it)->priority())) {
          sorted_active_handlers_.insert(it, handler_impl);
          new_handler_inserted = true;
          // Point `it` at the newly inserted handler so its work source is
          // also copied into thread_work_sources below.
          --it;
        }
(*thread_work_sources)[i] = (*it)->tws();
++it;
}
version = ++version_;
}
RecomputePoolStats(num_active_requests, version, *thread_work_sources);
return std::unique_ptr<RunHandler>(new RunHandler(handler_impl));
}
void ReleaseHandler(RunHandler::Impl* handler) TF_LOCKS_EXCLUDED(mu_) {
tensorflow::mutex_lock l(mu_);
DCHECK_GT(sorted_active_handlers_.size(), 0);
CHECK_EQ(handler->tws()->TaskQueueSize(true), 0);
CHECK_EQ(handler->tws()->TaskQueueSize(false), 0);
uint64_t now = tensorflow::EnvTime::NowMicros();
double elapsed = (now - handler->start_time_us()) / 1000.0;
time_hist_.Add(elapsed);
auto iter = std::find(sorted_active_handlers_.begin(),
sorted_active_handlers_.end(), handler);
DCHECK(iter != sorted_active_handlers_.end())
<< "Unexpected handler: " << handler
<< " is being requested for release";
sorted_active_handlers_.erase(iter);
free_handlers_.push_back(handler);
DCHECK_LE(free_handlers_.size(), max_handlers_);
LogInfo();
if (wait_if_no_active_request_ && sorted_active_handlers_.empty()) {
thread_local auto thread_work_sources =
std::make_unique<Eigen::MaxSizeVector<internal::ThreadWorkSource*>>(
max_handlers_);
thread_work_sources->resize(0);
auto version = ++version_;
RecomputePoolStats(0, version, *thread_work_sources);
}
}
std::vector<int64_t> GetActiveHandlerPrioritiesForTesting()
TF_LOCKS_EXCLUDED(mu_) {
tensorflow::mutex_lock l(mu_);
std::vector<int64_t> ret;
for (const auto& handler_impl : sorted_active_handlers_) {
ret.push_back(handler_impl->priority());
}
return ret;
}
void Quiesce() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
{
tensorflow::tf_shared_lock l(mu_);
bool all_empty = true;
for (const auto& handler : sorted_active_handlers_) {
if (handler->tws()->GetPendingTaskCount() != 0) {
all_empty = false;
break;
}
}
if (all_empty) {
break;
}
}
const int sleep_time = 50000;
tensorflow::Env::Default()->SleepForMicroseconds(sleep_time);
}
}
private:
void RecomputePoolStats(
int num_active_requests, uint64_t version,
const Eigen::MaxSizeVector<internal::ThreadWorkSource*>&
thread_work_sources);
void LogInfo() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const int max_handlers_;
Eigen::MaxSizeVector<tensorflow::mutex> waiters_mu_;
Eigen::MaxSizeVector<internal::Waiter> queue_waiters_;
std::unique_ptr<internal::RunHandlerThreadPool> run_handler_thread_pool_;
std::list<RunHandler::Impl*> sorted_active_handlers_ TF_GUARDED_BY(mu_);
std::vector<RunHandler::Impl*> free_handlers_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<RunHandler::Impl>> handlers_ TF_GUARDED_BY(mu_);
tensorflow::histogram::Histogram time_hist_ TF_GUARDED_BY(mu_);
int64_t iterations_ TF_GUARDED_BY(mu_);
tensorflow::mutex mu_;
int64_t version_ TF_GUARDED_BY(mu_);
bool wait_if_no_active_request_;
const std::vector<double> sub_thread_pool_end_request_percentage_;
};
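// Assigns each active request to a sub thread pool waiter. Illustrative
// example (numbers not from the original source): with
// sub_thread_pool_end_request_percentage_ = {0.5, 1} and 4 active requests,
// requests 0-1 get the waiter of sub thread pool 0 and requests 2-3 the
// waiter of sub thread pool 1.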
void RunHandlerPool::Impl::RecomputePoolStats(
int num_active_requests, uint64_t version,
const Eigen::MaxSizeVector<internal::ThreadWorkSource*>&
thread_work_sources) {
int sub_thread_pool_id = 0;
for (int i = 0; i < num_active_requests; ++i) {
while (
sub_thread_pool_id <
sub_thread_pool_end_request_percentage_.size() - 1 &&
i >= num_active_requests *
sub_thread_pool_end_request_percentage_[sub_thread_pool_id]) {
sub_thread_pool_id++;
}
thread_work_sources[i]->SetWaiter(version,
&queue_waiters_[sub_thread_pool_id],
&waiters_mu_[sub_thread_pool_id]);
}
int num_threads = run_handler_thread_pool()->NumThreads();
int num_blocking_threads = run_handler_thread_pool()->NumBlockingThreads();
int num_non_blocking_threads = num_threads - num_blocking_threads;
for (int i = 0; i < num_blocking_threads + num_non_blocking_threads; ++i) {
run_handler_thread_pool()->SetThreadWorkSources(i, version,
thread_work_sources);
}
}
void RunHandlerPool::Impl::LogInfo() {
if (iterations_++ % 50000 == 10 && VLOG_IS_ON(1)) {
int num_active_requests = sorted_active_handlers_.size();
VLOG(1) << "Printing time histogram: " << time_hist_.ToString();
VLOG(1) << "Active session runs: " << num_active_requests;
uint64_t now = tensorflow::Env::Default()->NowMicros();
std::string times_str = "";
std::string ids_str = "";
auto it = sorted_active_handlers_.cbegin();
for (int i = 0; i < num_active_requests; ++i) {
if (i > 0) {
times_str += " ";
ids_str += " ";
}
times_str += tensorflow::strings::StrCat(
(now - (*it)->start_time_us()) / 1000.0, " ms.");
ids_str += tensorflow::strings::StrCat((*it)->tws()->GetTracemeId());
++it;
}
VLOG(1) << "Elapsed times are: " << times_str;
VLOG(1) << "Step ids are: " << ids_str;
}
}
RunHandler::Impl::Impl(RunHandlerPool::Impl* pool_impl)
: pool_impl_(pool_impl), eigen_thread_pool_(this) {
Reset(0, RunHandlerOptions());
}
void RunHandler::Impl::ScheduleInterOpClosure(TaskFunction fn) {
VLOG(3) << "Scheduling inter work for " << tws()->GetTracemeId();
pool_impl_->run_handler_thread_pool()->AddWorkToQueue(tws(), true,
std::move(fn));
}
void RunHandler::Impl::ScheduleIntraOpClosure(TaskFunction fn) {
VLOG(3) << "Scheduling intra work for " << tws()->GetTracemeId();
pool_impl_->run_handler_thread_pool()->AddWorkToQueue(tws(), false,
std::move(fn));
}
void RunHandler::Impl::Reset(int64_t step_id,
const RunHandlerOptions& options) {
start_time_us_ = tensorflow::Env::Default()->NowMicros();
step_id_ = step_id;
options_ = options;
tws_.SetTracemeId(step_id);
}
int RunHandler::Impl::RunHandlerEigenThreadPool::NumThreads() const {
return run_handler_->pool_impl_->run_handler_thread_pool()->NumThreads();
}
int RunHandler::Impl::RunHandlerEigenThreadPool::CurrentThreadId() const {
return run_handler_->pool_impl_->run_handler_thread_pool()->CurrentThreadId();
}
RunHandlerPool::RunHandlerPool(Options options) : impl_(new Impl(options)) {}
RunHandlerPool::~RunHandlerPool() = default;
std::unique_ptr<RunHandler> RunHandlerPool::Get(
int64_t step_id, int64_t timeout_in_ms, const RunHandlerOptions& options) {
return impl_->Get(step_id, timeout_in_ms, options);
}
std::vector<int64_t> RunHandlerPool::GetActiveHandlerPrioritiesForTesting()
const {
return impl_->GetActiveHandlerPrioritiesForTesting();
}
void RunHandlerPool::Quiesce() const { impl_->Quiesce(); }
RunHandler::RunHandler(Impl* impl) : impl_(impl) {}
void RunHandler::ScheduleInterOpClosure(TaskFunction fn) {
impl_->ScheduleInterOpClosure(std::move(fn));
}
void RunHandler::ScheduleIntraOpClosure(TaskFunction fn) {
  impl_->ScheduleIntraOpClosure(std::move(fn));
}
int RunHandler::NumThreads() const {
return impl_->pool_impl()->run_handler_thread_pool()->NumThreads();
}
int64_t RunHandler::step_id() const { return impl_->step_id(); }
tensorflow::thread::ThreadPoolInterface*
RunHandler::AsIntraThreadPoolInterface() const {
return impl_->thread_pool_interface();
}
RunHandler::~RunHandler() { impl_->pool_impl()->ReleaseHandler(impl_); }
int RunHandlerWorkQueue::GetParallelismLevel() const {
return run_handler_->NumThreads();
}
void RunHandlerWorkQueue::AddTask(TaskFunction work) {
run_handler_->ScheduleInterOpClosure(tensorflow::tfrt_stub::WrapWork(
run_handler_->step_id(), "inter", std::move(work)));
}
std::optional<TaskFunction> RunHandlerWorkQueue::AddBlockingTask(
TaskFunction work, bool allow_queuing) {
LOG_EVERY_N_SEC(ERROR, 10)
<< "RunHandlerWorkQueue::AddBlockingTask() is not supposed to be called.";
return work;
}
void RunHandlerWorkQueue::Await(ArrayRef<RCReference<AsyncValue>> values) {
tfrt::Await(values);
}
bool RunHandlerWorkQueue::IsInWorkerThread() const {
return true;
}
}
} | #include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#define EIGEN_USE_THREADS
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#define EIGEN_USE_THREADS
#include "absl/memory/memory.h"
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/notification.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/host_context/task_function.h"
namespace tfrt {
namespace tf {
namespace {
TEST(RunHandlerUtilTest, TestBasicScheduling) {
int num_threads = 2;
int num_handlers = 10;
RunHandlerPool::Options options;
options.num_intra_op_threads = num_threads;
options.num_inter_op_threads = num_threads;
std::unique_ptr<RunHandlerPool> pool(new RunHandlerPool(options));
absl::Barrier barrier(num_threads);
tensorflow::BlockingCounter counter(2 * num_handlers * num_threads);
tensorflow::thread::ThreadPool test_pool(tensorflow::Env::Default(), "test",
num_handlers);
for (int i = 0; i < num_handlers; ++i) {
test_pool.Schedule([&counter, &barrier, &pool, i, num_threads]() {
auto handler = pool->Get(i);
tensorflow::BlockingCounter local_counter(2 * num_threads);
for (int j = 0; j < num_threads; ++j) {
handler->ScheduleInterOpClosure(
TaskFunction([&local_counter, &counter, &barrier, i]() {
if (i == 2) {
barrier.Block();
}
counter.DecrementCount();
local_counter.DecrementCount();
}));
handler->ScheduleIntraOpClosure(
TaskFunction([&local_counter, &counter]() {
counter.DecrementCount();
local_counter.DecrementCount();
}));
}
local_counter.Wait();
});
}
counter.Wait();
}
TEST(RunHandlerUtilTest, PrioritySchedulingTest) {
int num_threads = 2;
RunHandlerPool::Options pool_options;
pool_options.num_intra_op_threads = num_threads;
pool_options.num_inter_op_threads = num_threads;
pool_options.num_threads_in_sub_thread_pool = {2};
std::unique_ptr<RunHandlerPool> pool(new RunHandlerPool(pool_options));
RunHandlerOptions options = RunHandlerOptions();
options.priority = 2;
auto handler1 = pool->Get(1, 0, options);
options.priority = 1;
auto handler2 = pool->Get(2, 0, options);
options.priority = 3;
auto handler3 = pool->Get(3, 0, options);
std::vector<int64_t> sorted_active_list =
pool->GetActiveHandlerPrioritiesForTesting();
EXPECT_EQ(sorted_active_list.size(), 3);
EXPECT_EQ(sorted_active_list[0], 3);
EXPECT_EQ(sorted_active_list[1], 2);
EXPECT_EQ(sorted_active_list[2], 1);
handler1.reset();
options.priority = 5;
auto handler4 = pool->Get(4, 0, options);
options.priority = 4;
auto handler5 = pool->Get(5, 0, options);
sorted_active_list = pool->GetActiveHandlerPrioritiesForTesting();
EXPECT_EQ(sorted_active_list.size(), 4);
EXPECT_EQ(sorted_active_list[0], 5);
EXPECT_EQ(sorted_active_list[1], 4);
EXPECT_EQ(sorted_active_list[2], 3);
EXPECT_EQ(sorted_active_list[3], 1);
}
TEST(RunHandlerUtilTest, IntraOpThreadPool) {
int num_threads = 2;
RunHandlerPool::Options pool_options;
pool_options.num_intra_op_threads = num_threads;
pool_options.num_inter_op_threads = num_threads;
pool_options.num_threads_in_sub_thread_pool = {2};
std::unique_ptr<RunHandlerPool> pool(new RunHandlerPool(pool_options));
RunHandlerOptions options = RunHandlerOptions();
auto handler = pool->Get(1, 0, options);
auto* intra_pool = handler->AsIntraThreadPoolInterface();
absl::Notification notification;
intra_pool->Schedule([¬ification]() { notification.Notify(); });
notification.WaitForNotification();
}
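// A minimal sketch (not part of the original suite): Quiesce() blocks until
// every pending task of every active handler has run, so it can drain the
// pool before teardown. The test name is hypothetical.
TEST(RunHandlerUtilTest, QuiesceSketch) {
  RunHandlerPool::Options options;
  options.num_intra_op_threads = 1;
  options.num_inter_op_threads = 1;
  RunHandlerPool pool(options);
  auto handler = pool.Get(/*step_id=*/1);
  absl::Notification done;
  handler->ScheduleInterOpClosure(TaskFunction([&done] { done.Notify(); }));
  done.WaitForNotification();
  handler.reset();
  pool.Quiesce();
}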
class RunHandlerThreadPoolTest
: public testing::TestWithParam<std::tuple<bool, bool>> {
protected:
bool adaptive_waiting_time() { return std::get<0>(GetParam()); }
bool wait_if_no_active_request() { return std::get<1>(GetParam()); }
};
TEST_P(RunHandlerThreadPoolTest, EnqueueTask) {
Eigen::MaxSizeVector<tensorflow::mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool run_handler_thread_pool(
internal::RunHandlerThreadPool::Options(
          /*num_blocking_threads=*/0, /*num_non_blocking_threads=*/0,
          /*wait_if_no_active_request=*/true,
          /*non_blocking_threads_sleep_time_micro_sec=*/250,
          /*blocking_threads_max_sleep_time_micro_sec=*/250,
          /*use_adaptive_waiting_time=*/true, /*enable_wake_up=*/true,
          /*max_concurrent_handler=*/128,
          /*num_threads_in_sub_thread_pool=*/{0},
          /*sub_thread_request_percentage=*/{1}),
tensorflow::Env::Default(), tensorflow::ThreadOptions(),
"tf_run_handler_pool", &waiters_mu, &waiters);
internal::ThreadWorkSource tws;
  tws.SetWaiter(/*version=*/1, &waiters[0], &waiters_mu[0]);
int result = 0;
std::function<void()> fn = [&result] { result = 1; };
std::function<void()> fn2 = [&result] { result = 2; };
run_handler_thread_pool.AddWorkToQueue(&tws, true,
TaskFunction(fn));
EXPECT_EQ(tws.TaskQueueSize(true), 1);
run_handler_thread_pool.AddWorkToQueue(&tws, true,
TaskFunction(fn2));
EXPECT_EQ(tws.TaskQueueSize(true), 2);
tws.PopBlockingTask().f->f();
EXPECT_EQ(result, 1);
tws.PopBlockingTask().f->f();
EXPECT_EQ(result, 2);
run_handler_thread_pool.AddWorkToQueue(&tws, false,
TaskFunction(fn));
EXPECT_EQ(tws.TaskQueueSize(false), 1);
run_handler_thread_pool.AddWorkToQueue(&tws, false,
TaskFunction(fn2));
EXPECT_EQ(tws.TaskQueueSize(false), 2);
tws.PopNonBlockingTask(0, true).f->f();
EXPECT_EQ(result, 1);
tws.PopNonBlockingTask(0, true).f->f();
EXPECT_EQ(result, 2);
EXPECT_TRUE(absl::StrContains(tws.ToString(), "traceme_id = 0"));
}
TEST_P(RunHandlerThreadPoolTest, FindTask) {
Eigen::MaxSizeVector<tensorflow::mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool run_handler_thread_pool(
internal::RunHandlerThreadPool::Options(
          /*num_blocking_threads=*/1, /*num_non_blocking_threads=*/0,
          /*wait_if_no_active_request=*/true,
          /*non_blocking_threads_sleep_time_micro_sec=*/250,
          /*blocking_threads_max_sleep_time_micro_sec=*/250,
          /*use_adaptive_waiting_time=*/true, /*enable_wake_up=*/true,
          /*max_concurrent_handler=*/128,
          /*num_threads_in_sub_thread_pool=*/{1},
          /*sub_thread_request_percentage=*/{1}),
tensorflow::Env::Default(), tensorflow::ThreadOptions(),
"tf_run_handler_pool", &waiters_mu, &waiters);
EXPECT_EQ(run_handler_thread_pool.NumBlockingThreads(), 1);
EXPECT_EQ(run_handler_thread_pool.NumNonBlockingThreads(), 0);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(5);
thread_work_sources.resize(5);
for (int i = 0; i < 5; ++i) {
thread_work_sources[i] = new internal::ThreadWorkSource();
    thread_work_sources[i]->SetWaiter(/*version=*/1, &waiters[0],
                                      &waiters_mu[0]);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[3],
true, TaskFunction([&result] { result = 3; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[3],
true, TaskFunction([&result] { result = 3; }));
const auto find_blocking_task_from_all_handlers =
[&](bool* task_from_blocking_queue, internal::Task* t) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
              /*searching_range_start=*/0, /*searching_range_end=*/5,
              /*thread_id=*/0,
              /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
              /*may_steal_blocking_work=*/true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_all_handlers(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[3],
true, TaskFunction([&result] { result = 3; }));
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
              /*thread_id=*/0,
              /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
              /*may_steal_blocking_work=*/true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 3);
EXPECT_EQ(t.f, nullptr);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[3],
true, TaskFunction([&result] { result = 3; }));
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
              /*thread_id=*/0,
              /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
              /*may_steal_blocking_work=*/true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 3, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
const auto find_blocking_task_from_range =
[&](bool* task_from_blocking_queue, internal::Task* t, int range_start,
int range_end) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
range_start, range_end,
              /*thread_id=*/0,
              /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
              /*may_steal_blocking_work=*/true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[3],
true, TaskFunction([&result] { result = 3; }));
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 3);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
find_blocking_task_from_range(&task_from_blocking_queue, &t, 0, 5);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 3);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
false, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
const auto blocking_thread_find_task_from_all_handler =
[&](bool* task_from_blocking_queue, internal::Task* t) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
              /*searching_range_start=*/0, /*searching_range_end=*/5,
              /*thread_id=*/0,
              /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
              /*may_steal_blocking_work=*/true, thread_work_sources,
task_from_blocking_queue, &tws);
};
bool task_from_blocking_queue;
internal::Task t;
blocking_thread_find_task_from_all_handler(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, true);
t.f->f();
EXPECT_EQ(result, 2);
blocking_thread_find_task_from_all_handler(&task_from_blocking_queue, &t);
EXPECT_EQ(task_from_blocking_queue, false);
t.f->f();
EXPECT_EQ(result, 2);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
false, TaskFunction([&result] { result = 2; }));
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
const auto find_task_from_all_handler = [&](bool* task_from_blocking_queue,
internal::Task* t,
bool is_blocking_thread) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
          /*searching_range_start=*/0, /*searching_range_end=*/5,
          /*thread_id=*/0,
          /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
is_blocking_thread, thread_work_sources, task_from_blocking_queue,
&tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(task_from_blocking_queue, false);
t.f->f();
EXPECT_EQ(result, 2);
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(t.f, nullptr);
find_task_from_all_handler(&task_from_blocking_queue, &t,
true);
}
{
int result = -1;
run_handler_thread_pool.AddWorkToQueue(
thread_work_sources[2],
true, TaskFunction([&result] { result = 2; }));
const auto find_task_from_all_handler = [&](bool* task_from_blocking_queue,
internal::Task* t,
bool is_blocking_thread) {
internal::ThreadWorkSource* tws;
*t = run_handler_thread_pool.FindTask(
          /*searching_range_start=*/0, /*searching_range_end=*/5,
          /*thread_id=*/0,
          /*sub_thread_pool_id=*/0, /*max_blocking_inflight=*/10,
is_blocking_thread, thread_work_sources, task_from_blocking_queue,
&tws);
};
bool task_from_blocking_queue;
internal::Task t;
find_task_from_all_handler(&task_from_blocking_queue, &t,
false);
EXPECT_EQ(task_from_blocking_queue, false);
EXPECT_EQ(t.f, nullptr);
find_task_from_all_handler(&task_from_blocking_queue, &t,
true);
}
for (int i = 0; i < 5; ++i) {
delete thread_work_sources[i];
}
}
TEST_P(RunHandlerThreadPoolTest, RoundRobinExecution) {
Eigen::MaxSizeVector<tensorflow::mutex> waiters_mu(1);
waiters_mu.resize(1);
Eigen::MaxSizeVector<internal::Waiter> waiters(1);
waiters.resize(1);
internal::RunHandlerThreadPool* run_handler_thread_pool =
new internal::RunHandlerThreadPool(
internal::RunHandlerThreadPool::Options(
              /*num_blocking_threads=*/1, /*num_non_blocking_threads=*/0,
              /*wait_if_no_active_request=*/true,
              /*non_blocking_threads_sleep_time_micro_sec=*/250,
              /*blocking_threads_max_sleep_time_micro_sec=*/250,
              /*use_adaptive_waiting_time=*/true, /*enable_wake_up=*/true,
              /*max_concurrent_handler=*/128,
              /*num_threads_in_sub_thread_pool=*/{1},
              /*sub_thread_request_percentage=*/{1}),
tensorflow::Env::Default(), tensorflow::ThreadOptions(),
"tf_run_handler_pool", &waiters_mu, &waiters);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(3);
thread_work_sources.resize(3);
internal::ThreadWorkSource tws[3];
for (int i = 0; i < 3; ++i) {
    tws[i].SetWaiter(/*version=*/1, &waiters[0], &waiters_mu[0]);
thread_work_sources[i] = &tws[i];
}
int result = 0;
tensorflow::mutex mu;
bool ok_to_execute = false;
bool ok_to_validate = false;
tensorflow::condition_variable function_start;
tensorflow::condition_variable function_end;
std::vector<std::function<void()>> fns;
for (int i = 0; i < 3; ++i) {
fns.push_back([&result, &mu, &function_start, &function_end, &ok_to_execute,
&ok_to_validate, i] {
tensorflow::mutex_lock l(mu);
while (!ok_to_execute) {
function_start.wait(l);
}
result = i;
ok_to_execute = false;
ok_to_validate = true;
function_end.notify_one();
});
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
TaskFunction(fns[i]));
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
TaskFunction(fns[i]));
}
run_handler_thread_pool->Start();
run_handler_thread_pool->SetThreadWorkSources(
      /*tid=*/0, /*version=*/1, thread_work_sources);
tensorflow::mutex_lock l(mu);
for (int round = 0; round < 2; ++round) {
for (int i = 0; i < 3; ++i) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i);
}
}
delete run_handler_thread_pool;
}
TEST_P(RunHandlerThreadPoolTest, MultipleSubThreadPool) {
Eigen::MaxSizeVector<tensorflow::mutex> waiters_mu(2);
waiters_mu.resize(2);
Eigen::MaxSizeVector<internal::Waiter> waiters(2);
waiters.resize(2);
internal::RunHandlerThreadPool* run_handler_thread_pool =
new internal::RunHandlerThreadPool(
internal::RunHandlerThreadPool::Options(
              /*num_blocking_threads=*/2, /*num_non_blocking_threads=*/0,
              /*wait_if_no_active_request=*/true,
              /*non_blocking_threads_sleep_time_micro_sec=*/250,
              /*blocking_threads_max_sleep_time_micro_sec=*/250,
              /*use_adaptive_waiting_time=*/true, /*enable_wake_up=*/true,
              /*max_concurrent_handler=*/128,
              /*num_threads_in_sub_thread_pool=*/{1, 1},
              /*sub_thread_request_percentage=*/{0.5, 1}),
tensorflow::Env::Default(), tensorflow::ThreadOptions(),
"tf_run_handler_pool", &waiters_mu, &waiters);
Eigen::MaxSizeVector<internal::ThreadWorkSource*> thread_work_sources(4);
thread_work_sources.resize(4);
internal::ThreadWorkSource tws[4];
for (int i = 0; i < 4; ++i) {
    tws[i].SetWaiter(/*version=*/1, &waiters[i / 2], &waiters_mu[i / 2]);
thread_work_sources[i] = &tws[i];
}
int result = 0;
tensorflow::mutex mu;
bool ok_to_execute = false;
bool ok_to_validate = false;
tensorflow::condition_variable function_start;
tensorflow::condition_variable function_end;
std::vector<std::function<void()>> fns;
for (int i = 0; i < 4; ++i) {
fns.push_back([&result, &mu, &function_start, &function_end, &ok_to_execute,
&ok_to_validate, i] {
tensorflow::mutex_lock l(mu);
while (!ok_to_execute) {
function_start.wait(l);
}
result = i;
ok_to_execute = false;
ok_to_validate = true;
function_end.notify_one();
});
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
TaskFunction(fns[i]));
run_handler_thread_pool->AddWorkToQueue(&tws[i], true,
TaskFunction(fns[i]));
}
run_handler_thread_pool->StartOneThreadForTesting();
run_handler_thread_pool->SetThreadWorkSources(
      /*tid=*/0, /*version=*/1, thread_work_sources);
run_handler_thread_pool->SetThreadWorkSources(
      /*tid=*/1, /*version=*/1, thread_work_sources);
tensorflow::mutex_lock l(mu);
for (int round = 0; round < 2; ++round) {
for (int i = 0; i < 2; ++i) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i);
}
}
for (int i = 0; i < 2; ++i) {
for (int round = 0; round < 2; ++round) {
ok_to_execute = true;
function_start.notify_one();
while (!ok_to_validate) {
function_end.wait(l);
}
ok_to_validate = false;
EXPECT_EQ(result, i + 2);
}
}
delete run_handler_thread_pool;
}
INSTANTIATE_TEST_SUITE_P(Parameter, RunHandlerThreadPoolTest,
testing::Combine(::testing::Bool(),
::testing::Bool()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21efe5f5-c57a-45c6-ac99-949f2a346f49 | cpp | tensorflow/tensorflow | flags | tensorflow/compiler/aot/flags.cc | tensorflow/core/config/flags_test.cc | #include "tensorflow/compiler/aot/flags.h"
namespace tensorflow {
namespace tfcompile {
void AppendMainFlags(std::vector<Flag>* flag_list, MainFlags* flags) {
const std::vector<Flag> tmp = {
{"graph", &flags->graph,
"Input GraphDef file. If the file ends in '.pbtxt' it is expected to "
"be in the human-readable proto text format, otherwise it is expected "
"to be in the proto binary format."},
{"debug_info", &flags->debug_info,
"Graph debug info file. If the file ends in '.pbtxt' it is expected to "
"be in the human-readable proto text format, otherwise it is expected "
"to be in the proto binary format."},
{"debug_info_path_begin_marker", &flags->debug_info_path_begin_marker,
"If not none, only keep the file path in the debug information after the"
" marker. The default value is empty"},
{"config", &flags->config,
"Input file containing Config proto. If the file ends in '.pbtxt' it "
"is expected to be in the human-readable proto text format, otherwise "
"it is expected to be in the proto binary format."},
{"dump_fetch_nodes", &flags->dump_fetch_nodes,
"If set, only flags related to fetches are processed, and the resulting "
"fetch nodes will be dumped to stdout in a comma-separated list. "
"Typically used to format arguments for other tools, e.g. "
"freeze_graph."},
{"target_triple", &flags->target_triple,
"Target platform, similar to the clang -target flag. The general "
"format is <arch><sub>-<vendor>-<sys>-<abi>. "
"http:
{"target_cpu", &flags->target_cpu,
"Target cpu, similar to the clang -mcpu flag. "
"http:
{"target_features", &flags->target_features,
"Target features, e.g. +avx2, +neon, etc."},
{"entry_point", &flags->entry_point,
"Name of the generated function. If multiple generated object files "
"will be linked into the same binary, each will need a unique entry "
"point."},
{"cpp_class", &flags->cpp_class,
"Name of the generated C++ class, wrapping the generated function. The "
"syntax of this flag is [[<optional_namespace>::],...]<class_name>. "
"This mirrors the C++ syntax for referring to a class, where multiple "
"namespaces may precede the class name, separated by double-colons. "
"The class will be generated in the given namespace(s), or if no "
"namespaces are given, within the global namespace."},
{"out_function_object", &flags->out_function_object,
"Output object file containing the generated function for the "
"TensorFlow model."},
{"out_header", &flags->out_header, "Output header file name."},
{"out_metadata_object", &flags->out_metadata_object,
"Output object file name containing optional metadata for the generated "
"function."},
{"out_session_module", &flags->out_session_module,
"Output session module proto."},
{"mlir_components", &flags->mlir_components,
"The MLIR components to enable. Currently only Bridge is supported."},
{"experimental_quantize", &flags->experimental_quantize,
"If set, quantization passes will run and dump the result before HLO "
"code generation."},
{"sanitize_dataflow", &flags->sanitize_dataflow,
"Enable DataFlow Sanitizer pass."},
{"sanitize_abilists_dataflow", &flags->sanitize_abilists_dataflow,
"Comma separated list of ABIList file paths."},
{"gen_name_to_index", &flags->gen_name_to_index,
"Generate name-to-index data for Lookup{Arg,Result}Index methods."},
{"gen_program_shape", &flags->gen_program_shape,
"Generate program shape data for the ProgramShape method."},
};
flag_list->insert(flag_list->end(), tmp.begin(), tmp.end());
}
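// A minimal usage sketch (not part of the original file): the flag list built
// above is typically handed to tensorflow::Flags::Parse from a main().
//
//   tensorflow::tfcompile::MainFlags flags;
//   std::vector<tensorflow::Flag> flag_list;
//   AppendMainFlags(&flag_list, &flags);
//   tensorflow::Flags::Parse(&argc, argv, flag_list);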
}
} | #include "tensorflow/core/config/flags.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TFFlags, ReadFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
EXPECT_FALSE(flags::Global().test_only_experiment_2.value());
}
TEST(TFFlags, ResetFlagValue) {
EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
flags::Global().test_only_experiment_1.reset(false);
EXPECT_FALSE(flags::Global().test_only_experiment_1.value());
}
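// A minimal sketch (not part of the original file): reset() takes effect for
// subsequent value() reads, so a flag toggled by one test can be restored.
// The test name is hypothetical.
TEST(TFFlags, ResetFlagValueBackSketch) {
  flags::Global().test_only_experiment_1.reset(true);
  EXPECT_TRUE(flags::Global().test_only_experiment_1.value());
}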
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/config/flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5022c468-76d4-4d66-9465-6171e0aac452 | cpp | tensorflow/tensorflow | tf_op_registry | tensorflow/core/ir/tf_op_registry.cc | tensorflow/core/ir/tf_op_registry_test.cc | #include "tensorflow/core/ir/tf_op_registry.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/ops.h"
namespace mlir {
namespace tfg {
TensorFlowOpRegistryInterface::TensorFlowOpRegistryInterface(Dialect *dialect)
: TensorFlowOpRegistryInterface(dialect, tensorflow::OpRegistry::Global()) {
}
static bool IsStatefulImpl(const tensorflow::OpRegistry *registry,
StringRef op_name) {
const tensorflow::OpRegistrationData *op_reg_data =
registry->LookUp(op_name.str());
if (!op_reg_data) return true;
return op_reg_data->op_def.is_stateful();
}
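// Ops absent from the registry are conservatively reported as stateful
// above, so a hypothetical unregistered "tfg.MyCustomOp" would be treated
// as side-effecting. Region-based variants are resolved under their base
// name: isStateful() below strips a trailing "Region", so "IfRegion" is
// looked up as "If".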
bool TensorFlowOpRegistryInterface::isStateful(Operation *op) const {
if (op->hasTrait<OpTrait::IntrinsicOperation>()) return false;
if (auto func = dyn_cast<GraphFuncOp>(op)) return func.getIsStateful();
StringRef op_name = op->getName().stripDialect();
if (op->getNumRegions() && op_name.ends_with("Region"))
op_name = op_name.drop_back(6);
return IsStatefulImpl(registry_, op_name);
}
}
} | #include "tensorflow/core/ir/tf_op_registry.h"
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
void PrepareContext(MLIRContext *context) {
DialectRegistry registry;
registry.insert<TFGraphDialect>();
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<TensorFlowOpRegistryInterface>();
});
context->appendDialectRegistry(registry);
}
TEST(TensorFlowOpRegistryInterface, TestIntrinsicOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32>) -> (tensor<i32>) {
return(%arg) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
auto func_op = cast<GraphFuncOp>(&module->front());
auto ret_op = cast<ReturnOp>(func_op.getBody().front().getTerminator());
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*func_op));
EXPECT_FALSE(dyn_cast<TensorFlowRegistryInterface>(*ret_op));
}
TEST(TensorFlowOpRegistryInterface, TestStatelessTFOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code = R"mlir(
tfg.func @test(%lhs: tensor<i32>, %rhs: tensor<i32>) -> (tensor<i32>) {
%Add, %ctl = Add(%lhs, %rhs) : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *add =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(add);
ASSERT_TRUE(iface);
EXPECT_FALSE(iface.isStateful());
}
TEST(TensorFlowOpRegistryInterface, TestStatelessAndStatefulRegionOps) {
MLIRContext context(MLIRContext::Threading::DISABLED);
PrepareContext(&context);
const char *const code_template = R"mlir(
tfg.func @test(%idx: tensor<i32>, %arg: tensor<i32>) -> (tensor<i32>) {{
%Case, %ctl = {0}CaseRegion %idx {{
yield(%arg) : tensor<i32>
} : (tensor<i32>) -> (tensor<i32>)
return(%Case) : tensor<i32>
}
)mlir";
SmallVector<StringRef, 2> prefixes = {"", "Stateless"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(prefixes, expected)) {
std::string code = llvm::formatv(code_template, std::get<0>(it)).str();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *case_op =
&cast<GraphFuncOp>(&module->front()).getBody().front().front();
auto iface = dyn_cast<TensorFlowRegistryInterface>(case_op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a4a27622-89e4-490d-96ab-4ec3e13d4a4f | cpp | tensorflow/tensorflow | utility | tensorflow/core/ir/utility.cc | tensorflow/core/ir/utility_test.cc | #include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/Block.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/interfaces.h"
namespace mlir {
namespace tfg {
Block::BlockArgListType GetLoopRegionDataArgs(Region ®ion) {
Block::BlockArgListType args = region.getArguments();
return args.drop_back(args.size() / 2);
}
Block::BlockArgListType GetLoopRegionControlTokens(Region ®ion) {
Block::BlockArgListType args = region.getArguments();
return args.drop_front(args.size() / 2);
}
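// Loop-region blocks lay out their arguments as all data values followed by
// all control tokens. Illustrative layout for two data values:
//   [%d0, %d1, %d0.ctl, %d1.ctl]
// so GetLoopRegionControlOf(%d0) returns argument 2, and
// GetLoopRegionDataOf(argument 2) returns %d0.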
BlockArgument GetLoopRegionControlOf(BlockArgument data) {
Block &block = *data.getOwner();
return block.getArgument(data.getArgNumber() + block.getNumArguments() / 2);
}
BlockArgument GetLoopRegionDataOf(BlockArgument ctl) {
Block &block = *ctl.getOwner();
return block.getArgument(ctl.getArgNumber() - block.getNumArguments() / 2);
}
Value LookupControlDependency(Value data) {
assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
Value control_dep;
if (auto result = mlir::dyn_cast<OpResult>(data)) {
control_dep = *std::prev(result.getOwner()->result_end());
} else {
auto arg = mlir::cast<BlockArgument>(data);
control_dep = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
.getControlTokenOf(arg);
}
assert(mlir::isa<ControlType>(control_dep.getType()) &&
"expected a control type");
return control_dep;
}
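// Hedged usage sketch (op and value names illustrative): given
//   %Copy, %ctl = Copy(%arg) : (tensor<i32>) -> (tensor<i32>)
// LookupControlDependency(%Copy) returns %ctl, the op's last result; for a
// block argument it defers to the parent op's ControlArgumentInterface.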
std::optional<Value> LookupDataValue(Value ctl) {
assert(mlir::isa<ControlType>(ctl.getType()) && "expected a control type");
Value data;
if (auto result = mlir::dyn_cast<OpResult>(ctl)) {
if (result.getOwner()->getNumResults() == 1) return {};
data = *result.getOwner()->result_begin();
} else {
auto arg = mlir::cast<BlockArgument>(ctl);
data = cast<ControlArgumentInterface>(arg.getOwner()->getParentOp())
.getDataValueOf(arg);
}
assert(!mlir::isa<ControlType>(data.getType()) && "expected a data type");
return data;
}
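// The inverse lookup can fail: an op whose only result is the control token
// (e.g. a NoOp-style op) has no data value, so LookupDataValue returns an
// empty std::optional rather than a Value.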
}
} | #include "tensorflow/core/ir/utility.h"
#include <optional>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(DialectUtilityTest, TestLookupControlDependency) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Copy, %ctl = Copy(%arg) : (tensor<i32>) -> (tensor<i32>)
return(%Copy) : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value copy = ret_op.getOperand(0);
Value ctl = LookupControlDependency(copy);
ASSERT_TRUE(ctl);
OpResult ctl_result = mlir::dyn_cast<OpResult>(ctl);
ASSERT_TRUE(ctl_result);
EXPECT_EQ(ctl_result.getResultNumber(), 1);
EXPECT_EQ(copy, ctl_result.getOwner()->getResult(0));
EXPECT_EQ(ctl_result.getOwner()->getName().getStringRef(), "tfg.Copy");
Value arg = ctl_result.getOwner()->getOperand(0);
Value arg_ctl = LookupControlDependency(arg);
ASSERT_TRUE(arg_ctl);
BlockArgument ctl_arg = mlir::dyn_cast<BlockArgument>(arg_ctl);
ASSERT_TRUE(ctl_arg);
EXPECT_EQ(ctl_arg.getArgNumber(), 1);
EXPECT_EQ(arg, ctl_arg.getOwner()->getArgument(0));
}
TEST(DialectUtilityTest, TestLookupDataValue) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Produce, %ctl = Produce [%arg.ctl] : () -> (tensor<i32>)
return(%arg) [%ctl] : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value ctl = ret_op.getOperand(1);
std::optional<Value> produce = LookupDataValue(ctl);
ASSERT_TRUE(produce);
OpResult produce_result = mlir::dyn_cast<OpResult>(*produce);
ASSERT_TRUE(produce_result);
ASSERT_EQ(produce_result.getResultNumber(), 0);
ASSERT_EQ(produce_result.getOwner()->getName().getStringRef(), "tfg.Produce");
ASSERT_EQ(produce_result.getOwner()->getResult(1), ctl);
Value arg_ctl = produce_result.getOwner()->getOperand(0);
std::optional<Value> arg = LookupDataValue(arg_ctl);
ASSERT_TRUE(arg);
BlockArgument arg_arg = mlir::dyn_cast<BlockArgument>(*arg);
ASSERT_TRUE(arg_arg);
ASSERT_EQ(arg_arg.getArgNumber(), 0);
ASSERT_EQ(arg_arg.getOwner()->getArgument(1), arg_ctl);
}
TEST(DialectUtilityTest, TestLookupDataValueNoData) {
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%ctl = NoOp [%arg.ctl] : () -> ()
return(%arg) [%ctl] : tensor<i32>
}
)mlir";
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto ret_op = cast<ReturnOp>(func.getBody().front().getTerminator());
Value ctl = ret_op.getOperand(1);
std::optional<Value> no_data = LookupDataValue(ctl);
ASSERT_FALSE(no_data);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/utility.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/utility_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad18d8db-ff85-4f70-86f2-d0a7b774dad7 | cpp | tensorflow/tensorflow | tf_op_wrapper | tensorflow/core/ir/tf_op_wrapper.cc | tensorflow/core/ir/tf_op_wrapper_test.cc | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {
TFOp::TFOp(Operation *op) : op_(op) {
assert(!op || classof(op) && "expected a TFG op");
}
StringAttr TFOp::nameAttr() {
return op_->getAttrOfType<StringAttr>(getDialect()->getNameAttrIdentifier());
}
StringRef TFOp::name() { return nameAttr().getValue(); }
void TFOp::setName(const Twine &name) {
setName(StringAttr::get(op_->getContext(), name.str()));
}
void TFOp::setName(StringAttr name) {
op_->setAttr(getDialect()->getNameAttrIdentifier(), name);
}
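// The accessors in this file use identifiers cached on the TFG dialect
// (e.g. getNameAttrIdentifier()), avoiding repeated lookups of the raw
// attribute-name string. Hedged usage sketch:
//   TFOp tf_op(op);
//   tf_op.setName("my_node");  // illustrative node name
//   StringRef name = tf_op.name();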
StringAttr TFOp::requestedDeviceAttr() {
return op_->getAttrOfType<StringAttr>(
getDialect()->getDeviceAttrIdentifier());
}
StringRef TFOp::requestedDevice() { return requestedDeviceAttr().getValue(); }
void TFOp::setRequestedDevice(const Twine &device) {
setRequestedDevice(StringAttr::get(op_->getContext(), device.str()));
}
void TFOp::setRequestedDevice(StringAttr device) {
op_->setAttr(getDialect()->getDeviceAttrIdentifier(), device);
}
StringAttr TFOp::assignedDeviceAttr() {
return op_->getAttrOfType<StringAttr>(
getDialect()->getAssignedDeviceAttrIdentifier());
}
StringRef TFOp::assignedDevice() { return assignedDeviceAttr().getValue(); }
void TFOp::setAssignedDevice(const Twine &device) {
setAssignedDevice(StringAttr::get(op_->getContext(), device.str()));
}
void TFOp::setAssignedDevice(StringAttr device) {
op_->setAttr(getDialect()->getAssignedDeviceAttrIdentifier(), device);
}
StringAttr TFOp::tpuReplicate() {
return op_->getAttrOfType<StringAttr>("_tpu_replicate");
}
void TFOp::setTpuReplicate(StringAttr tpu_replicate) {
op_->setAttr("_tpu_replicate", tpu_replicate);
}
}
} | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TFOpWrapper, LLVMRTTI) {
const char *const code = R"mlir(
tfg.func @test() -> (tensor<i32>) {
%A, %ctlA = A : () -> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
Operation *module_op = module.get();
EXPECT_FALSE(isa<TFOp>(module_op));
EXPECT_FALSE(dyn_cast<TFOp>(module_op));
module->walk([&](TFOp op) {
EXPECT_TRUE(isa<TFOp>(op.getOperation()));
EXPECT_TRUE(dyn_cast<TFOp>(op.getOperation()));
});
}
TEST(TFOpWrapper, ControlOperands) {
const char *const code = R"mlir(
tfg.func @test(%a: tensor<i32> {tfg.name = "a"},
%b: tensor<i32> {tfg.name = "b"}) -> (tensor<i32>) {
%A, %ctlA = A(%a, %b) [%a.ctl, %b.ctl] : (tensor<i32>, tensor<i32>)
-> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
TFOp a_op;
module->walk([&](TFOp op) {
if (op->getName().getStringRef() == "tfg.A") a_op = op;
});
ASSERT_TRUE(a_op);
EXPECT_EQ(a_op.controlRet().getDefiningOp(), a_op.getOperation());
OperandRange operands = a_op->getOperands();
OperandRange data = a_op.getNonControlOperands();
OperandRange ctls = a_op.getControlOperands();
EXPECT_EQ(operands.size(), 4u);
EXPECT_EQ(data.size(), 2u);
EXPECT_EQ(ctls.size(), 2u);
OperandRange::iterator ctl_it = llvm::find_if(operands, [](Value operand) {
return mlir::isa<ControlType>(operand.getType());
});
EXPECT_NE(ctl_it, operands.end());
EXPECT_EQ(data.end(), ctl_it);
EXPECT_EQ(*ctls.begin(), *ctl_it);
}
TEST(TFOpWrapper, AttributeGetterSetters) {
MLIRContext context;
auto *tfg_dialect = context.getOrLoadDialect<TFGraphDialect>();
OperationState state(UnknownLoc::get(&context), "tfg.A");
state.addTypes(tfg_dialect->getControlType());
TFOp op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
{
EXPECT_FALSE(op.nameAttr());
StringRef a_name = "a_name";
op.setName(a_name);
EXPECT_EQ(op.name(), a_name);
StringRef another_name = "another_name";
op.setName(StringAttr::get(&context, another_name));
EXPECT_EQ(op.name(), another_name);
}
{
StringRef a_device = "/some_device";
EXPECT_FALSE(op.requestedDeviceAttr());
op.setRequestedDevice(a_device);
EXPECT_EQ(op.requestedDevice(), a_device);
StringRef another_device = "/some_other_device";
op.setRequestedDevice(StringAttr::get(&context, another_device));
EXPECT_EQ(op.requestedDevice(), another_device);
}
{
StringRef a_device = "/some_assigned_device";
EXPECT_FALSE(op.assignedDeviceAttr());
op.setAssignedDevice(a_device);
EXPECT_EQ(op.assignedDevice(), a_device);
StringRef another_device = "/some_other_assigned_device";
op.setAssignedDevice(StringAttr::get(&context, another_device));
EXPECT_EQ(op.assignedDevice(), another_device);
}
{
op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
EXPECT_EQ(op.deviceAttr(), op.requestedDeviceAttr());
StringRef device = "/an_assigned_device";
op.setAssignedDevice(device);
EXPECT_EQ(op.deviceAttr(), op.assignedDeviceAttr());
EXPECT_EQ(op.device(), device);
op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
op->removeAttr(tfg_dialect->getDeviceAttrIdentifier());
EXPECT_EQ(op.device(), "");
}
{
auto tpu_replicate = StringAttr::get(op->getContext(), "a_tpu");
op.setTpuReplicate(tpu_replicate);
EXPECT_EQ(op.tpuReplicate(), tpu_replicate);
}
}
TEST(TFOpWrapper, ValueControlRet) {
const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Const, %ctl = Const {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Add, %ctl_2 = Add(%Const, %arg) [%ctl] {T = i32} : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto iterator = func.getBody().begin()->begin();
TFOp const_op = &(*iterator++);
TFOp add_op = &(*iterator);
OperandControlRetRange ret_range(add_op->getOperands());
EXPECT_EQ(ret_range[0], const_op.controlRet());
EXPECT_EQ(ret_range[1], func.getBody().begin()->getArguments()[1]);
EXPECT_EQ(ret_range[2], const_op.controlRet());
for (Value v : ret_range) EXPECT_TRUE(mlir::isa<ControlType>(v.getType()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd288dc6-fb3a-406a-966e-03145ea710ac | cpp | tensorflow/tensorflow | interfaces | tensorflow/core/ir/interfaces.cc | tensorflow/core/ir/interfaces_test.cc | #include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace tfg {
LogicalResult ControlArgumentInterface::verifyRegion(Operation *op,
Region ®ion) {
unsigned num_ctl = 0, num_data = 0;
for (BlockArgument arg : region.getArguments()) {
bool is_ctl = mlir::isa<tf_type::ControlType>(arg.getType());
num_ctl += is_ctl;
num_data += !is_ctl;
}
if (num_ctl != num_data) {
return op->emitOpError("region #")
<< region.getRegionNumber()
<< " expected same number of data values and control tokens ("
<< num_data << " vs. " << num_ctl << ")";
}
return success();
}
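// Example of the invariant verified above: block arguments
// (tensor<i32>, tensor<f32>, !tf_type.control, !tf_type.control) pass,
// while three data values paired with a single control token are rejected.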
void StatefulMemoryEffectInterface::getEffects(
Operation *op,
SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
&effects) const {
auto registry = dyn_cast<TensorFlowRegistryInterface>(op);
if (!registry || registry.isStateful() || op->getParentOfType<GraphOp>()) {
effects.emplace_back(MemoryEffects::Write::get());
}
}
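// Conservative effect policy: an op is given a memory-write effect when it
// has no registry interface, when the registry marks it stateful, or when
// it sits inside a tfg.graph (where execution order is significant). This
// keeps generic passes such as CSE and DCE from reordering or deleting it.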
}
}
#include "tensorflow/core/ir/interfaces.cc.inc" | #include "tensorflow/core/ir/interfaces.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/DialectInterface.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/Verifier.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TensorFlowRegistryInterface, TestDefaultImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
OperationState state(UnknownLoc::get(&context), "tfg.Foo");
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
ASSERT_TRUE(succeeded(verify(op)));
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
EXPECT_FALSE(iface);
}
TEST(TensorFlowRegisterInterface, TestCustomImplementation) {
MLIRContext context(MLIRContext::Threading::DISABLED);
DialectRegistry registry;
registry.insert<TFGraphDialect>();
struct CustomRegistryInterface : public TensorFlowRegistryInterfaceBase {
using TensorFlowRegistryInterfaceBase::TensorFlowRegistryInterfaceBase;
bool isStateful(Operation *op) const override {
return op->getName().stripDialect() == "Foo";
}
};
registry.addExtension(+[](mlir::MLIRContext *ctx, TFGraphDialect *dialect) {
dialect->addInterfaces<CustomRegistryInterface>();
});
context.appendDialectRegistry(registry);
auto *dialect = context.getOrLoadDialect<TFGraphDialect>();
SmallVector<StringRef, 2> op_names = {"tfg.Foo", "tfg.Bar"};
SmallVector<bool, 2> expected = {true, false};
for (auto it : llvm::zip(op_names, expected)) {
OperationState state(UnknownLoc::get(&context), std::get<0>(it));
state.addTypes(dialect->getControlType());
Operation *op = Operation::create(state);
auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
auto iface = dyn_cast<TensorFlowRegistryInterface>(op);
ASSERT_TRUE(iface);
EXPECT_EQ(iface.isStateful(), std::get<1>(it));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/interfaces.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/interfaces_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8515d29c-d1da-488d-9277-faa53268774a | cpp | tensorflow/tensorflow | dialect | tensorflow/core/ir/types/dialect.cc | tensorflow/core/ir/types/dialect_test.cc | #include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/strings/escaping.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#define GET_ATTRDEF_CLASSES
#include "tensorflow/core/ir/types/attributes.cc.inc"
#include "tensorflow/core/ir/types/attributes_enum.cc.inc"
#define GET_TYPEDEF_CLASSES
#include "tensorflow/core/ir/types/types.cc.inc"
#include "tensorflow/core/ir/types/dialect.cpp.inc"
namespace mlir {
namespace tf_type {
void TFTypeDialect::initialize() {
addAttributes<
#define GET_ATTRDEF_LIST
#include "tensorflow/core/ir/types/attributes.cc.inc"
>();
addTypes<ControlType, OpaqueTensorType,
#define HANDLE_TF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>();
}
namespace {
template <typename TypeWithSubtype>
Type ParseTypeWithSubtype(MLIRContext* context, DialectAsmParser& parser) {
if (failed(parser.parseOptionalLess())) return TypeWithSubtype::get(context);
SmallVector<TensorType, 1> subtypes;
do {
TensorType tensor_ty;
if (parser.parseType(tensor_ty)) return Type();
if (!IsValidTFTensorType(tensor_ty)) {
parser.emitError(parser.getNameLoc()) << "invalid subtype: " << tensor_ty;
return Type();
}
subtypes.push_back(tensor_ty);
} while (succeeded(parser.parseOptionalComma()));
if (parser.parseGreater()) return Type();
return TypeWithSubtype::get(subtypes, context);
}
template <typename TypeWithSubtype>
void PrintTypeWithSubtype(StringRef type, TypeWithSubtype ty,
DialectAsmPrinter& os) {
os << type;
ArrayRef<TensorType> subtypes = ty.getSubtypes();
if (subtypes.empty()) return;
os << "<";
interleaveComma(subtypes, os);
os << ">";
}
Type ParseResourceType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<ResourceType>(context, parser);
}
void PrintResourceType(ResourceType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("resource", ty, os);
}
Type ParseVariantType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<VariantType>(context, parser);
}
void PrintVariantType(VariantType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("variant", ty, os);
}
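// Illustrative textual forms handled by the helpers above:
//   !tf_type.resource                                (no subtypes)
//   !tf_type.resource<tensor<32xf32>>                (one subtype)
//   !tf_type.variant<tensor<?xi32>, tensor<f32>>     (two subtypes)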
}
Type TFTypeDialect::parseType(DialectAsmParser& parser) const {
StringRef type_tag;
llvm::SMLoc loc = parser.getNameLoc();
Type genType;
auto parse_result = generatedTypeParser(parser, &type_tag, genType);
if (parse_result.has_value()) return genType;
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type_tag == name) return tftype##Type::get(getContext());
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
if (type_tag.starts_with("resource")) {
Type ret = ParseResourceType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid resource type");
return ret;
}
if (type_tag.starts_with("variant")) {
Type ret = ParseVariantType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid variant type");
return ret;
}
parser.emitError(parser.getNameLoc(),
"unknown type in TF graph dialect: " + type_tag);
return {};
}
void TFTypeDialect::printType(Type type, DialectAsmPrinter& printer) const {
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
printer << name; \
return; \
}
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
Print##tftype##Type(derived_ty, printer); \
return; \
}
#include "tensorflow/core/ir/types/types.def"
if (failed(generatedTypePrinter(type, printer)))
llvm::report_fatal_error("unexpected tensorflow graph type kind");
}
Attribute VersionAttr::parse(AsmParser& parser, Type) {
if (failed(parser.parseLess())) return {};
int32_t producer, min_consumer;
if (parser.parseKeyword("producer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(producer) ||
parser.parseComma() ||
parser.parseKeyword("min_consumer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(min_consumer))
return {};
SmallVector<int32_t, 4> bad_consumers;
if (!parser.parseOptionalComma()) {
if (parser.parseKeyword("bad_consumers", " in tf_type version") ||
parser.parseEqual() || parser.parseLSquare())
return {};
do {
int32_t bad_consumer;
if (parser.parseInteger(bad_consumer)) return {};
bad_consumers.push_back(bad_consumer);
} while (!parser.parseOptionalComma());
if (parser.parseRSquare()) return {};
}
if (failed(parser.parseGreater())) return {};
return VersionAttr::get(parser.getContext(), producer, min_consumer,
bad_consumers);
}
void VersionAttr::print(AsmPrinter& printer) const {
llvm::raw_ostream& os = printer.getStream();
os << "<producer = " << getProducer()
<< ", min_consumer = " << getMinConsumer();
ArrayRef<int32_t> badConsumers = getBadConsumers();
if (!badConsumers.empty()) {
os << ", bad_consumers = [";
llvm::interleaveComma(badConsumers, os);
os << "]";
}
os << ">";
}
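// Round-trip examples of the syntax implemented by parse()/print() above:
//   #tf_type.version<producer = 42, min_consumer = 33>
//   #tf_type.version<producer = 1, min_consumer = 1, bad_consumers = [1, 2]>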
FailureOr<FullTypeAttr> RawFullTypeAttrParser(AsmParser& parser) {
SmallVector<FullTypeAttr> args;
llvm::StringRef type_id_str;
if (failed(parser.parseKeyword(&type_id_str))) {
parser.emitError(
parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter keyword for "
"'type_id'");
return failure();
}
std::optional<FullTypeId> type_id = symbolizeFullTypeId(type_id_str);
if (!type_id) {
parser.emitError(parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter "
"'type_id'");
return failure();
}
if (parser.parseCommaSeparatedList(AsmParser::Delimiter::OptionalLessGreater,
[&]() {
FailureOr<tf_type::FullTypeAttr> arg =
RawFullTypeAttrParser(parser);
if (failed(arg)) return failure();
args.push_back(*arg);
return success();
}))
return failure();
Attribute attr;
parser.parseOptionalAttribute(attr);
return FullTypeAttr::get(
parser.getContext(),
mlir::IntegerAttr::get(mlir::IntegerType::get(parser.getContext(), 32),
static_cast<int32_t>(*type_id)),
args, attr);
}
Attribute FullTypeAttr::parse(AsmParser& parser, Type odsType) {
if (failed(parser.parseLess())) return {};
FailureOr<tf_type::FullTypeAttr> ret = RawFullTypeAttrParser(parser);
if (succeeded(ret) && failed(parser.parseGreater())) return {};
return ret.value_or(FullTypeAttr());
}
static void RawFullTypeAttrPrint(FullTypeAttr tfattr, AsmPrinter& printer) {
printer << stringifyFullTypeId(
tf_type::FullTypeId(tfattr.getTypeId().getInt()));
if (!tfattr.getArgs().empty()) {
printer << "<";
llvm::interleaveComma(tfattr.getArgs(), printer, [&](Attribute arg) {
if (auto t = mlir::dyn_cast<FullTypeAttr>(arg))
RawFullTypeAttrPrint(t, printer);
else
printer << "<<INVALID ARG>>";
});
printer << ">";
}
if (tfattr.getAttr()) {
printer << ' ';
printer.printStrippedAttrOrType(tfattr.getAttr());
}
}
void FullTypeAttr::print(AsmPrinter& printer) const {
printer << "<";
RawFullTypeAttrPrint(*this, printer);
printer << ">";
}
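// The printed form is a type_id keyword, optionally followed by
// angle-bracketed nested arguments and a trailing attribute, recursing
// through RawFullTypeAttrPrint. The exact keyword spelling comes from
// stringifyFullTypeId, so any concrete example here would be speculative.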
void FuncAttr::print(AsmPrinter& os) const {
if (getName().getRootReference().getValue().empty())
os << "<\"\", " << getAttrs() << ">";
else
os << "<" << getName() << ", " << getAttrs() << ">";
}
Attribute FuncAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
llvm::SMLoc loc = parser.getCurrentLocation();
Attribute name, dict;
if (failed(parser.parseAttribute(name))) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (auto func_name_str = mlir::dyn_cast<StringAttr>(name)) {
if (!func_name_str.getValue().empty()) {
parser.emitError(loc)
<< "expected empty string or symbol while parsing tf.func "
"attribute";
return {};
}
name = SymbolRefAttr::get(parser.getContext(), "");
}
if (!mlir::isa<SymbolRefAttr>(name)) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (failed(parser.parseComma())) return {};
loc = parser.getCurrentLocation();
if (failed(parser.parseAttribute(dict)) || !mlir::isa<DictionaryAttr>(dict)) {
parser.emitError(loc)
<< "expected Dictionary attribute while parsing tf.func attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return FuncAttr::get(parser.getContext(), mlir::cast<SymbolRefAttr>(name),
mlir::cast<DictionaryAttr>(dict));
}
void PlaceholderAttr::print(AsmPrinter& os) const {
os << "<" << StringAttr::get(getContext(), getValue()) << ">";
}
Attribute PlaceholderAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
std::string content;
if (failed(parser.parseOptionalString(&content))) {
parser.emitError(parser.getCurrentLocation())
<< "expected string while parsing tf.placeholder attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return PlaceholderAttr::get(parser.getContext(), content);
}
void ShapeAttr::print(AsmPrinter& os) const {
os << "<";
if (hasRank()) {
auto print_dim = [&](int64_t dim) {
if (dim != ShapedType::kDynamic)
os << dim;
else
os << "?";
};
llvm::interleave(getShape(), os, print_dim, "x");
} else {
os << "*";
}
os << ">";
}
Attribute ShapeAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
if (succeeded(parser.parseOptionalStar())) {
if (failed(parser.parseGreater())) {
parser.emitError(parser.getCurrentLocation())
<< "expected `>` after `*` when parsing a tf.shape "
"attribute";
return {};
}
return ShapeAttr::get(parser.getContext(), std::nullopt);
}
SmallVector<int64_t> shape;
if (failed(parser.parseOptionalGreater())) {
auto parse_element = [&]() {
shape.emplace_back();
llvm::SMLoc loc = parser.getCurrentLocation();
if (succeeded(parser.parseOptionalQuestion())) {
shape.back() = ShapedType::kDynamic;
} else if (failed(parser.parseDecimalInteger(shape.back()))) {
parser.emitError(loc)
<< "expected an integer or `?` when parsing a tf.shape attribute";
return failure();
}
return success();
};
if (failed(parse_element())) return {};
while (failed(parser.parseOptionalGreater())) {
if (failed(parser.parseXInDimensionList()) || failed(parse_element()))
return {};
}
}
return ShapeAttr::get(parser.getContext(), llvm::ArrayRef(shape));
}
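// Examples accepted by the parser above (and produced by print()):
//   #tf_type.shape<*>       unranked
//   #tf_type.shape<>        rank 0 (scalar)
//   #tf_type.shape<0x?x2>   rank 3 with a dynamic middle dimension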
ShapeAttr ShapeAttr::get(MLIRContext* context,
std::optional<ArrayRef<int64_t>> shape) {
if (shape) return Base::get(context, *shape, false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
ShapeAttr ShapeAttr::get(MLIRContext* context, ShapedType shaped_type) {
if (shaped_type.hasRank())
return Base::get(context, shaped_type.getShape(), false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
std::optional<ArrayRef<int64_t>> ShapeAttr::getValue() const {
if (hasRank()) return getShape();
return std::nullopt;
}
bool ShapeAttr::hasRank() const { return !getImpl()->unranked; }
int64_t ShapeAttr::getRank() const {
assert(hasRank());
return getImpl()->shape.size();
}
bool ShapeAttr::hasStaticShape() const {
if (!hasRank()) return false;
for (auto dim : getShape()) {
if (dim < 0) return false;
}
return true;
}
namespace {
std::optional<ArrayRef<int64_t>> GetShape(Value value) {
auto shaped_type = mlir::cast<ShapedType>(value.getType());
if (shaped_type.hasRank()) return shaped_type.getShape();
return std::nullopt;
}
bool GetCastCompatibleShape(ArrayRef<int64_t> a_shape,
ArrayRef<int64_t> b_shape,
SmallVectorImpl<int64_t>* refined_shape) {
if (a_shape.size() != b_shape.size()) return false;
int64_t rank = a_shape.size();
refined_shape->reserve(rank);
for (auto dims : llvm::zip(a_shape, b_shape)) {
int64_t dim1 = std::get<0>(dims);
int64_t dim2 = std::get<1>(dims);
if (ShapedType::isDynamic(dim1)) {
refined_shape->push_back(dim2);
continue;
}
if (ShapedType::isDynamic(dim2)) {
refined_shape->push_back(dim1);
continue;
}
if (dim1 == dim2) {
refined_shape->push_back(dim1);
continue;
}
return false;
}
return true;
}
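// Pairwise refinement examples for the helper above: shapes [?, 4] and
// [3, ?] refine to [3, 4]; [3] and [4] are incompatible, as are shapes of
// differing rank.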
}
OperandShapeIterator::OperandShapeIterator(Operation::operand_iterator it)
: llvm::mapped_iterator<Operation::operand_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
ResultShapeIterator::ResultShapeIterator(Operation::result_iterator it)
: llvm::mapped_iterator<Operation::result_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
bool TensorFlowType::classof(Type type) {
return llvm::isa<TFTypeDialect>(type.getDialect());
}
bool TensorFlowRefType::classof(Type type) {
return mlir::isa<
#define HANDLE_TF_TYPE(tftype, enumerant, name)
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>(type);
}
TensorFlowType TensorFlowRefType::get(Type type) {
MLIRContext* ctx = type.getContext();
type = getElementTypeOrSelf(type);
if (type.isF16()) {
return HalfRefType::get(ctx);
} else if (type.isF32()) {
return FloatRefType::get(ctx);
} else if (type.isF64()) {
return DoubleRefType::get(ctx);
} else if (type.isBF16()) {
return Bfloat16RefType::get(ctx);
} else if (type.isFloat8E4M3FN()) {
return Float8E4M3FNRefType::get(ctx);
} else if (type.isFloat8E5M2()) {
return Float8E5M2RefType::get(ctx);
} else if (auto complex_type = mlir::dyn_cast<ComplexType>(type)) {
Type etype = complex_type.getElementType();
if (etype.isF32()) {
return Complex64RefType::get(ctx);
} else if (etype.isF64()) {
return Complex128RefType::get(ctx);
}
llvm_unreachable("unexpected complex type");
} else if (auto itype = mlir::dyn_cast<IntegerType>(type)) {
switch (itype.getWidth()) {
case 1:
return BoolRefType::get(ctx);
case 4:
return itype.isUnsigned() ? TensorFlowType(Uint4RefType::get(ctx))
: Int4RefType::get(ctx);
case 8:
return itype.isUnsigned() ? TensorFlowType(Uint8RefType::get(ctx))
: Int8RefType::get(ctx);
case 16:
return itype.isUnsigned() ? TensorFlowType(Uint16RefType::get(ctx))
: Int16RefType::get(ctx);
case 32:
return itype.isUnsigned() ? TensorFlowType(Uint32RefType::get(ctx))
: Int32RefType::get(ctx);
case 64:
return itype.isUnsigned() ? TensorFlowType(Uint64RefType::get(ctx))
: Int64RefType::get(ctx);
default:
llvm_unreachable("unexpected integer type");
}
}
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) \
return tftype##RefType::get(ctx);
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
llvm_unreachable("unexpected type kind");
}
Type TensorFlowRefType::RemoveRef() {
MLIRContext* ctx = getContext();
if (mlir::isa<HalfRefType>(*this)) return FloatType::getF16(ctx);
if (mlir::isa<FloatRefType>(*this)) return FloatType::getF32(ctx);
if (mlir::isa<DoubleRefType>(*this)) return FloatType::getF64(ctx);
if (mlir::isa<Bfloat16RefType>(*this)) return FloatType::getBF16(ctx);
if (mlir::isa<Float8E4M3FNRefType>(*this))
return FloatType::getFloat8E4M3FN(ctx);
if (mlir::isa<Float8E5M2RefType>(*this)) return FloatType::getFloat8E5M2(ctx);
if (mlir::isa<BoolRefType>(*this)) return IntegerType::get(ctx, 1);
if (mlir::isa<Int4RefType>(*this))
return IntegerType::get(ctx, 4, IntegerType::Signed);
if (mlir::isa<Int8RefType>(*this)) return IntegerType::get(ctx, 8);
if (mlir::isa<Int16RefType>(*this)) return IntegerType::get(ctx, 16);
if (mlir::isa<Int32RefType>(*this)) return IntegerType::get(ctx, 32);
if (mlir::isa<Int64RefType>(*this)) return IntegerType::get(ctx, 64);
if (mlir::isa<Uint4RefType>(*this))
return IntegerType::get(ctx, 4, IntegerType::Unsigned);
if (mlir::isa<Uint8RefType>(*this))
return IntegerType::get(ctx, 8, IntegerType::Unsigned);
if (mlir::isa<Uint16RefType>(*this))
return IntegerType::get(ctx, 16, IntegerType::Unsigned);
if (mlir::isa<Uint32RefType>(*this))
return IntegerType::get(ctx, 32, IntegerType::Unsigned);
if (mlir::isa<Uint64RefType>(*this))
return IntegerType::get(ctx, 64, IntegerType::Unsigned);
if (mlir::isa<Complex64RefType>(*this))
return ComplexType::get(FloatType::getF32(ctx));
if (mlir::isa<Complex128RefType>(*this))
return ComplexType::get(FloatType::getF64(ctx));
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (isa<tftype##RefType>()) return tftype##Type::get(ctx);
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
llvm_unreachable("unexpected tensorflow ref type kind");
}
bool TensorFlowTypeWithSubtype::classof(Type type) {
return mlir::isa<ResourceType, VariantType>(type);
}
Type TensorFlowTypeWithSubtype::RemoveSubtypes() {
MLIRContext* ctx = getContext();
if (mlir::isa<VariantType>(*this)) return VariantType::get(ctx);
if (mlir::isa<ResourceType>(*this)) return ResourceType::get(ctx);
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
TensorFlowTypeWithSubtype TensorFlowTypeWithSubtype::clone(
ArrayRef<TensorType> new_subtypes) {
MLIRContext* ctx = getContext();
if (mlir::isa<VariantType>(*this))
return mlir::cast<TensorFlowTypeWithSubtype>(
VariantType::get(new_subtypes, ctx));
if (mlir::isa<ResourceType>(*this))
return mlir::cast<TensorFlowTypeWithSubtype>(
ResourceType::get(new_subtypes, ctx));
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
ArrayRef<TensorType> TensorFlowTypeWithSubtype::GetSubtypes() {
if (auto variant_type = mlir::dyn_cast<VariantType>(*this))
return variant_type.getSubtypes();
if (auto resource_type = mlir::dyn_cast<ResourceType>(*this))
return resource_type.getSubtypes();
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
bool BroadcastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto types : llvm::zip(lhs, rhs)) {
auto lhs_type = DropRefType(std::get<0>(types));
auto rhs_type = DropRefType(std::get<1>(types));
auto lhs_tt = mlir::dyn_cast<TensorType>(lhs_type);
auto rhs_tt = mlir::dyn_cast<TensorType>(rhs_type);
if (!lhs_tt || !rhs_tt) {
if (lhs_type != rhs_type) return false;
continue;
}
auto lhs_et = lhs_tt.getElementType();
auto rhs_et = rhs_tt.getElementType();
if (lhs_et != rhs_et) {
auto lhs_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(lhs_et);
auto rhs_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(rhs_et);
if (!lhs_wst || !rhs_wst) return false;
auto lhs_wst_st = lhs_wst.GetSubtypes();
auto rhs_wst_st = rhs_wst.GetSubtypes();
if (!lhs_wst_st.empty() && !rhs_wst_st.empty()) {
for (auto subtypes : llvm::zip(lhs_wst_st, rhs_wst_st)) {
if (!BroadcastCompatible(std::get<0>(subtypes),
std::get<1>(subtypes)))
return false;
}
}
}
auto lhs_rt = mlir::dyn_cast<RankedTensorType>(lhs_type);
auto rhs_rt = mlir::dyn_cast<RankedTensorType>(rhs_type);
if (!lhs_rt || !rhs_rt) return true;
SmallVector<int64_t, 4> shape;
return OpTrait::util::getBroadcastedShape(lhs_rt.getShape(),
rhs_rt.getShape(), shape);
}
return true;
}
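// Broadcast examples for the function above: tensor<2x1xf32> and
// tensor<2x3xf32> are compatible (broadcast shape 2x3), while tensor<2xf32>
// and tensor<3xf32> are not; if either side is unranked, the shape check is
// skipped and the pair is accepted.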
Type GetCastCompatibleType(Type a, Type b, bool may_ignore_ref_type_a) {
if (a == b) return b;
auto a_tt = mlir::dyn_cast<TensorType>(a);
auto b_tt = mlir::dyn_cast<TensorType>(b);
if (static_cast<bool>(a_tt) ^ static_cast<bool>(b_tt)) return nullptr;
if (!a_tt && !b_tt) {
if (may_ignore_ref_type_a) {
if (auto ref_type = mlir::dyn_cast<TensorFlowRefType>(a)) {
a = ref_type.RemoveRef();
if (a == b) return a;
}
}
if (a.getTypeID() != b.getTypeID()) return nullptr;
auto a_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(a);
auto b_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(b);
if (!a_wst || !b_wst) return nullptr;
if (mlir::isa<VariantType>(a)) return a;
if (mlir::isa<VariantType>(b)) return b;
auto a_wst_st = a_wst.GetSubtypes();
auto b_wst_st = b_wst.GetSubtypes();
if (a_wst_st.empty()) return b;
if (b_wst_st.empty()) return a;
if (a_wst_st.size() != b_wst_st.size()) return nullptr;
SmallVector<TensorType, 4> refined_subtypes;
for (auto subtypes : llvm::zip(a_wst_st, b_wst_st)) {
Type refined_st =
GetCastCompatibleType(std::get<0>(subtypes), std::get<1>(subtypes),
false);
if (!refined_st) return nullptr;
refined_subtypes.push_back(mlir::cast<TensorType>(refined_st));
}
return ResourceType::get(refined_subtypes, a.getContext());
}
Type refined_element_ty = GetCastCompatibleType(
a_tt.getElementType(), b_tt.getElementType(), may_ignore_ref_type_a);
if (!refined_element_ty) return nullptr;
if (!a_tt.hasRank() && !b_tt.hasRank()) {
return UnrankedTensorType::get(refined_element_ty);
}
if (!a_tt.hasRank()) {
return RankedTensorType::get(b_tt.getShape(), refined_element_ty);
}
if (!b_tt.hasRank()) {
return RankedTensorType::get(a_tt.getShape(), refined_element_ty);
}
SmallVector<int64_t, 4> refined_shape;
if (!GetCastCompatibleShape(a_tt.getShape(), b_tt.getShape(), &refined_shape))
return nullptr;
return RankedTensorType::get(refined_shape, refined_element_ty);
}
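// Refinement examples: tensor<?x4xf32> with tensor<3x?xf32> yields
// tensor<3x4xf32>; tensor<*xf32> with tensor<2xf32> yields tensor<2xf32>;
// incompatible element types yield nullptr.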
bool HasCompatibleElementTypes(Type lhs, Type rhs,
bool may_ignore_ref_type_lhs) {
return GetCastCompatibleType(lhs, rhs, may_ignore_ref_type_lhs) != nullptr;
}
bool AreCastCompatible(TypeRange types) {
Type common = types.front();
for (auto type : types.drop_front()) {
Type refined_type =
GetCastCompatibleType(common, type, false);
if (!refined_type) return false;
common = refined_type;
}
return true;
}
bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto pair : llvm::zip(lhs, rhs)) {
auto lhs_i = std::get<0>(pair);
auto rhs_i = std::get<1>(pair);
if (!AreCastCompatible({lhs_i, rhs_i})) return false;
}
return true;
}
static Type GetDefaultTypeOf(TensorFlowRefType type) {
return type.RemoveRef();
}
template <typename ComposedType>
Type DropTypeHelper(Type ty) {
Type element_ty = getElementTypeOrSelf(ty);
auto composed_type = mlir::dyn_cast<ComposedType>(element_ty);
if (!composed_type) return ty;
Type default_ty = GetDefaultTypeOf(composed_type);
if (auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty)) {
return RankedTensorType::get(ranked_ty.getShape(), default_ty);
} else if (mlir::dyn_cast<UnrankedTensorType>(ty)) {
return UnrankedTensorType::get(default_ty);
} else {
return default_ty;
}
}
Type DropSubTypes(Type ty) {
return DropTypeHelper<TensorFlowTypeWithSubtype>(ty);
}
Type DropRefType(Type ty) { return DropTypeHelper<TensorFlowRefType>(ty); }
Type DropRefAndSubTypes(Type ty) { return DropRefType(DropSubTypes(ty)); }
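// Illustrative effects of the droppers above: a tensor of !tf_type.int32ref
// becomes a tensor of i32, and tensor<!tf_type.resource<tensor<f32>>>
// becomes tensor<!tf_type.resource>; DropRefAndSubTypes applies both.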
Attribute TensorProtoAttr::parse(AsmParser& parser, Type type) {
if (parser.parseColon()) {
return nullptr;
}
std::string data;
if (parser.parseString(&data)) {
return nullptr;
}
if (data.size() < 2 || data.substr(0, 2) != "0x") {
parser.emitError(parser.getNameLoc(), "Hex string doesn't start with `0x`");
return nullptr;
}
auto shapedType = mlir::dyn_cast<ShapedType>(type);
if (!shapedType) return nullptr;
std::string bytes_data = absl::HexStringToBytes(data.substr(2));
return TensorProtoAttr::get(shapedType, bytes_data);
}
void TensorProtoAttr::print(mlir::AsmPrinter& printer) const {
StringRef bytes_str = getValue();
printer << " : \"0x" << llvm::toHex(bytes_str) << "\"";
}
}
} | #include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
TEST(TFTypesDialect, TestFuncAttrSubElement) {
const char *const code = R"mlir(
"test.op"() {func = #tf_type.func<@foo, {bar = @foo}>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
Builder b(&context);
StringAttr baz = b.getStringAttr("baz");
ASSERT_TRUE(succeeded(SymbolTable::replaceAllSymbolUses(
b.getStringAttr("foo"), baz, test_op.getParentRegion())));
auto func_attr = mlir::dyn_cast<tf_type::FuncAttr>(test_op.getAttr("func"));
ASSERT_TRUE(func_attr);
auto sym_ref = FlatSymbolRefAttr::get(baz);
EXPECT_TRUE(func_attr.getName() == sym_ref);
auto bar_ref = func_attr.getAttrs().get("bar");
EXPECT_TRUE(bar_ref == sym_ref);
}
TEST(TFTypesDialect, ParsesDimensionListWithZero) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x128>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, 128));
}
TEST(TFTypesDialect, ParsesDimensionListWithQuestionMark) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x?x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(),
testing::ElementsAre(0, std::numeric_limits<int64_t>::min(), 2));
}
TEST(TFTypesDialect, ParsesDimensionListWithNegativeOne) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x-1x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, -1, 2));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/types/dialect.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/types/dialect_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f24d4774-ec82-49af-b0a3-e694b8beb8e4 | cpp | tensorflow/tensorflow | shape_inference_utils | tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.cc | tensorflow/core/ir/utils/shape_inference_utils_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.h"
#include <optional>
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#define DEBUG_TYPE "tf-shape-inference-utils"
namespace mlir {
namespace TF {
LogicalResult InferReturnTypeComponentsForTFOp(
std::optional<Location> location, Operation* op, int64_t graph_version,
tfg::OperandAsConstantFn operand_as_constant_fn,
tfg::OpResultAsShapeFn op_result_as_shape_fn,
tfg::ResultElementTypeFn result_element_type_fn,
SmallVectorImpl<ShapedTypeComponents>& inferred_return_shapes) {
assert(op->getName().getDialectNamespace() ==
TensorFlowDialect::getDialectNamespace());
return tfg::InferReturnTypeComponentsForTFOp(
location, op, op->getOperands(), graph_version, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
tensorflow::GetAttrValuesFromOperation, inferred_return_shapes);
}
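// This overload is a thin adapter: it forwards the op's operands and plugs
// in tensorflow::GetAttrValuesFromOperation to convert TF-dialect
// attributes into the AttrValueMap expected by the shared tfg
// shape-inference implementation.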
}
} | #include "tensorflow/core/ir/utils/shape_inference_utils.h"
#include <vector>
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
namespace mlir {
namespace tfg {
namespace {
const char *const code = R"mlir(
tfg.func @test(%arg : tensor<32x?x256x4xi32> {tfg.name = "arg"}, %arg_1 : tensor<*xi32> {tfg.name = "arg1", tf._output_shapes = [5 : i32]}) -> (tensor<2x2xf32>) {
%Placeholder, %ctl = Placeholder name("placeholder") {dtype = f32, shape = #tf_type.shape<>} : () -> (tensor<f32>)
%Const, %ctl_0 = Const name("c0") {dtype = f32, value = dense<1.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%Const_1, %ctl_2 = Const name("c1") {dtype = f32, value = dense<2.000000e+00> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%IdentityN:3, %ctl_3 = IdentityN(%Const, %Placeholder, %Const_1) name("id_n") {T = [f32, f32, f32]} : (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>) -> (tensor<2x2xf32>, tensor<f32>, tensor<2x2xf32>)
%Identity, %ctl_6 = Identity(%IdentityN#1) name("id1") {T = f32} : (tensor<f32>) -> (tensor<f32>)
%Add, %ctl_7 = Add(%Const, %IdentityN#1) name("add") {T = f32} : (tensor<2x2xf32>, tensor<f32>) -> (tensor<2x2xf32>)
%Const_1000, %ctl_9 = Const name("c1000") {dtype = i32, value = dense<1000> : tensor<i32>} : () -> (tensor<i32>)
%Const_2, %ctl_10 = Const name("c2") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_3, %ctl_11 = Const name("c3") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>)
%Range, %ctl_range = Range(%Const_2, %Const_1000, %Const_3) name("range") {Tidx = i32} : (tensor<i32>, tensor<i32>, tensor<i32>) -> tensor<1000xi32>
%Const_4, %ctl_12 = Const name("c4") {dtype = i32, value = dense<[32, -1, 4]> : tensor<3xi32>} : () -> (tensor<3xi32>)
%Reshape, %ctl_13 = Reshape(%arg, %Const_4) name("reshape") {T = i32} : (tensor<32x?x256x4xi32>, tensor<3xi32>) -> tensor<32x?x4xi32>
%Const_5, %ctl_14 = Const name("TensorListReserve/num_elements") {dtype = i32, value = dense<3> : tensor<i32>} : () -> (tensor<i32>)
%Const_6, %ctl_15 = Const name("TensorListReserve/element_shape") {dtype = i32, value = dense<2> : tensor<2xi32>} : () -> (tensor<2xi32>)
%TensorListReserve, %ctl_16 = TensorListReserve(%Const_6, %Const_5) name("TensorListReserve") {element_dtype = f32, shape_type = i32} : (tensor<2xi32>, tensor<i32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Const_7, %ctl_17 = Const name("index") {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Const_8, %ctl_18 = Const name("item") {dtype = f32, value = dense<[[1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00]]> : tensor<2x2xf32>} : () -> (tensor<2x2xf32>)
%TensorListSetItem, %ctl_19 = TensorListSetItem(%TensorListReserve, %Const_7, %Const_8) name("TensorListSetItem") {element_dtype = f32} : (tensor<!tf_type.variant<tensor<2x2xf32>>>, tensor<i32>, tensor<2x2xf32>) -> (tensor<!tf_type.variant<tensor<2x2xf32>>>)
%Identity_1, %ctl_20 = Identity(%arg_1) name("id2") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
return (%Const_1) : tensor<2x2xf32>
}
)mlir";
}
class ShapeInferenceTest : public ::testing::Test {
protected:
using OpShapeInfo = SmallVector<ShapedTypeComponents>;
ShapeInferenceTest() {
context_.getOrLoadDialect<tfg::TFGraphDialect>();
module_ = mlir::parseSourceString<mlir::ModuleOp>(code, &context_);
assert(module_);
}
template <typename OpRange>
void VerifyInferredShapes(OpRange &&ops,
SmallVector<OpShapeInfo> &inferred_result,
bool check_type) {
for (auto it : llvm::zip(ops, inferred_result)) {
Operation &op = std::get<0>(it);
OpShapeInfo &info = std::get<1>(it);
EXPECT_EQ(op.getNumResults() - 1, info.size());
for (int i = 0; i < op.getNumResults() - 1; ++i) {
ShapedType shape = mlir::cast<ShapedType>(op.getResultTypes()[i]);
EXPECT_EQ(shape.hasRank(), info[i].hasRank());
if (shape.hasRank()) EXPECT_EQ(shape.getShape(), info[i].getDims());
if (check_type)
EXPECT_EQ(shape.getElementType(), info[i].getElementType());
}
}
}
ModuleOp GetModule() { return module_.get(); }
MLIRContext *GetContext() { return &context_; }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
};
TEST_F(ShapeInferenceTest, TestShapeAndTypeInference) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return operand.getDefiningOp()->getAttr("value");
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
auto rt = mlir::dyn_cast<RankedTensorType>(op_result.getType());
if (!rt || rt.getRank() != 1 || !rt.hasStaticShape()) return {};
std::vector<DimensionHandle> dims(rt.getDimSize(0), ic.UnknownDim());
auto attr =
op_result.getDefiningOp()->getAttrOfType<DenseElementsAttr>("value");
for (auto element : llvm::enumerate(attr.getValues<APInt>()))
dims[element.index()] = ic.MakeDim(element.value().getSExtValue());
return ic.MakeShape(dims);
};
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
auto exclude_reshape_operand_as_constant_fn =
[&](Value operand) -> Attribute {
Operation *defining_op = operand.getDefiningOp();
if (!defining_op || defining_op->getName().getStringRef() == "tfg.Reshape")
return BoolAttr::get(GetContext(), false);
return operand.getDefiningOp()->getAttr("value");
};
all_results.clear();
for (Operation &op : block.without_terminator()) {
auto result_element_type_fn = [&](int idx) -> Type {
return mlir::cast<ShapedType>(op.getResult(idx).getType())
.getElementType();
};
SmallVector<ShapedTypeComponents> results;
EXPECT_TRUE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
1010,
exclude_reshape_operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
nullptr, results)
.succeeded());
all_results.push_back(results);
}
VerifyInferredShapes(func.getBody().begin()->without_terminator(),
all_results,
true);
}
TEST_F(ShapeInferenceTest, TestInferenceFailure) {
auto operand_as_constant_fn = [](Value operand) -> Attribute {
return nullptr;
};
auto op_result_as_shape_fn = [](InferenceContext &ic,
OpResult op_result) -> ShapeHandle {
return {};
};
auto result_element_type_fn = [](int idx) -> Type { return nullptr; };
GraphFuncOp func = GetModule().lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
Block &block = *func.getBody().begin();
SmallVector<SmallVector<ShapedTypeComponents>> all_results;
auto get_empty_attr_values_fn =
[](Operation *, llvm::StringRef, const tensorflow::OpRegistrationData *,
bool, tensorflow::AttrValueMap *) { return absl::OkStatus(); };
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
auto result = InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
        /*graph_version=*/1010, operand_as_constant_fn, op_result_as_shape_fn,
result_element_type_fn, get_empty_attr_values_fn, results);
if (op.getName().getStringRef() == "tfg.Const" ||
op.getName().getStringRef() == "tfg.IdentityN" ||
op.getName().getStringRef() == "tfg.PlaceHolder" ||
op.getName().getStringRef() == "tfg.Range")
EXPECT_TRUE(failed(result));
}
auto error_attr_values_fn = [](Operation *, llvm::StringRef,
const tensorflow::OpRegistrationData *, bool,
tensorflow::AttrValueMap *) {
return tensorflow::errors::Unknown("Intended error");
};
for (Operation &op : block.without_terminator()) {
SmallVector<ShapedTypeComponents> results;
EXPECT_FALSE(InferReturnTypeComponentsForTFOp(
op.getLoc(), &op, TFOp(&op).getNonControlOperands(),
                     /*graph_version=*/1010, operand_as_constant_fn,
op_result_as_shape_fn, result_element_type_fn,
error_attr_values_fn, results)
.succeeded());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/shape_inference_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/utils/shape_inference_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e2ab1c0d-c7b7-446e-9d85-8322e4227a2e | cpp | tensorflow/tensorflow | convert_tensor | tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
using llvm::ArrayRef;
using llvm::SmallVector;
using mlir::Builder;
using mlir::DenseStringElementsAttr;
using mlir::ElementsAttr;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::Type;
using tensorflow::errors::InvalidArgument;
static TensorProto ConvertToProto(const Tensor& input_tensor,
bool use_tensor_content = true) {
TensorProto tensor_proto;
if (use_tensor_content)
input_tensor.AsProtoTensorContent(&tensor_proto);
else
input_tensor.AsProtoField(&tensor_proto);
return tensor_proto;
}
static std::string MangleTensor(const Tensor& tensor) {
return mangling_util::MangleTensor(ConvertToProto(tensor));
}
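// Wraps the tensor's flat data in a DenseElementsAttr of the given shaped
// type; DenseElementsAttr::get copies the values into the MLIR context.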
template <typename T>
absl::StatusOr<ElementsAttr> ConvertFlatTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<T>();
return ElementsAttr(mlir::DenseElementsAttr::get(
type, llvm::ArrayRef(arr.data(), arr.size())));
}
ElementsAttr ConvertTensorOfCustomFloatType(const Tensor& tensor,
RankedTensorType type) {
auto buffer =
llvm::ArrayRef(static_cast<char*>(tensor.data()), tensor.TotalBytes());
return mlir::DenseElementsAttr::getFromRawBuffer(type, buffer);
}
absl::StatusOr<ElementsAttr> ConvertStringTensor(const Tensor& input_tensor,
ShapedType type) {
auto arr = input_tensor.flat<tstring>();
std::vector<mlir::StringRef> string_refs;
string_refs.reserve(arr.size());
for (int i = 0; i < arr.size(); i++) {
const auto& val = arr(i);
string_refs.push_back({val.data(), val.size()});
}
return ElementsAttr(DenseStringElementsAttr::get(type, string_refs));
}
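// Converts a tensorflow::Tensor into an MLIR ElementsAttr. Common numeric
// types go through the CONVERT_FLAT dispatch below; custom float and string
// types take dedicated paths, and any other dtype is preserved opaquely as a
// mangled TensorProtoAttr.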
absl::StatusOr<ElementsAttr> ConvertTensor(const Tensor& input_tensor,
Builder* builder) {
const auto& input_dtype = input_tensor.dtype();
const auto& input_shape = input_tensor.shape();
Type elt_type;
TF_RETURN_IF_ERROR(ConvertDataType(input_dtype, *builder, &elt_type));
SmallVector<int64_t, 4> shape;
ConvertToMlirShape(input_shape, &shape);
auto type = RankedTensorType::get(shape, elt_type);
#define CONVERT_FLAT(DTYPE, CTYPE) \
case DTYPE: \
return ConvertFlatTensor<CTYPE>(input_tensor, type);
switch (input_dtype) {
CONVERT_FLAT(DT_BOOL, bool)
CONVERT_FLAT(DT_FLOAT, float)
CONVERT_FLAT(DT_DOUBLE, double)
CONVERT_FLAT(DT_INT8, int8)
CONVERT_FLAT(DT_INT16, int16)
CONVERT_FLAT(DT_INT32, int32)
CONVERT_FLAT(DT_INT64, int64_t)
CONVERT_FLAT(DT_UINT8, uint8)
CONVERT_FLAT(DT_UINT16, uint16)
CONVERT_FLAT(DT_UINT32, uint32)
CONVERT_FLAT(DT_UINT64, uint64)
CONVERT_FLAT(DT_COMPLEX64, std::complex<float>)
CONVERT_FLAT(DT_COMPLEX128, std::complex<double>)
case DT_BFLOAT16:
case DT_HALF:
case DT_FLOAT8_E5M2:
case DT_FLOAT8_E4M3FN:
return ConvertTensorOfCustomFloatType(input_tensor, type);
case DT_STRING:
return ConvertStringTensor(input_tensor, type);
default:
return ElementsAttr(
mlir::TF::TensorProtoAttr::get(type, MangleTensor(input_tensor)));
}
#undef CONVERT_FLAT
}
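// Returns the number of elements materialized in the proto's typed value
// fields, or -1 when the data lives in tensor_content or the dtype has no
// typed field handled here. Note that DT_BFLOAT16 shares the half_val field.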
int NumberOfMaterializedElements(const TensorProto& tensor) {
if (!tensor.tensor_content().empty()) return -1;
#define MATCH(DTYPE, FIELD) \
case DTYPE: \
return tensor.FIELD##_val().size()
switch (tensor.dtype()) {
MATCH(DT_FLOAT, float);
MATCH(DT_DOUBLE, double);
MATCH(DT_INT8, int);
MATCH(DT_UINT8, int);
MATCH(DT_INT16, int);
MATCH(DT_UINT16, int);
MATCH(DT_INT32, int);
MATCH(DT_UINT32, uint32);
MATCH(DT_INT64, int64);
MATCH(DT_UINT64, uint64);
MATCH(DT_BOOL, bool);
MATCH(DT_HALF, half);
MATCH(DT_BFLOAT16, half);
MATCH(DT_STRING, string);
case DT_COMPLEX64:
case DT_COMPLEX128:
default:
return -1;
}
}
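// When the proto materializes exactly one value but its shape holds more than
// one element, the value is treated as a splat: it is converted once as a
// single-element tensor and broadcast into a SplatElementsAttr of the original
// shape, so the full buffer is never materialized. Sketch:
//
//   dtype: DT_FLOAT  shape: [1 << 35]  float_val: 42.0
//
// converts to a splat attribute backed by a single float.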
absl::StatusOr<ElementsAttr> ConvertTensorProto(const TensorProto& input_tensor,
Builder* builder) {
TensorShape input_tensor_shape(input_tensor.tensor_shape());
if (NumberOfMaterializedElements(input_tensor) == 1 &&
input_tensor_shape.num_elements() > 1) {
TensorProto tensor_copy = input_tensor;
auto* shape = tensor_copy.mutable_tensor_shape();
shape->clear_dim();
shape->add_dim()->set_size(1);
TF_ASSIGN_OR_RETURN(ElementsAttr single_attr,
ConvertTensorProto(tensor_copy, builder));
llvm::SmallVector<int64_t> original_dimensions;
for (auto dim : input_tensor_shape) original_dimensions.push_back(dim.size);
return ElementsAttr(mlir::SplatElementsAttr::get(
single_attr.getShapedType().clone(original_dimensions),
single_attr.getValues<mlir::Attribute>()[0]));
}
Tensor t;
if (!t.FromProto(input_tensor))
return InvalidArgument("Failed to parse input_tensor.");
return ConvertTensor(t, builder);
}
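// MLIR marks unknown dimensions with ShapedType::kDynamic while
// TensorShapeProto uses kTFDynamicSize (-1); the helpers below translate
// between the two encodings.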
void ConvertToTensorShapeProto(ArrayRef<int64_t> shape,
TensorShapeProto* output_shape) {
for (auto d : shape) {
output_shape->add_dim()->set_size(ShapedType::isDynamic(d) ? kTFDynamicSize
: d);
}
}
PartialTensorShape ConvertTypeToTensorShape(const mlir::Type& type) {
if (mlir::isa<mlir::UnrankedTensorType>(type)) {
return PartialTensorShape();
}
if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
TensorShapeProto tensor_shape_proto;
ConvertToTensorShapeProto(tensor_type.getShape(), &tensor_shape_proto);
return PartialTensorShape(tensor_shape_proto);
}
return TensorShape();
}
mlir::TF::ShapeAttr ConvertTypeToTensorShapeAttr(const mlir::Type& type) {
if (mlir::isa<mlir::UnrankedTensorType>(type)) {
return mlir::TF::ShapeAttr::get(type.getContext(), std::nullopt);
}
if (auto tensor_type = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
return mlir::TF::ShapeAttr::get(type.getContext(), tensor_type.getShape());
}
return mlir::TF::ShapeAttr::get(type.getContext(), ArrayRef<int64_t>());
}
absl::StatusOr<TensorSpecProto> ConvertTypeToTensorSpecProto(
const mlir::Type& type) {
DataType dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(type, &dtype));
TensorSpecProto tensor_spec;
tensor_spec.set_dtype(dtype);
*tensor_spec.mutable_shape() = ConvertTypeToTensorShape(type).AsProto();
return tensor_spec;
}
absl::StatusOr<mlir::Attribute> ConvertTensorShapeProto(
const TensorShapeProto& shape, mlir::MLIRContext* context) {
if (shape.unknown_rank())
return mlir::TF::ShapeAttr::get(context, std::nullopt);
llvm::SmallVector<int64_t, 4> dims;
dims.reserve(shape.dim().size());
for (const auto& dim : shape.dim()) {
dims.push_back(dim.size() == kTFDynamicSize ? ShapedType::kDynamic
: dim.size());
}
return mlir::TF::ShapeAttr::get(context, llvm::ArrayRef(dims));
}
void ConvertStringElementsAttr(
const DenseStringElementsAttr attr,
protobuf::RepeatedPtrField<std::string>* output) {
for (const auto& val : attr.getRawStringData())
output->Add({val.data(), val.size()});
}
template <typename T>
void ConvertComplexElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output) {
for (const auto& val : attr.getValues<std::complex<T>>()) {
output->Add(val.real());
output->Add(val.imag());
}
}
Status ConvertTensorProtoAttr(const mlir::TF::TensorProtoAttr attr,
TensorProto* output_tensor) {
auto mangled_tensor = attr.getValue();
absl::string_view tensor_view(mangled_tensor.data(), mangled_tensor.size());
return mangling_util::DemangleTensor(tensor_view, output_tensor);
}
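// TensorProto uses a compact splat encoding: an all-zero tensor is written as
// an empty repeated field and a non-zero splat as a single repeated value.
// The Convert*ElementsAttr helpers below follow that convention, and for
// non-splat data copy the raw bytes into tensor_content where possible.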
template <typename T>
void ConvertElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0)) output->Add(attr.getSplatValue<T>());
} else {
output->Reserve(attr.getNumElements());
for (auto value : attr.getValues<T>()) output->AddAlreadyReserved(value);
}
}
template <typename T, typename Cord>
void ConvertFloatElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0)) output->Add(attr.getSplatValue<T>());
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertHalfElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<int>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<Eigen::half>() != Eigen::half(0))
output->Add(
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<Eigen::half>()));
} else {
output->Reserve(attr.getNumElements());
for (const Eigen::half value : attr.getValues<Eigen::half>())
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
template <typename T, typename U = T, typename Cord>
void ConvertIntElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
template <typename T, typename U = T, typename Cord>
void ConvertUIntElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<T>* output,
Cord* tensor_content) {
if (attr.isSplat()) {
if (attr.getSplatValue<U>() != U(0))
output->Add(static_cast<T>(attr.getSplatValue<U>()));
} else {
port::CopyFromArray(tensor_content, attr.getRawData().data(),
attr.getRawData().size());
}
}
void ConvertBfloat16ElementsAttr(const mlir::DenseElementsAttr attr,
protobuf::RepeatedField<int>* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<bfloat16>() != bfloat16(0))
output->Add(
Eigen::numext::bit_cast<uint16_t>(attr.getSplatValue<bfloat16>()));
} else {
output->Reserve(attr.getNumElements());
for (const bfloat16 value : attr.getValues<bfloat16>())
output->AddAlreadyReserved(Eigen::numext::bit_cast<uint16_t>(value));
}
}
template <typename T>
void ConvertFloat8ElementsAttr(const mlir::DenseElementsAttr attr,
std::string* output) {
if (attr.isSplat()) {
if (attr.getSplatValue<T>() != T(0))
output->push_back(
Eigen::numext::bit_cast<uint8_t>(attr.getSplatValue<T>()));
} else {
output->reserve(attr.getNumElements());
for (const T value : attr.getValues<T>())
output->push_back(Eigen::numext::bit_cast<uint8_t>(value));
}
}
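// Serializes an ElementsAttr back into a TensorProto, selecting the typed
// value field (or tensor_content) that matches the converted DataType.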
Status ConvertToTensorProto(const ElementsAttr attr, TensorProto* output) {
auto type = attr.getShapedType();
auto shape = type.getShape();
DataType output_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(type, &output_dtype));
output->set_dtype(output_dtype);
ConvertToTensorShapeProto(shape, output->mutable_tensor_shape());
if (auto tensor_attr = mlir::dyn_cast<mlir::TF::TensorProtoAttr>(attr))
return ConvertTensorProtoAttr(tensor_attr, output);
auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr);
if (!dense_attr) return errors::InvalidArgument("Unsupported elements attr");
switch (output_dtype) {
case DT_BOOL:
ConvertElementsAttr(dense_attr, output->mutable_bool_val());
break;
case DT_BFLOAT16:
ConvertBfloat16ElementsAttr(dense_attr, output->mutable_half_val());
break;
case DT_COMPLEX64:
ConvertComplexElementsAttr(dense_attr, output->mutable_scomplex_val());
break;
case DT_COMPLEX128:
ConvertComplexElementsAttr(dense_attr, output->mutable_dcomplex_val());
break;
case DT_DOUBLE:
ConvertFloatElementsAttr(dense_attr, output->mutable_double_val(),
output->mutable_tensor_content());
break;
case DT_HALF:
ConvertHalfElementsAttr(dense_attr, output->mutable_half_val());
break;
case DT_FLOAT:
ConvertFloatElementsAttr(dense_attr, output->mutable_float_val(),
output->mutable_tensor_content());
break;
case DT_FLOAT8_E5M2:
ConvertFloat8ElementsAttr<tsl::float8_e5m2>(dense_attr,
output->mutable_float8_val());
break;
case DT_FLOAT8_E4M3FN:
ConvertFloat8ElementsAttr<tsl::float8_e4m3fn>(
dense_attr, output->mutable_float8_val());
break;
case tensorflow::DT_INT4:
ConvertIntElementsAttr<int, tsl::int4>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case tensorflow::DT_UINT4:
ConvertUIntElementsAttr<int, tsl::uint4>(
dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_QUINT8:
case DT_INT8:
ConvertUIntElementsAttr<int, int8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_QUINT16:
case DT_INT16:
ConvertIntElementsAttr<int, int16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_INT32:
ConvertIntElementsAttr(dense_attr, output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_INT64:
ConvertIntElementsAttr(dense_attr, output->mutable_int64_val(),
output->mutable_tensor_content());
break;
case DT_STRING:
ConvertStringElementsAttr(mlir::cast<DenseStringElementsAttr>(dense_attr),
output->mutable_string_val());
break;
case DT_UINT8:
ConvertUIntElementsAttr<int, uint8_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_UINT16:
ConvertUIntElementsAttr<int, uint16_t>(dense_attr,
output->mutable_int_val(),
output->mutable_tensor_content());
break;
case DT_UINT32:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint32_val(),
output->mutable_tensor_content());
break;
case DT_UINT64:
ConvertUIntElementsAttr(dense_attr, output->mutable_uint64_val(),
output->mutable_tensor_content());
break;
default:
return errors::Unimplemented(absl::StrCat("Unimplemented data type ",
DataTypeString(output_dtype)));
}
return absl::OkStatus();
}
Status ConvertToTensor(const mlir::ElementsAttr attr, Tensor* output_tensor) {
TensorProto tensor_proto;
TF_RETURN_IF_ERROR(ConvertToTensorProto(attr, &tensor_proto));
if (!output_tensor->FromProto(tensor_proto)) {
return InvalidArgument("Couldn't convert tensor proto to tensor.");
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include <cstring>
#include <initializer_list>
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "xla/test.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
static void RegisterDialects(mlir::MLIRContext &context) {
context.loadDialect<mlir::TF::TensorFlowDialect>();
}
TEST(ConvertTypeToTensorTypeTest, UnrankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape =
ConvertTypeToTensorShape(mlir::UnrankedTensorType::get(b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, NonFullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
GetTypeFromTFTensorShape({-1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({-1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, FullyDefinedRankedTensorType) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
EXPECT_TRUE(output_shape.IsIdenticalTo(PartialTensorShape({1, 2, 3})));
}
TEST(ConvertTypeToTensorTypeTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
PartialTensorShape output_shape = ConvertTypeToTensorShape(b.getF32Type());
EXPECT_TRUE(output_shape.IsIdenticalTo(TensorShape()));
}
TEST(ConvertTypeToTensorTypeTest, ConvertStringTensor) {
mlir::MLIRContext context;
RegisterDialects(context);
mlir::Builder b(&context);
Tensor tensor(DT_STRING, TensorShape({1, 2, 2, 1}));
EXPECT_EQ(4, tensor.NumElements());
auto Tt = tensor.flat<tstring>();
Tt.setValues({"one", "two", "three", "four"});
auto value_or_status = ConvertTensor(tensor, &b);
ASSERT_TRUE(value_or_status.ok());
auto attr = value_or_status.value();
EXPECT_TRUE(mlir::isa<mlir::DenseStringElementsAttr>(attr));
auto string_attr = mlir::cast<mlir::DenseStringElementsAttr>(attr);
auto string_values = string_attr.getRawStringData();
ASSERT_EQ(string_values.size(), 4);
EXPECT_EQ(string_values[0], mlir::StringRef("one"));
EXPECT_EQ(string_values[1], mlir::StringRef("two"));
EXPECT_EQ(string_values[2], mlir::StringRef("three"));
EXPECT_EQ(string_values[3], mlir::StringRef("four"));
}
class ConvertTensorTest : public ::testing::Test {
protected:
template <typename T>
void VerifyConversion(std::initializer_list<T> values, DataType dtype,
mlir::Type expected_ty) {
mlir::Builder b(expected_ty.getContext());
Tensor tensor(dtype, TensorShape({static_cast<int64_t>(values.size())}));
tensor.flat<T>().setValues(values);
auto value_or = ConvertTensor(tensor, &b);
TF_ASSERT_OK(value_or.status());
auto attr = value_or.value();
EXPECT_EQ(attr.getShapedType().getElementType(), expected_ty);
Tensor out;
TF_ASSERT_OK(ConvertToTensor(attr, &out));
test::ExpectTensorEqual<T>(tensor, out);
}
};
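// Round-trip check: convert a host Tensor to an ElementsAttr, verify the
// element type, convert back, and require exact equality of the tensors.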
TEST_F(ConvertTensorTest, Simple) {
mlir::MLIRContext context;
RegisterDialects(context);
ASSERT_NO_FATAL_FAILURE(VerifyConversion<Eigen::half>(
{Eigen::half(1.0)}, DT_HALF, mlir::FloatType::getF16(&context)));
ASSERT_NO_FATAL_FAILURE(
VerifyConversion<bfloat16>({bfloat16(1.0), bfloat16(-1.0)}, DT_BFLOAT16,
mlir::FloatType::getBF16(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<float>(
{1.0, -1.0}, DT_FLOAT, mlir::FloatType::getF32(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<double>(
{1.0, -1.0}, DT_DOUBLE, mlir::FloatType::getF64(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e5m2>(
{tsl::float8_e5m2{1.0}, tsl::float8_e5m2{-1.0}}, DT_FLOAT8_E5M2,
mlir::FloatType::getFloat8E5M2(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<tsl::float8_e4m3fn>(
{tsl::float8_e4m3fn{1.0}, tsl::float8_e4m3fn{-1.0}}, DT_FLOAT8_E4M3FN,
mlir::FloatType::getFloat8E4M3FN(&context)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int4>(
{static_cast<int4>(1), static_cast<int4>(-1)}, DT_INT4,
mlir::IntegerType::get(&context, 4,
mlir::IntegerType::SignednessSemantics::Signed)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int8>(
{1, -1}, DT_INT8, mlir::IntegerType::get(&context, 8)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int16>(
{1, -1}, DT_INT16, mlir::IntegerType::get(&context, 16)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int32>(
{1, -1}, DT_INT32, mlir::IntegerType::get(&context, 32)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<int64_t>(
{1, -1}, DT_INT64, mlir::IntegerType::get(&context, 64)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint4>(
{static_cast<uint4>(1), static_cast<uint4>(2)}, DT_UINT4,
mlir::IntegerType::get(
&context, 4, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint8>(
{1, 2}, DT_UINT8,
mlir::IntegerType::get(
&context, 8, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint16>(
{1, 2}, DT_UINT16,
mlir::IntegerType::get(
&context, 16, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint32>(
{1, 2}, DT_UINT32,
mlir::IntegerType::get(
&context, 32, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<uint64>(
{1, 2}, DT_UINT64,
mlir::IntegerType::get(
&context, 64, mlir::IntegerType::SignednessSemantics::Unsigned)));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<float>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX64,
mlir::ComplexType::get(mlir::FloatType::getF32(&context))));
ASSERT_NO_FATAL_FAILURE(VerifyConversion<std::complex<double>>(
{{0.0, 1.0}, {1.0, 0.0}}, DT_COMPLEX128,
mlir::ComplexType::get(mlir::FloatType::getF64(&context))));
}
bool IsSplat(mlir::ElementsAttr attr) {
return mlir::cast<mlir::DenseElementsAttr>(attr).isSplat();
}
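// A proto with 2^35 logical elements but a single materialized float must
// convert to a splat attribute rather than a fully materialized buffer.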
TEST(ConvertTensorProtoTest, SplatTensor) {
TensorProto tensor;
tensor.set_dtype(DT_FLOAT);
tensor.mutable_tensor_shape()->add_dim()->set_size(1ULL << 35);
tensor.add_float_val(42.0);
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(tensor, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({1ULL << 35}, builder.getF32Type()),
42.0f)),
ResultOf(IsSplat, IsTrue())));
}
TEST(ConvertTensorProtoTest, NonSplatTensor) {
TensorProto proto = tensor::CreateTensorProto<float>(
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
mlir::MLIRContext context;
mlir::Builder builder(&context);
TF_ASSERT_OK_AND_ASSIGN(mlir::ElementsAttr attribute,
ConvertTensorProto(proto, &builder));
EXPECT_THAT(
attribute,
AllOf(Eq(mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get({2, 2}, builder.getF32Type()),
{1.0f, 2.0f, 3.0f, 4.0f})),
ResultOf(IsSplat, IsFalse())));
}
TEST(ConvertTypeToTensorSpecProtoTest, UnrankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::UnrankedTensorType::get(b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_TRUE(output_proto->shape().unknown_rank());
}
TEST(ConvertTypeToTensorSpecProtoTest, RankedTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(
mlir::RankedTensorType::get({1, 2, 3}, b.getF32Type()));
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_EQ(output_proto->shape().dim_size(), 3);
EXPECT_EQ(output_proto->shape().dim().at(0).size(), 1);
EXPECT_EQ(output_proto->shape().dim().at(1).size(), 2);
EXPECT_EQ(output_proto->shape().dim().at(2).size(), 3);
}
TEST(ConvertTypeToTensorSpecProtoTest, ScalarTensorType) {
mlir::MLIRContext context;
mlir::Builder b(&context);
auto output_proto = ConvertTypeToTensorSpecProto(b.getF32Type());
TF_ASSERT_OK(output_proto.status());
EXPECT_EQ(output_proto->dtype(), DT_FLOAT);
EXPECT_FALSE(output_proto->shape().unknown_rank());
EXPECT_EQ(output_proto->shape().dim_size(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab71cfba-e3ad-4152-9ff5-0fff1613d9b5 | cpp | tensorflow/tensorflow | feature_util | tensorflow/core/example/feature_util.cc | tensorflow/core/example/feature_util_test.cc | #include "tensorflow/core/example/feature_util.h"
#include <string>
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace internal {
Feature& ExampleFeature(absl::string_view name, Example* example) {
return *GetFeature(name, example);
}
}
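// The typed HasFeature<T> specializations check both that the key is present
// and that the stored Feature's kind_case matches T's list type
// (int64_list / float_list / bytes_list).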
template <>
bool HasFeature<>(absl::string_view key, const Features& features) {
return features.feature().contains(internal::ProtoMapKey(key));
}
template <>
bool HasFeature<protobuf_int64>(absl::string_view key,
const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kInt64List);
}
template <>
bool HasFeature<float>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kFloatList);
}
template <>
bool HasFeature<std::string>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
template <>
bool HasFeature<tstring>(absl::string_view key, const Features& features) {
auto it = features.feature().find(internal::ProtoMapKey(key));
return (it != features.feature().end()) &&
(it->second.kind_case() == Feature::KindCase::kBytesList);
}
bool HasFeatureList(absl::string_view key,
const SequenceExample& sequence_example) {
return sequence_example.feature_lists().feature_list().contains(
internal::ProtoMapKey(key));
}
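// The typed accessors below expose the underlying repeated fields directly.
// A minimal usage sketch (the "ids" key is illustrative, not from this file):
//
//   Example example;
//   GetFeatureValues<protobuf_int64>("ids", &example)->Add(42);
//   auto ids = GetFeatureValues<protobuf_int64>("ids", example);  // size 1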
template <>
const protobuf::RepeatedField<protobuf_int64>& GetFeatureValues<protobuf_int64>(
const Feature& feature) {
return feature.int64_list().value();
}
template <>
protobuf::RepeatedField<protobuf_int64>* GetFeatureValues<protobuf_int64>(
Feature* feature) {
return feature->mutable_int64_list()->mutable_value();
}
template <>
const protobuf::RepeatedField<float>& GetFeatureValues<float>(
const Feature& feature) {
return feature.float_list().value();
}
template <>
protobuf::RepeatedField<float>* GetFeatureValues<float>(Feature* feature) {
return feature->mutable_float_list()->mutable_value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<tstring>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
const protobuf::RepeatedPtrField<std::string>& GetFeatureValues<std::string>(
const Feature& feature) {
return feature.bytes_list().value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<tstring>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
template <>
protobuf::RepeatedPtrField<std::string>* GetFeatureValues<std::string>(
Feature* feature) {
return feature->mutable_bytes_list()->mutable_value();
}
const protobuf::RepeatedPtrField<Feature>& GetFeatureList(
absl::string_view key, const SequenceExample& sequence_example) {
return sequence_example.feature_lists()
.feature_list()
.at(internal::ProtoMapKey(key))
.feature();
}
protobuf::RepeatedPtrField<Feature>* GetFeatureList(
absl::string_view feature_list_key, SequenceExample* sequence_example) {
return (*sequence_example->mutable_feature_lists()
->mutable_feature_list())[internal::ProtoMapKey(
feature_list_key)]
.mutable_feature();
}
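// Sketch of per-step feature lists on a SequenceExample (the "frames" key is
// illustrative):
//
//   SequenceExample se;
//   Feature* step = GetFeatureList("frames", &se)->Add();
//   GetFeatureValues<float>(step)->Add(1.0f);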
template <>
void ClearFeatureValues<protobuf_int64>(Feature* feature) {
feature->mutable_int64_list()->Clear();
}
template <>
void ClearFeatureValues<float>(Feature* feature) {
feature->mutable_float_list()->Clear();
}
template <>
void ClearFeatureValues<std::string>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
void ClearFeatureValues<tstring>(Feature* feature) {
feature->mutable_bytes_list()->Clear();
}
template <>
Features* GetFeatures<Features>(Features* proto) {
return proto;
}
template <>
Features* GetFeatures<Example>(Example* proto) {
return proto->mutable_features();
}
template <>
Features* GetFeatures<SequenceExample>(SequenceExample* proto) {
return proto->mutable_context();
}
template <>
const Features& GetFeatures<Features>(const Features& proto) {
return proto;
}
template <>
const Features& GetFeatures<Example>(const Example& proto) {
return proto.features();
}
template <>
const Features& GetFeatures<SequenceExample>(const SequenceExample& proto) {
return proto.context();
}
} | #include "tensorflow/core/example/feature_util.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
const float kTolerance = 1e-5;
TEST(GetFeatureValuesInt64Test, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_int64_list()->add_value(42);
auto values = GetFeatureValues<protobuf_int64>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(GetFeatureValuesInt64Test, ReadsASingleValueFromSequenceExampleContext) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ(42, tag.Get(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValue) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1,
example.features().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.features().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<protobuf_int64>(&feature)->Add(42);
ASSERT_EQ(1, feature.int64_list().value_size());
EXPECT_EQ(42, feature.int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_EQ(1, example.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, example.context().feature().at("tag").int64_list().value(0));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistence) {
Example example;
ASSERT_FALSE(HasFeature("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(0);
EXPECT_TRUE(HasFeature("tag", example));
}
TEST(GetFeatureValuesInt64Test, CheckUntypedFieldExistenceForSequenceExample) {
SequenceExample seq_example;
ASSERT_FALSE(HasFeature("tag", seq_example));
GetFeatureValues<protobuf_int64>("tag", &seq_example)->Add(0);
EXPECT_TRUE(HasFeature("tag", seq_example));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", example));
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CheckTypedFieldExistenceForSequenceExample) {
SequenceExample sequence_example;
GetFeatureValues<float>("tag", &sequence_example)->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", sequence_example));
GetFeatureValues<protobuf_int64>("tag", &sequence_example)->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", sequence_example));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", sequence_example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(GetFeatureValuesInt64Test, CopyIterableToAField) {
Example example;
std::vector<int> values{1, 2, 3};
std::copy(values.begin(), values.end(),
protobuf::RepeatedFieldBackInserter(
GetFeatureValues<protobuf_int64>("tag", &example)));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_float_list()->add_value(3.14);
auto values = GetFeatureValues<float>(feature);
ASSERT_EQ(1, values.size());
EXPECT_NEAR(3.14, values.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(3.14);
auto tag = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_NEAR(3.14, tag.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToFeature) {
Feature feature;
GetFeatureValues<float>(&feature)->Add(3.14);
ASSERT_EQ(1, feature.float_list().value_size());
EXPECT_NEAR(3.14, feature.float_list().value(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValue) {
Example example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1,
example.features().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14,
example.features().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, WritesASingleValueToSequenceExample) {
SequenceExample example;
GetFeatureValues<float>("tag", &example)->Add(3.14);
ASSERT_EQ(1, example.context().feature().at("tag").float_list().value_size());
EXPECT_NEAR(3.14, example.context().feature().at("tag").float_list().value(0),
kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(HasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesFloatTest, CheckTypedFieldExistenceForDeprecatedMethod) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(ExampleHasFeature<float>("tag", example));
GetFeatureValues<float>("tag", &example)->Add(3.14);
EXPECT_TRUE(ExampleHasFeature<float>("tag", example));
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_NEAR(3.14, tag_ro.Get(0), kTolerance);
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromFeature) {
Feature feature;
feature.mutable_bytes_list()->add_value("FOO");
auto values = GetFeatureValues<std::string>(feature);
ASSERT_EQ(1, values.size());
EXPECT_EQ("FOO", values.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValue) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, ReadsASingleValueFromSequenceExample) {
SequenceExample example;
(*example.mutable_context()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("FOO");
auto tag = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag.size());
EXPECT_EQ("FOO", tag.Get(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueToFeature) {
Feature feature;
*GetFeatureValues<std::string>(&feature)->Add() = "FOO";
ASSERT_EQ(1, feature.bytes_list().value_size());
EXPECT_EQ("FOO", feature.bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValue) {
Example example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1,
example.features().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO",
example.features().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, WritesASingleValueSequenceExample) {
SequenceExample example;
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
ASSERT_EQ(1, example.context().feature().at("tag").bytes_list().value_size());
EXPECT_EQ("FOO", example.context().feature().at("tag").bytes_list().value(0));
}
TEST(GetFeatureValuesStringTest, CheckTypedFieldExistence) {
Example example;
GetFeatureValues<protobuf_int64>("tag", &example)->Add(42);
ASSERT_FALSE(HasFeature<std::string>("tag", example));
*GetFeatureValues<std::string>("tag", &example)->Add() = "FOO";
EXPECT_TRUE(HasFeature<std::string>("tag", example));
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainer) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesFromContainerWithStringViewKey) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
absl::string_view key("tag");
AppendFeatureValues(values, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest,
FloatValuesUsingInitializerListWithStringViewKey) {
Example example;
absl::string_view key("tag");
AppendFeatureValues({1.1, 2.2, 3.3}, key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIterators) {
Example example;
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(AppendFeatureValuesTest, FloatValuesUsingIteratorsWithStringViewKey) {
Example example;
absl::string_view key("tag");
std::vector<double> values{1.1, 2.2, 3.3};
AppendFeatureValues(values.begin(), values.end(), key, &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, FloatValuesUsingInitializerList) {
Example example;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", &example);
SetFeatureValues({10.1, 20.2, 30.3}, "tag", &example);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(10.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(20.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(30.3, tag_ro.Get(2), kTolerance);
}
TEST(SetFeatureValuesTest, ContainerOfStringView) {
Example example;
std::vector<std::string> values = {"hello", "world"};
std::vector<absl::string_view> values_string_view(values.begin(),
values.end());
SetFeatureValues(values_string_view, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(tag_ro.size(), 2);
EXPECT_EQ(tag_ro.Get(0), "hello");
EXPECT_EQ(tag_ro.Get(1), "world");
}
TEST(AppendFeatureValuesTest, Int64ValuesUsingInitializerList) {
Example example;
std::vector<protobuf_int64> values{1, 2, 3};
AppendFeatureValues(values, "tag", &example);
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ(1, tag_ro.Get(0));
EXPECT_EQ(2, tag_ro.Get(1));
EXPECT_EQ(3, tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringValuesUsingInitializerList) {
Example example;
AppendFeatureValues({"FOO", "BAR", "BAZ"}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringVariablesUsingInitializerList) {
Example example;
string string1("FOO");
string string2("BAR");
string string3("BAZ");
AppendFeatureValues({string1, string2, string3}, "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingInitializerList) {
Example example;
AppendFeatureValues({absl::string_view("FOO"), absl::string_view("BAR"),
absl::string_view("BAZ")},
"tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(AppendFeatureValuesTest, StringViewVariablesUsingIterators) {
Example example;
std::vector<absl::string_view> strings;
strings.push_back("FOO");
strings.push_back("BAR");
strings.push_back("BAZ");
AppendFeatureValues(strings.begin(), strings.end(), "tag", &example);
auto tag_ro = GetFeatureValues<std::string>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_EQ("FOO", tag_ro.Get(0));
EXPECT_EQ("BAR", tag_ro.Get(1));
EXPECT_EQ("BAZ", tag_ro.Get(2));
}
TEST(GetFeatureTest, WritesAVectorToFeature) {
Example example;
Feature* feature = GetFeature("tag", &example);
AppendFeatureValues<float>({1.1, 2.2, 3.3}, feature);
auto tag_ro = GetFeatureValues<float>("tag", example);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(GetFeatureTest, ReadsAVectorFromFeature) {
Example example;
AppendFeatureValues<float>({1.1, 2.2, 3.3}, "tag", &example);
const Feature& feature = GetFeature("tag", example);
auto tag_ro = GetFeatureValues<float>(feature);
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, ReadsASingleValueFromContext) {
SequenceExample se;
(*se.mutable_context()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto values = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, values.size());
EXPECT_EQ(42, values.Get(0));
}
TEST(SequenceExampleTest, WritesASingleValueToContext) {
SequenceExample se;
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
ASSERT_EQ(1, se.context().feature().at("tag").int64_list().value_size());
EXPECT_EQ(42, se.context().feature().at("tag").int64_list().value(0));
}
TEST(SequenceExampleTest, AppendFeatureValuesToContextSingleArg) {
SequenceExample se;
AppendFeatureValues({1.1, 2.2, 3.3}, "tag", se.mutable_context());
auto tag_ro = GetFeatureValues<float>("tag", se.context());
ASSERT_EQ(3, tag_ro.size());
EXPECT_NEAR(1.1, tag_ro.Get(0), kTolerance);
EXPECT_NEAR(2.2, tag_ro.Get(1), kTolerance);
EXPECT_NEAR(3.3, tag_ro.Get(2), kTolerance);
}
TEST(SequenceExampleTest, CheckTypedFieldExistence) {
SequenceExample se;
GetFeatureValues<float>("tag", se.mutable_context())->Add(3.14);
ASSERT_FALSE(HasFeature<protobuf_int64>("tag", se.context()));
GetFeatureValues<protobuf_int64>("tag", se.mutable_context())->Add(42);
EXPECT_TRUE(HasFeature<protobuf_int64>("tag", se.context()));
auto tag_ro = GetFeatureValues<protobuf_int64>("tag", se.context());
ASSERT_EQ(1, tag_ro.size());
EXPECT_EQ(42, tag_ro.Get(0));
}
TEST(SequenceExampleTest, ReturnsExistingFeatureLists) {
SequenceExample se;
(*se.mutable_feature_lists()->mutable_feature_list())["tag"]
.mutable_feature()
->Add();
auto feature = GetFeatureList("tag", se);
ASSERT_EQ(1, feature.size());
}
TEST(SequenceExampleTest, CreatesNewFeatureLists) {
SequenceExample se;
GetFeatureList("tag", &se)->Add();
EXPECT_EQ(1, se.feature_lists().feature_list().at("tag").feature_size());
}
TEST(SequenceExampleTest, CheckFeatureListExistence) {
SequenceExample se;
ASSERT_FALSE(HasFeatureList("tag", se));
GetFeatureList("tag", &se)->Add();
ASSERT_TRUE(HasFeatureList("tag", se));
}
TEST(SequenceExampleTest, AppendFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-1", "cam2-2"},
GetFeatureList("images", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-2\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, AppendFeatureValuesWithVectors) {
SequenceExample se;
std::vector<float> readings{1.0, 2.5, 5.0};
AppendFeatureValues(readings, GetFeatureList("movie_ratings", &se)->Add());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"feature_lists {\n"
" feature_list {\n"
" key: \"movie_ratings\"\n"
" value {\n"
" feature {\n"
" float_list {\n"
" value: 1\n"
" value: 2.5\n"
" value: 5\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetContextFeatureValuesWithInitializerList) {
SequenceExample se;
SetFeatureValues({101, 102, 103}, "ids", se.mutable_context());
SetFeatureValues({1, 2, 3}, "ids", se.mutable_context());
AppendFeatureValues({4, 5, 6}, "ids", se.mutable_context());
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 1\n"
" value: 2\n"
" value: 3\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(SequenceExampleTest, SetFeatureValuesWithInitializerList) {
SequenceExample se;
AppendFeatureValues({1, 2, 3}, "ids", se.mutable_context());
SetFeatureValues({4, 5, 6}, "ids", se.mutable_context());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"}, GetFeatureList("images", &se)->Add());
AppendFeatureValues({"cam1-0", "cam2-0"},
GetFeatureList("more-images", &se)->Add());
SetFeatureValues({"cam1-1", "cam2-1"},
GetFeatureList("more-images", &se)->Mutable(0));
SequenceExample expected_proto;
protobuf::TextFormat::ParseFromString(
"context {\n"
" feature {\n"
" key: \"ids\"\n"
" value {\n"
" int64_list {\n"
" value: 4\n"
" value: 5\n"
" value: 6\n"
" }\n"
" }\n"
" }\n"
"}\n"
"feature_lists {\n"
" feature_list {\n"
" key: \"images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-0\"\n"
" value: \"cam2-0\"\n"
" }\n"
" }\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
" feature_list {\n"
" key: \"more-images\"\n"
" value {\n"
" feature {\n"
" bytes_list {\n"
" value: \"cam1-1\"\n"
" value: \"cam2-1\"\n"
" }\n"
" }\n"
" }\n"
" }\n"
"}\n",
&expected_proto);
EXPECT_EQ(se.DebugString(), expected_proto.DebugString());
}
TEST(MaybeGetFeatureValuesTest, ReturnsNullPtr) {
const Example example;
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(tag, nullptr);
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleInt) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_int64_list()
->add_value(42);
auto tag = MaybeGetFeatureValues<protobuf_int64>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ(42, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleFloat) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_float_list()
->add_value(0.3);
auto tag = MaybeGetFeatureValues<float>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_FLOAT_EQ(0.3, tag->Get(0));
}
TEST(MaybeGetFeatureValuesTest, ReadsASingleString) {
Example example;
(*example.mutable_features()->mutable_feature())["tag"]
.mutable_bytes_list()
->add_value("entry");
auto tag = MaybeGetFeatureValues<std::string>("tag", example);
ASSERT_EQ(1, tag->size());
EXPECT_EQ("entry", tag->Get(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/feature_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/feature_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3080d71-6a4e-44ac-b11e-7ca949bc2bb5 | cpp | tensorflow/tensorflow | example_parser_configuration | tensorflow/core/example/example_parser_configuration.cc | tensorflow/core/example/example_parser_configuration_test.cc | #include "tensorflow/core/example/example_parser_configuration.h"
#include <vector>
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
Status FindNodeIndexByName(const tensorflow::GraphDef& graph,
const string& node_name, int* node_idx) {
for (int i = 0; i < graph.node_size(); ++i) {
const auto& node = graph.node(i);
if (node.name() == node_name) {
*node_idx = i;
return absl::OkStatus();
}
}
return errors::InvalidArgument(node_name, " not found in GraphDef");
}
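// Reconstructs the fixed-length and variable-length feature configurations of
// a ParseExample node. Dtypes and dense shapes come from the node's attrs;
// the keys and dense defaults are recovered by fetching the node's input
// tensors through Session::Run, so those inputs must be evaluable constants.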
Status ExtractExampleParserConfiguration(
const tensorflow::GraphDef& graph, const string& node_name,
tensorflow::Session* session,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
int node_idx;
TF_RETURN_IF_ERROR(FindNodeIndexByName(graph, node_name, &node_idx));
const auto& node = graph.node(node_idx);
if (node.op() != "ParseExample") {
return errors::InvalidArgument(node_name, " node is not a ParseExample op");
}
auto& attr_map = node.attr();
auto num_sparse = attr_map.at("Nsparse").i();
auto num_dense = attr_map.at("Ndense").i();
fixed_len_features->resize(num_dense);
var_len_features->resize(num_sparse);
auto tdense = attr_map.at("Tdense");
auto dense_shapes = attr_map.at("dense_shapes");
auto sparse_types = attr_map.at("sparse_types");
if (tdense.list().type_size() != num_dense) {
return errors::InvalidArgument("Node attr Tdense has ",
tdense.list().type_size(),
" elements != Ndense attr: ", num_dense);
}
if (dense_shapes.list().shape_size() != num_dense) {
return errors::InvalidArgument("Node attr dense_shapes has ",
dense_shapes.list().shape_size(),
" elements != Ndense attr: ", num_dense);
}
if (sparse_types.list().type_size() != num_sparse) {
return errors::InvalidArgument("Node attr sparse_types has ",
sparse_types.list().type_size(),
" elements != NSparse attr: ", num_sparse);
}
for (int i = 0; i < tdense.list().type_size(); ++i) {
(*fixed_len_features)[i].dtype = tdense.list().type(i);
(*fixed_len_features)[i].shape = TensorShape(dense_shapes.list().shape(i));
}
for (int i = 0; i < sparse_types.list().type_size(); ++i) {
(*var_len_features)[i].dtype = sparse_types.list().type(i);
}
std::vector<string> fetch_names(node.input_size() - 1);
for (int i = 1; i < node.input_size(); ++i) {
fetch_names[i - 1] = node.input(i);
}
std::vector<Tensor> op_input_tensors;
  TF_RETURN_IF_ERROR(session->Run({},  // no inputs to feed
                                  fetch_names, {},  // no target nodes to run
                                  &op_input_tensors));
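  // fetch_names skipped input 0 (the serialized batch), so op_input_tensors[0]
  // holds the example-names input, followed by the sparse keys, the dense
  // keys, and the dense default tensors, mirroring ParseExample's input
  // signature.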
int sparse_keys_start = 1;
int dense_keys_start = sparse_keys_start + num_sparse;
int dense_defaults_start = dense_keys_start + num_dense;
for (int i = 0; i < num_sparse; ++i) {
int input_idx = sparse_keys_start + i;
(*var_len_features)[i].key =
op_input_tensors[input_idx].scalar<tstring>()();
}
for (int i = 0; i < num_dense; ++i) {
FixedLenFeature& config = (*fixed_len_features)[i];
int dense_keys_offset = dense_keys_start + i;
config.key = op_input_tensors[dense_keys_offset].scalar<tstring>()();
int defaults_offset = dense_defaults_start + i;
config.default_value = op_input_tensors[defaults_offset];
}
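  // ParseExample's outputs are ordered as sparse indices, sparse values, and
  // sparse shapes (one group per sparse feature), followed by the dense
  // values; each output tensor is named "<node_name>:<output_index>".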
int sparse_indices_output_start = 0;
int sparse_values_output_start = sparse_indices_output_start + num_sparse;
int sparse_shapes_output_start = sparse_values_output_start + num_sparse;
int dense_values_output_start = sparse_shapes_output_start + num_sparse;
string node_output_prefix = strings::StrCat(node_name, ":");
for (int i = 0; i < num_sparse; ++i) {
VarLenFeature& config = (*var_len_features)[i];
int indices_offset = sparse_indices_output_start + i;
config.indices_output_tensor_name =
strings::StrCat(node_output_prefix, indices_offset);
int values_offset = sparse_values_output_start + i;
config.values_output_tensor_name =
strings::StrCat(node_output_prefix, values_offset);
int shapes_offset = sparse_shapes_output_start + i;
config.shapes_output_tensor_name =
strings::StrCat(node_output_prefix, shapes_offset);
}
for (int i = 0; i < num_dense; ++i) {
int output_idx = dense_values_output_start + i;
(*fixed_len_features)[i].values_output_tensor_name =
strings::StrCat(node_output_prefix, output_idx);
}
return absl::OkStatus();
}
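// Inverse direction: rebuilds the feature vectors from a previously captured
// ExampleParserConfiguration proto instead of inspecting a live graph.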
Status ExampleParserConfigurationProtoToFeatureVectors(
const ExampleParserConfiguration& config_proto,
std::vector<FixedLenFeature>* fixed_len_features,
std::vector<VarLenFeature>* var_len_features) {
const auto& feature_map = config_proto.feature_map();
for (auto it = feature_map.cbegin(); it != feature_map.cend(); ++it) {
string key = it->first;
const auto& config = it->second;
if (config.has_fixed_len_feature()) {
const auto& fixed_config = config.fixed_len_feature();
FixedLenFeature f;
f.key = key;
f.dtype = fixed_config.dtype();
f.shape = TensorShape(fixed_config.shape());
Tensor default_value(f.dtype, f.shape);
if (!default_value.FromProto(fixed_config.default_value())) {
return errors::InvalidArgument(
"Invalid default_value in config proto ",
fixed_config.default_value().DebugString());
}
f.default_value = default_value;
f.values_output_tensor_name = fixed_config.values_output_tensor_name();
fixed_len_features->push_back(f);
} else {
const auto& var_len_config = config.var_len_feature();
VarLenFeature v;
v.key = key;
v.dtype = var_len_config.dtype();
v.values_output_tensor_name = var_len_config.values_output_tensor_name();
v.indices_output_tensor_name =
var_len_config.indices_output_tensor_name();
v.shapes_output_tensor_name = var_len_config.shapes_output_tensor_name();
var_len_features->push_back(v);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/example/example_parser_configuration.h"
#include <memory>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/example_proto_helper.h"
namespace tensorflow {
namespace {
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
std::unique_ptr<Session> CreateSession() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
return std::unique_ptr<Session>(NewSession(options));
}
class ExtractExampleParserConfigurationTest : public ::testing::Test {
protected:
void SetUp() override {
string proto_string;
string filename =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/example/testdata/parse_example_graph_def.pbtxt");
ReadFileToStringOrDie(Env::Default(), filename, &proto_string);
protobuf::TextFormat::ParseFromString(proto_string, &graph_def_);
session_ = CreateSession();
TF_CHECK_OK(session_->Create(graph_def_));
}
NodeDef* parse_example_node() {
for (auto& node : *graph_def_.mutable_node()) {
if (node.name() == "ParseExample/ParseExample") {
return &node;
}
}
return nullptr;
}
GraphDef graph_def_;
std::unique_ptr<Session> session_;
};
TEST_F(ExtractExampleParserConfigurationTest, OpNotFound) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "BlarseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNsparse) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Nsparse"].set_i(3);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, InconsistentAttrNdense) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
NodeDef* node = parse_example_node();
auto mutable_attr = node->mutable_attr();
(*mutable_attr)["Ndense"].set_i(2);
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
TEST_F(ExtractExampleParserConfigurationTest, Basic) {
std::vector<FixedLenFeature> dense_vec;
std::vector<VarLenFeature> sparse_vec;
Status status = ExtractExampleParserConfiguration(
graph_def_, "ParseExample/ParseExample", session_.get(), &dense_vec,
&sparse_vec);
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(2, sparse_vec.size());
EXPECT_EQ(3, dense_vec.size());
EXPECT_EQ("sf0", sparse_vec[0].key);
EXPECT_EQ(DT_STRING, sparse_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:0",
sparse_vec[0].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:2",
sparse_vec[0].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:4",
sparse_vec[0].shapes_output_tensor_name);
EXPECT_EQ("sf1", sparse_vec[1].key);
EXPECT_EQ(DT_STRING, sparse_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:1",
sparse_vec[1].indices_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:3",
sparse_vec[1].values_output_tensor_name);
EXPECT_EQ("ParseExample/ParseExample:5",
sparse_vec[1].shapes_output_tensor_name);
EXPECT_EQ("x", dense_vec[0].key);
EXPECT_EQ(DT_FLOAT, dense_vec[0].dtype);
EXPECT_EQ("ParseExample/ParseExample:6",
dense_vec[0].values_output_tensor_name);
EXPECT_EQ("y", dense_vec[1].key);
EXPECT_EQ(DT_FLOAT, dense_vec[1].dtype);
EXPECT_EQ("ParseExample/ParseExample:7",
dense_vec[1].values_output_tensor_name);
EXPECT_EQ("z", dense_vec[2].key);
EXPECT_EQ(DT_FLOAT, dense_vec[2].dtype);
EXPECT_EQ("ParseExample/ParseExample:8",
dense_vec[2].values_output_tensor_name);
}
static const char kExampleParseConfigurationProto[] = R"( feature_map {
key: "x"
value {
fixed_len_feature {
dtype: DT_FLOAT
shape {
dim {
size: 1
}
}
default_value {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 33.0
}
values_output_tensor_name: "ParseExample/ParseExample:3"
}
}
}
feature_map {
key: "y"
value {
var_len_feature {
dtype: DT_STRING
values_output_tensor_name: "ParseExample/ParseExample:1"
indices_output_tensor_name: "ParseExample/ParseExample:0"
shapes_output_tensor_name: "ParseExample/ParseExample:2"
}
}
}
)";
class ExampleParserConfigurationProtoToFeatureVectorsTest
: public ::testing::Test {
protected:
void SetUp() override {
CHECK(protobuf::TextFormat::ParseFromString(kExampleParseConfigurationProto,
&config_proto_));
}
ExampleParserConfiguration config_proto_;
};
TEST_F(ExampleParserConfigurationProtoToFeatureVectorsTest, Basic) {
std::vector<FixedLenFeature> fixed_len_features;
std::vector<VarLenFeature> var_len_features;
TF_ASSERT_OK(ExampleParserConfigurationProtoToFeatureVectors(
config_proto_, &fixed_len_features, &var_len_features));
ASSERT_EQ(1, fixed_len_features.size());
ASSERT_EQ(1, var_len_features.size());
const FixedLenFeature& f = fixed_len_features[0];
ASSERT_EQ(DT_FLOAT, f.dtype);
ASSERT_EQ("x", f.key);
ASSERT_EQ("ParseExample/ParseExample:3", f.values_output_tensor_name);
TensorShape expected_shape({1});
ASSERT_EQ(expected_shape.dims(), f.shape.dims());
ASSERT_EQ(1, f.shape.dim_size(0));
Tensor expected_default(DT_FLOAT, TensorShape({1}));
test::FillIota<float>(&expected_default, 33.0);
test::ExpectTensorEqual<float>(expected_default, f.default_value);
const VarLenFeature& v = var_len_features[0];
ASSERT_EQ(DT_STRING, v.dtype);
ASSERT_EQ("ParseExample/ParseExample:0", v.indices_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:1", v.values_output_tensor_name);
ASSERT_EQ("ParseExample/ParseExample:2", v.shapes_output_tensor_name);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/example_parser_configuration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/example/example_parser_configuration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3f822d69-6c9f-46ae-ab6f-031ccb5e5af2 | cpp | tensorflow/tensorflow | tensor_coding | tensorflow/core/distributed_runtime/tensor_coding.cc | tensorflow/core/distributed_runtime/tensor_coding_test.cc | #include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace tensorflow {
TensorResponse::Source::~Source() {}
void TensorResponse::Clear() {
on_host_ = false;
device_ = nullptr;
alloc_attrs_ = AllocatorAttributes();
allocator_ = nullptr;
already_used_ = false;
ClearTensor();
}
void TensorResponse::ClearTensor() {
meta_.Clear();
tensor_ = Tensor();
}
void TensorResponse::InitAlloc(DeviceBase* d, const AllocatorAttributes& aa) {
Clear();
device_ = d;
alloc_attrs_ = aa;
const DeviceAttributes& da = d->attributes();
if (alloc_attrs_.on_host() || da.device_type() == "CPU") {
on_host_ = true;
}
allocator_ = device_->GetAllocator(alloc_attrs_);
}
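// Takes ownership of the metadata in `response` and materializes the tensor,
// either directly on host or through the device's MakeTensorFromProto.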
Status TensorResponse::InitFrom(RecvTensorResponse* response) {
Status s;
meta_.Swap(response);
if (on_host_) {
if (!tensor_.FromProto(allocator_, meta_.tensor())) {
s = errors::InvalidArgument("Cannot parse tensor from response");
}
} else {
s = device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);
}
{
TensorProto empty;
meta_.mutable_tensor()->Swap(&empty);
}
meta_.clear_tensor();
return s;
}
void TensorResponse::InitPartial(const RecvTensorResponse& response,
const AllocationAttributes& allocation_attr) {
meta_ = response;
TensorShape shape(meta_.tensor().tensor_shape());
Tensor t(allocator_, meta_.tensor().dtype(), shape, allocation_attr);
tensor_ = std::move(t);
}
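// Off-host tensors are parsed via a full proto round trip. On host, a
// hand-rolled fast path decodes tensor_content straight into the destination
// buffer, with full proto parsing as the fallback.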
Status TensorResponse::ParseFrom(Source* source) {
if (!on_host_) {
protobuf::io::CodedInputStream input(source->contents());
if (!meta_.ParseFromCodedStream(&input) || !input.ConsumedEntireMessage()) {
return errors::InvalidArgument("Cannot parse tensor from response");
}
Status s =
device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);
{
TensorProto empty;
meta_.mutable_tensor()->Swap(&empty);
}
meta_.clear_tensor();
return s;
}
if (already_used_) {
ClearTensor();
}
already_used_ = true;
if (ParseFast(source)) return absl::OkStatus();
meta_.Clear();
if (ParseSlow(source)) return absl::OkStatus();
return errors::InvalidArgument("Cannot parse tensor from response");
}
namespace {
enum WireType {
WIRETYPE_VARINT = 0,
WIRETYPE_LENGTH_DELIMITED = 2,
};
inline int GetTagFieldNumber(uint32 tag) { return tag >> 3; }
inline WireType GetTagWireType(uint32 tag) {
return static_cast<WireType>(tag & 0x7);
}
bool ReadVarintSizeAsInt(protobuf::io::CodedInputStream* input, int* result) {
protobuf_uint64 v;
if (input->ReadVarint64(&v) && v <= static_cast<uint64>(INT_MAX)) {
*result = static_cast<int>(v);
return true;
} else {
return false;
}
}
bool ReadNestedMessage(protobuf::io::CodedInputStream* input,
protobuf::Message* value) {
int length;
if (!ReadVarintSizeAsInt(input, &length)) return false;
std::pair<protobuf::io::CodedInputStream::Limit, int> p =
input->IncrementRecursionDepthAndPushLimit(length);
if (p.second < 0 || !value->MergePartialFromCodedStream(input)) return false;
return input->DecrementRecursionDepthAndPopLimit(p.first);
}
}
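// Decodes a serialized TensorProto field by field; once tensor_content is
// reached, its raw bytes are read directly into a freshly allocated tensor.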
bool TensorResponse::ParseTensorSubmessage(
protobuf::io::CodedInputStream* input, TensorProto* tensor_meta) {
bool seen_tensor_content = false;
while (true) {
auto p = input->ReadTagWithCutoff(127);
int tag = GetTagFieldNumber(p.first);
WireType wt = GetTagWireType(p.first);
if (!p.second) {
bool ok = (tag == 0);
if (ok && !seen_tensor_content) {
TensorShape shape(tensor_meta->tensor_shape());
Tensor t(allocator_, tensor_meta->dtype(), shape);
tensor_ = std::move(t);
}
return ok;
}
switch (tag) {
case TensorProto::kDtypeFieldNumber: {
uint32 v;
if ((wt != WIRETYPE_VARINT) || !input->ReadVarint32(&v)) return false;
if (seen_tensor_content) return false;
tensor_meta->set_dtype(static_cast<DataType>(static_cast<int>(v)));
if (!DataTypeCanUseMemcpy(tensor_meta->dtype())) return false;
break;
}
case TensorProto::kTensorShapeFieldNumber: {
if ((wt != WIRETYPE_LENGTH_DELIMITED) ||
!ReadNestedMessage(input, tensor_meta->mutable_tensor_shape()))
return false;
if (seen_tensor_content) return false;
break;
}
case TensorProto::kVersionNumberFieldNumber: {
uint32 v;
if ((wt != WIRETYPE_VARINT) || !input->ReadVarint32(&v)) return false;
if (seen_tensor_content) return false;
tensor_meta->set_version_number(static_cast<int32>(v));
break;
}
case TensorProto::kTensorContentFieldNumber: {
if (seen_tensor_content) return false;
if (wt != WIRETYPE_LENGTH_DELIMITED ||
!tensor_meta->has_tensor_shape()) {
return false;
}
int num_bytes;
if (!ReadVarintSizeAsInt(input, &num_bytes)) return false;
seen_tensor_content = true;
TensorShape shape(tensor_meta->tensor_shape());
Tensor t(allocator_, tensor_meta->dtype(), shape);
StringPiece buf = t.tensor_data();
if (static_cast<size_t>(num_bytes) != buf.size()) return false;
if (!input->ReadRaw(const_cast<char*>(buf.data()), num_bytes))
return false;
tensor_ = std::move(t);
break;
}
default: {
return false;
}
}
}
}
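// Fast path: walks the top-level RecvTensorResponse fields with a coded
// stream so the tensor payload is never staged through a TensorProto.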
bool TensorResponse::ParseFast(Source* source) {
protobuf::io::CodedInputStream input(source->contents());
while (true) {
auto p = input.ReadTagWithCutoff(127);
int tag = GetTagFieldNumber(p.first);
WireType wt = GetTagWireType(p.first);
if (!p.second) {
return (tag == 0);
}
switch (tag) {
case RecvTensorResponse::kTensorFieldNumber: {
if (wt != WIRETYPE_LENGTH_DELIMITED) return false;
int length;
if (!ReadVarintSizeAsInt(&input, &length)) return false;
std::pair<protobuf::io::CodedInputStream::Limit, int> p =
input.IncrementRecursionDepthAndPushLimit(length);
if (p.second < 0 ||
!ParseTensorSubmessage(&input, meta_.mutable_tensor())) {
return false;
}
if (!input.DecrementRecursionDepthAndPopLimit(p.first)) {
return false;
}
break;
}
case RecvTensorResponse::kIsDeadFieldNumber: {
uint32 v;
if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) return false;
meta_.set_is_dead(v != 0);
break;
}
case RecvTensorResponse::kSendStartMicrosFieldNumber: {
protobuf_uint64 v;
if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) return false;
meta_.set_send_start_micros(static_cast<int64_t>(v));
break;
}
case RecvTensorResponse::kTransportOptionsFieldNumber: {
if ((wt != WIRETYPE_LENGTH_DELIMITED) ||
!ReadNestedMessage(&input, meta_.mutable_transport_options()))
return false;
break;
}
case RecvTensorResponse::kRequireAckFieldNumber: {
uint32 v;
if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) return false;
meta_.set_require_ack(v != 0);
break;
}
default: {
return false;
}
}
}
return false;
}
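// Slow path: fully parse the proto, then convert it into a Tensor.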
bool TensorResponse::ParseSlow(Source* source) {
if (!meta_.ParseFromZeroCopyStream(source->contents())) {
return false;
}
Tensor parsed(meta_.tensor().dtype());
if (!parsed.FromProto(allocator_, meta_.tensor())) {
return false;
}
tensor_ = std::move(parsed);
{
TensorProto empty;
meta_.mutable_tensor()->Swap(&empty);
}
meta_.clear_tensor();
return true;
}
} | #include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class DummyDevice : public DeviceBase {
public:
explicit DummyDevice(Env* env) : DeviceBase(env) {
attr_.set_device_type("CPU");
}
const DeviceAttributes& attributes() const override { return attr_; }
Allocator* GetAllocator(AllocatorAttributes attr) override {
return cpu_allocator();
}
private:
DeviceAttributes attr_;
};
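// A Source that replays a string through an ArrayInputStream, letting the
// test exercise parsing with arbitrary block sizes.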
class StringSource : public TensorResponse::Source {
public:
explicit StringSource(const string* s, int block_size)
: s_(s), stream_(nullptr), block_size_(block_size) {}
~StringSource() override { DeleteStream(); }
protobuf::io::ZeroCopyInputStream* contents() override {
DeleteStream();
stream_ = new (&space_)
protobuf::io::ArrayInputStream(s_->data(), s_->size(), block_size_);
return stream_;
}
void DeleteStream() {
if (stream_) {
stream_->~ArrayInputStream();
}
}
private:
const string* s_;
protobuf::io::ArrayInputStream* stream_;
char space_[sizeof(protobuf::io::ArrayInputStream)];
int block_size_;
};
class TensorResponseTest : public ::testing::Test {
public:
void Validate(const Tensor& src, bool is_dead, bool use_tensor_content) {
RecvTensorResponse proto;
proto.set_is_dead(is_dead);
proto.set_send_start_micros(123456);
if (use_tensor_content) {
src.AsProtoTensorContent(proto.mutable_tensor());
} else {
src.AsProtoField(proto.mutable_tensor());
}
string encoded;
proto.AppendToString(&encoded);
StringSource source(&encoded, 1024);
TensorResponse response;
DummyDevice cpu_device(Env::Default());
response.InitAlloc(&cpu_device, AllocatorAttributes());
for (int i = 0; i < 2; i++) {
Status s = response.ParseFrom(&source);
EXPECT_TRUE(s.ok());
const RecvTensorResponse& meta = response.metadata();
EXPECT_EQ(meta.is_dead(), is_dead);
EXPECT_EQ(meta.send_start_micros(), 123456);
const Tensor& result = response.tensor();
EXPECT_EQ(result.dtype(), src.dtype());
EXPECT_EQ(result.shape().DebugString(), src.shape().DebugString());
EXPECT_EQ(result.DebugString(), src.DebugString());
}
}
template <typename T>
void DoTest(DataType dt) {
gtl::InlinedVector<T, 4> v;
LOG(ERROR) << "DT: " << static_cast<int>(dt);
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<T>(&a, v);
Validate(a, (elems == 0), true);
}
v.push_back(static_cast<T>(elems));
}
}
void DoTestForStrings(DataType dt) {
absl::InlinedVector<tstring, 4UL> v;
LOG(ERROR) << "DT: string";
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<tstring>(&a, v);
Validate(a, (elems == 0), true);
}
v.push_back(strings::StrCat("This is string ", elems));
}
}
};
TEST_F(TensorResponseTest, Simple) {
DoTest<float>(DT_FLOAT);
DoTest<double>(DT_DOUBLE);
DoTest<int32>(DT_INT32);
DoTest<uint16>(DT_UINT16);
DoTest<uint8>(DT_UINT8);
DoTest<int16>(DT_INT16);
DoTest<int8>(DT_INT8);
DoTest<complex64>(DT_COMPLEX64);
DoTest<complex128>(DT_COMPLEX128);
DoTest<int64_t>(DT_INT64);
DoTest<bool>(DT_BOOL);
DoTest<qint8>(DT_QINT8);
DoTest<quint8>(DT_QUINT8);
DoTest<qint16>(DT_QINT16);
DoTest<quint16>(DT_QUINT16);
DoTest<qint32>(DT_QINT32);
DoTest<bfloat16>(DT_BFLOAT16);
DoTest<Eigen::half>(DT_HALF);
}
TEST_F(TensorResponseTest, StringTensor) { DoTestForStrings(DT_STRING); }
string MakeFloatTensorTestCase(int num_elems) {
std::vector<int8> v(num_elems);
for (int i = 0; i < num_elems; i++) {
v[i] = i % 10;
}
Tensor src(DT_INT8, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<int8>(&src, v);
RecvTensorResponse proto;
proto.set_is_dead(false);
proto.set_send_start_micros(123456);
src.AsProtoTensorContent(proto.mutable_tensor());
string encoded;
proto.AppendToString(&encoded);
return encoded;
}
static void BM_TensorResponse(::testing::benchmark::State& state) {
const int arg = state.range(0);
string encoded = MakeFloatTensorTestCase(arg);
DummyDevice cpu_device(Env::Default());
size_t bytes = 0;
for (auto i : state) {
TensorResponse response;
response.InitAlloc(&cpu_device, AllocatorAttributes());
StringSource source(&encoded, -1);
Status s = response.ParseFrom(&source);
bytes = response.tensor().TotalBytes();
}
state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorResponse)->Arg(0)->Arg(1000)->Arg(100000);
static void BM_TensorViaTensorProto(::testing::benchmark::State& state) {
const int arg = state.range(0);
std::string encoded = MakeFloatTensorTestCase(arg);
size_t bytes = 0;
for (auto s : state) {
RecvTensorResponse r;
r.ParseFromString(encoded);
Tensor t;
CHECK(t.FromProto(r.tensor()));
bytes = t.TotalBytes();
}
state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorViaTensorProto)->Arg(0)->Arg(1000)->Arg(100000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/tensor_coding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/tensor_coding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
46384a61-f663-42db-afe4-785b596082c0 | cpp | tensorflow/tensorflow | platform_strings | tensorflow/core/platform/platform_strings.cc | tensorflow/core/platform/platform_strings_test.cc | #include "tensorflow/core/platform/platform_strings.h"
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
namespace tensorflow {
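// Scans `path` for strings embedded behind TF_PLAT_STR_MAGIC_PREFIX_. The
// prefix begins with a NUL byte, so matching starts at prefix[1] and requires
// the previously read byte to be 0. Returns 0 on success, else an errno value.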
int GetPlatformStrings(const std::string& path,
std::vector<std::string>* found) {
int result;
FILE* ifp = fopen(path.c_str(), "rb");
if (ifp != nullptr) {
static const char prefix[] = TF_PLAT_STR_MAGIC_PREFIX_;
int first_char = prefix[1];
int last_char = -1;
int c;
while ((c = getc(ifp)) != EOF) {
if (c == first_char && last_char == 0) {
int i = 2;
while (prefix[i] != 0 && (c = getc(ifp)) == prefix[i]) {
i++;
}
if (prefix[i] == 0) {
std::string str;
while ((c = getc(ifp)) != EOF && c != 0) {
str.push_back(c);
}
if (!str.empty()) {
found->push_back(str);
}
}
}
last_char = c;
}
result = (ferror(ifp) == 0) ? 0 : errno;
if (fclose(ifp) != 0) {
result = errno;
}
} else {
result = errno;
}
return result;
}
} | #include "tensorflow/core/platform/platform_strings.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/str_util.h"
TF_PLATFORM_STRINGS()
typedef std::vector<std::string> string_vec;
static int PrintStrings(const std::string& file_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(file_name, &str)) {
for (int i = 0; i != str.size(); i++) {
printf("%s\n", str[i].c_str());
}
} else {
perror(file_name.c_str());
rc = 2;
}
return rc;
}
static bool GetValue(const string_vec &str, const std::string ¯o_name,
std::string *pvalue) {
std::string nam_eq = macro_name + "=";
int i = 0;
while (i != str.size() && !absl::StartsWith(str[i], nam_eq)) {
i++;
}
bool found = (i != str.size());
if (found) {
*pvalue = str[i].substr(nam_eq.size());
}
return found;
}
static void CheckStr(const string_vec &str, const std::string ¯o_name,
const std::string &value) {
std::string value_from_str;
if (GetValue(str, macro_name, &value_from_str)) {
if (value != value_from_str) {
LOG(ERROR) << "===== value=" << value
<< " value_from_str=" << value_from_str;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, value_from_str) << " " << macro_name << ": bad value";
} else {
if (value != macro_name) {
LOG(ERROR) << "===== value=" << value << " macro_name=" << macro_name;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, macro_name) << " " << macro_name << ": not found in binary";
}
}
#define AS_STR_1_(x) #x
#define AS_STR(x) AS_STR_1_(x)
static int RunTest(const std::string &binary_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(binary_name, &str)) {
CheckStr(str, "__linux__", AS_STR(__linux__));
CheckStr(str, "_WIN32", AS_STR(_WIN32));
CheckStr(str, "__APPLE__", AS_STR(__APPLE__));
CheckStr(str, "__x86_64__", AS_STR(__x86_64__));
CheckStr(str, "__aarch64__", AS_STR(__aarch64__));
CheckStr(str, "__powerpc64__", AS_STR(__powerpc64__));
CheckStr(str, "TF_PLAT_STR_VERSION", TF_PLAT_STR_VERSION_);
} else {
perror(binary_name.c_str());
rc = 2;
}
return rc;
}
int main(int argc, char *argv[]) {
tensorflow::Env *env = tensorflow::Env::Default();
static const char usage[] = "usage: platform_strings_test [file...]";
int rc = 0;
tensorflow::port::InitMain(usage, &argc, &argv);
if (argc == 1) {
printf("rc=%d\n", PrintStrings(env->GetExecutablePath()));
rc = RunTest(env->GetExecutablePath());
} else {
for (int argn = 1; argn != argc; argn++) {
rc |= PrintStrings(argv[argn]);
}
}
return rc;
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/platform_strings.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/platform_strings_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d8ed00a3-7513-4797-8a12-6dc6dfb3fefe | cpp | tensorflow/tensorflow | enable_tf2_utils | tensorflow/core/platform/enable_tf2_utils.cc | tensorflow/core/platform/enable_tf2_utils_test.cc | #include "tensorflow/core/platform/enable_tf2_utils.h"
#include <atomic>
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
enum Enablement : uint8 { kFalse = 0, kTrue = 1, undefined = 2 };
static std::atomic<Enablement> tf2_enabled{undefined};
void set_tf2_execution(bool enabled) {
tf2_enabled = (enabled) ? Enablement::kTrue : Enablement::kFalse;
}
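// Falls back to the TF2_BEHAVIOR environment variable the first time the
// setting is queried before set_tf2_execution() has been called.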
bool tf2_execution_enabled() {
if (tf2_enabled == Enablement::undefined) {
static bool tf2_behavior_env_enabled = [] {
string tf2_env;
TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
return tf2_env != "0";
}();
tf2_enabled =
(tf2_behavior_env_enabled) ? Enablement::kTrue : Enablement::kFalse;
}
return tf2_enabled;
}
} | #include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
TEST(TF2EnabledTest, enabled_behavior) {
string tf2_env;
TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
bool expected = (tf2_env != "0");
EXPECT_EQ(tensorflow::tf2_execution_enabled(), expected);
tensorflow::set_tf2_execution(true);
EXPECT_TRUE(tensorflow::tf2_execution_enabled());
tensorflow::set_tf2_execution(false);
EXPECT_FALSE(tensorflow::tf2_execution_enabled());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/enable_tf2_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/enable_tf2_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc8e519b-c16d-49d3-97a5-72a90d4fd5f7 | cpp | tensorflow/tensorflow | subgraph | tensorflow/lite/delegates/gpu/common/selectors/subgraph.cc | tensorflow/lite/core/subgraph_test.cc | #include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
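// Newly created tensors are referenced by negative ids: -1 is the first
// element of new_tensors, -2 the second, and so on.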
int GPUOperationsSubgraph::AddTensor(const TensorDescriptor& desc) {
new_tensors.push_back(desc);
  return -static_cast<int>(new_tensors.size());
}
int GPUOperationsSubgraph::AddTensor(const BHWC& shape,
const TensorDescriptor& desc) {
TensorDescriptor desc_with_shape = desc;
desc_with_shape.SetBHWCShape(shape);
return AddTensor(desc_with_shape);
}
int GPUOperationsSubgraph::AddTensor(const OHWI& shape,
const TensorDescriptor& desc) {
const BHWC shape_as_bhwc(shape.o, shape.h, shape.w, shape.i);
return AddTensor(shape_as_bhwc, desc);
}
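// Resets the subgraph to a single operation wired to the given graph values
// and returns a pointer for the caller to fill in with a GPUOperation.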
std::unique_ptr<GPUOperation>* InitSingleOpSubgraph(
const std::vector<Value*>& inputs, const std::vector<Value*>& outputs,
GPUOperationsSubgraph* gpu_subgraph) {
gpu_subgraph->operations.clear();
gpu_subgraph->new_tensors.clear();
gpu_subgraph->operations.push_back({});
for (int i = 0; i < inputs.size(); ++i) {
gpu_subgraph->operations[0].input_ids.push_back(inputs[i]->id);
}
for (int i = 0; i < outputs.size(); ++i) {
gpu_subgraph->operations[0].output_ids.push_back(outputs[i]->id);
}
return &gpu_subgraph->operations[0].operation;
}
}
} | #include "tensorflow/lite/core/subgraph.h"
#include <algorithm>
#include <cstddef>
#include <functional>
#include <memory>
#include <numeric>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_NEG();
}
}
namespace {
using testing::ElementsAreArray;
using testing::Not;
TEST(RemoveUnusedInputs, NothingToRemove) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(4);
subgraph.SetInputs({0, 1});
subgraph.SetOutputs({3});
TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
subgraph.AddNodeWithParameters({0, 1}, {2}, {}, nullptr, 0, nullptr, pad_op);
subgraph.AddNodeWithParameters({2}, {3}, {}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({0, 1}));
}
TEST(RemoveUnusedInputs, HasUnusedInputs) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(4);
subgraph.SetInputs({0, 1, 2});
subgraph.SetOutputs({3});
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
subgraph.AddNodeWithParameters({2}, {3}, {}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({-1, -1, 2}));
}
TEST(RemoveUnusedInputs, BypassInputsWithoutOp) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
subgraph.AddTensors(3);
subgraph.SetInputs({0, 1, 2});
subgraph.SetOutputs({0, 2});
ASSERT_EQ(subgraph.RemoveUnusedInputs(), kTfLiteOk);
ASSERT_EQ(subgraph.inputs(), std::vector<int>({0, -1, 2}));
}
TEST(GetSubgraphContext, NonConstGetSubgraphContext) {
Interpreter interpreter;
auto& subgraph = interpreter.primary_subgraph();
TfLiteContext* context = nullptr;
EXPECT_EQ(kTfLiteError, subgraph.AcquireSubgraphContext(-1, &context));
ASSERT_EQ(context, nullptr);
EXPECT_EQ(kTfLiteError, subgraph.AcquireSubgraphContext(1, &context));
ASSERT_EQ(context, nullptr);
EXPECT_EQ(kTfLiteOk, subgraph.AcquireSubgraphContext(0, &context));
ASSERT_NE(context, nullptr);
EXPECT_EQ(kTfLiteOk, subgraph.ReleaseSubgraphContext(0));
}
TEST(MarkSubgraphAsDelegationSkippable, MarkSubgraphAsDelegationSkippable) {
static StderrReporter* error_reporter = new StderrReporter;
std::vector<std::unique_ptr<Subgraph>> subgraphs;
for (int i = 0; i < 2; ++i) {
subgraphs.emplace_back(new Subgraph(error_reporter,
nullptr,
&subgraphs,
nullptr,
nullptr,
nullptr,
i));
}
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(0), kTfLiteError);
ASSERT_FALSE(subgraphs[0]->IsDelegationSkippable());
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(2), kTfLiteError);
ASSERT_EQ(subgraphs[0]->MarkSubgraphAsDelegationSkippable(1), kTfLiteOk);
ASSERT_TRUE(subgraphs[1]->IsDelegationSkippable());
}
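// Returns the buffer size in bytes of a tensor of `type` with the given dims
// (overloads below accept a TfLiteIntArray or a std::vector of dims).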
size_t BytesFor(const TfLiteType type, const int* const data,
const size_t size) {
size_t type_size;
CHECK_EQ(GetSizeOfType(nullptr, type, &type_size), kTfLiteOk)
<< "Type is not supported by GetSizeOfType";
  return std::accumulate(data, data + size, type_size,
                         std::multiplies<size_t>());
}
size_t BytesFor(const TfLiteType type, const TfLiteIntArray& dims) {
return BytesFor(type, dims.data, dims.size);
}
size_t BytesFor(const TfLiteType type, const std::vector<int>& dims) {
return BytesFor(type, dims.data(), dims.size());
}
class SubgraphResizeTensorTest : public testing::Test {
public:
SubgraphResizeTensorTest() {
tensor_.type = type_;
tensor_.allocation_type = kTfLiteDynamic;
}
~SubgraphResizeTensorTest() override { TfLiteTensorFree(&tensor_); }
protected:
const TfLiteType type_ = kTfLiteInt32;
Interpreter interpreter_;
TfLiteContext& context_ = *interpreter_.primary_subgraph().context();
const std::vector<int> reference_shape_ = {5, 4, 3};
const size_t reference_dims_bytes_ = BytesFor(type_, reference_shape_);
TfLiteTensor tensor_ = {};
TfLiteIntArray* dims_ = ConvertVectorToTfLiteIntArray(reference_shape_);
};
TEST_F(SubgraphResizeTensorTest, ResizeEmptyDynamicTensorAllocateData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeEmptyDynamicTensorWithStoredShapeAllocatesData) {
tensor_.dims = dims_;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest, ResizeDynamicTensorWithTheEqualShapeIsANoop) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
const void* const initial_data = tensor_.data.data;
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray(reference_shape_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims2);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_GE(tensor_.data.data, initial_data);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest, ResizeDynamicTensorWithStoredShapeIsANoop) {
tensor_.dims = dims_;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
const void* const initial_data = tensor_.data.data;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_GE(tensor_.data.data, initial_data);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithEquivalentBufferSizeIsANoop) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
const void* const initial_data = tensor_.data.data;
const std::vector<int> new_shape = {3, 4, 5};
ASSERT_THAT(new_shape, Not(ElementsAreArray(reference_shape_)));
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray(new_shape);
ASSERT_EQ(BytesFor(type_, *dims2), reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_EQ(tensor_.data.data, initial_data);
EXPECT_EQ(tensor_.dims, dims2);
std::fill_n(tensor_.data.raw, reference_dims_bytes_, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithDifferentShapeReallocatesData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
TfLiteIntArray* dims2 = ConvertVectorToTfLiteIntArray({5, 4, 6});
const int dims2_bytes = BytesFor(type_, *dims2);
ASSERT_NE(dims2_bytes, reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims2), kTfLiteOk);
EXPECT_GE(tensor_.bytes, dims2_bytes);
EXPECT_EQ(tensor_.dims, dims2);
std::fill_n(tensor_.data.raw, dims2_bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButDifferentBytesReallocatesData) {
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, dims_), kTfLiteOk);
TfLiteTensorResizeMaybeCopy(tensor_.bytes + 15, &tensor_,
true);
ASSERT_GT(tensor_.bytes, reference_dims_bytes_);
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_GE(tensor_.bytes, reference_dims_bytes_);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButStringTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteString;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButRessourceTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteResource;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
TEST_F(SubgraphResizeTensorTest,
ResizeDynamicTensorWithSameShapeButVariantTypeSizeReallocatesData) {
constexpr size_t manual_bytes = 10;
TfLiteTensorResizeMaybeCopy(manual_bytes, &tensor_, true);
tensor_.dims = dims_;
tensor_.type = kTfLiteVariant;
ASSERT_EQ(context_.ResizeTensor(&context_, &tensor_, tensor_.dims),
kTfLiteOk);
EXPECT_EQ(tensor_.dims, dims_);
std::fill_n(tensor_.data.raw, tensor_.bytes, 0);
std::fill_n(tensor_.dims->data, tensor_.dims->size, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/selectors/subgraph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/subgraph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b694c9ed-3864-4e50-b8e1-72fee39e53f7 | cpp | tensorflow/tensorflow | optimizer_cse | tensorflow/core/graph/optimizer_cse.cc | tensorflow/core/graph/optimizer_cse_test.cc | #include "tensorflow/core/graph/optimizer_cse.h"
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
class OptimizerCSE {
public:
explicit OptimizerCSE(Graph* g) : g_(g) {}
bool Optimize(const std::function<bool(const Node*)>& consider_fn);
private:
static size_t NodeHash(const Node* n);
static bool Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch);
Graph* g_;
};
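// Gathers n's control inputs (sorted) and data inputs, sorting the data
// inputs only for commutative ops so operand order cannot block a match.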
static void FillInputs(
const Node* n, absl::InlinedVector<const Node*, 4UL>* control_edges,
absl::InlinedVector<std::pair<const Node*, int>, 4UL>* in) {
DCHECK_EQ(in->size(), n->num_inputs());
control_edges->clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_edges->push_back(e->src());
} else {
(*in)[e->dst_input()] = std::make_pair(e->src(), e->src_output());
}
}
std::sort(control_edges->begin(), control_edges->end());
if (n->op_def().is_commutative()) {
std::sort(in->begin(), in->end());
}
}
static constexpr size_t kIllegalNodeHash = 0;
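// Mixes strings, integers, and deterministically serialized protos into a
// single 64-bit node signature that is never kIllegalNodeHash.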
class Hasher {
public:
uint64 hash() { return h_ == kIllegalNodeHash ? kIllegalNodeHash + 1 : h_; }
void MixString(const string& s) { h_ = Hash64(s.data(), s.size(), h_); }
void MixInteger(size_t z) { h_ = Hash64Combine(h_, z); }
void MixProto(const protobuf::MessageLite& msg) {
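    // ByteSizeLong() caches sub-message sizes, which
    // SerializeWithCachedSizes() below depends on.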
msg.ByteSizeLong();
HashingOutputStream hasher;
{
protobuf::io::CodedOutputStream stream(&hasher);
stream.EnableAliasing(true);
stream.SetSerializationDeterministic(true);
msg.SerializeWithCachedSizes(&stream);
}
h_ = Hash64Combine(h_, hasher.hash());
}
private:
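  // A ZeroCopyOutputStream that hashes bytes as they are "written", so a
  // proto can be hashed without materializing its serialized string.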
class HashingOutputStream : public protobuf::io::ZeroCopyOutputStream {
public:
static constexpr size_t kBufSize = 228;
static constexpr uint64 kDefaultSeed = 2570847921467975139ULL;
bool Next(void** data, int* size) override {
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
*data = buf_;
*size = kBufSize;
} else {
*data = buf_ + i_;
*size = kBufSize - i_;
}
i_ = kBufSize;
return true;
}
void BackUp(int count) override { i_ -= count; }
int64_t ByteCount() const override { return byte_count_; }
bool WriteAliasedRaw(const void* void_data, int size) override {
const char* data = static_cast<const char*>(void_data);
const auto remaining = kBufSize - i_;
if (remaining > 0) {
if (size < remaining) {
memcpy(buf_ + i_, data, size);
i_ += size;
return true;
}
memcpy(buf_ + i_, data, remaining);
i_ = kBufSize;
data += remaining;
size -= remaining;
}
if (i_ == kBufSize) {
Mix(buf_, kBufSize);
i_ = 0;
}
while (size >= kBufSize) {
Mix(data, kBufSize);
data += kBufSize;
size -= kBufSize;
}
memcpy(buf_, data, size);
i_ = size;
return true;
}
bool AllowsAliasing() const override { return true; }
uint64 hash() {
if (i_ != 0) {
Mix(buf_, i_);
i_ = 0;
}
return h_;
}
private:
void Mix(const char* p, size_t n) {
byte_count_ += n;
h_ = Hash64(p, n, h_);
}
char buf_[kBufSize];
int i_ = 0;
int64_t byte_count_ = 0;
uint64 h_ = kDefaultSeed;
};
uint64 h_ = HashingOutputStream::kDefaultSeed;
};
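// Hashes the node's op, output types, and input edges (plus its attrs on
// non-Android builds) so candidate duplicates land in the same bucket.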
size_t OptimizerCSE::NodeHash(const Node* n) {
Hasher hasher;
hasher.MixString(n->type_string());
hasher.MixInteger(n->output_types().size());
for (DataType dt : n->output_types()) {
hasher.MixInteger(dt);
}
hasher.MixInteger(n->num_inputs());
absl::InlinedVector<const Node*, 4UL> control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> in(n->num_inputs());
FillInputs(n, &control_edges, &in);
for (const auto& edge : in) {
hasher.MixInteger(edge.first->id());
hasher.MixInteger(edge.second);
}
#if !defined(__ANDROID__)
size_t attr_hashes = 0;
for (const auto& attr : n->attrs()) {
Hasher h;
h.MixString(attr.first);
h.MixProto(attr.second);
attr_hashes = Hash64CombineUnordered(attr_hashes, h.hash());
}
hasher.MixInteger(attr_hashes);
#endif
return hasher.hash();
}
static bool HasRefInput(const Node* n) {
for (auto dt : n->input_types()) {
if (IsRefType(dt)) return true;
}
return false;
}
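// Nodes are equivalent only if they run the same stateless op with identical
// attrs and identical data/control inputs; nodes with ref inputs never merge.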
bool OptimizerCSE::Equivalent(const Node* a, const Node* b,
AttrSlice::Scratch* scratch) {
if (a->type_string() != b->type_string()) return false;
if (a->op_def().is_stateful()) return false;
if (HasRefInput(a) || HasRefInput(b)) return false;
if (!a->attrs().EqualAttrs(b->attrs(), scratch)) return false;
if (a->num_inputs() != b->num_inputs()) return false;
const int N_in = a->num_inputs();
absl::InlinedVector<const Node*, 4UL> a_control_edges;
absl::InlinedVector<const Node*, 4UL> b_control_edges;
absl::InlinedVector<std::pair<const Node*, int>, 4UL> a_in(N_in);
absl::InlinedVector<std::pair<const Node*, int>, 4UL> b_in(N_in);
FillInputs(a, &a_control_edges, &a_in);
FillInputs(b, &b_control_edges, &b_in);
if (a_in != b_in) return false;
if (a_control_edges != b_control_edges) return false;
return true;
}
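// Visits nodes in reverse post order and buckets them by NodeHash; the first
// node in a bucket survives, and later equivalent nodes have their outgoing
// edges redirected to it before being removed.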
bool OptimizerCSE::Optimize(
const std::function<bool(const Node*)>& consider_fn) {
std::vector<Node*> order;
GetReversePostOrder(*g_, &order, NodeComparatorID());
std::unordered_map<size_t, Node*> available;
bool changed = false;
AttrSlice::Scratch scratch;
for (Node* n : order) {
if (!n->IsOp()) continue;
if (n->type_string() == "Placeholder" ||
n->type_string() == "PlaceholderV2" ||
n->type_string() == "PlaceholderWithDefault") {
continue;
}
if (consider_fn != nullptr && !consider_fn(n)) continue;
size_t h = NodeHash(n);
Node** candidate = &available[h];
if (*candidate == nullptr) {
*candidate = n;
} else if (Equivalent(*candidate, n, &scratch)) {
VLOG(1) << "CSE: equivalent: " << (*candidate)->name() << " and "
<< n->name();
for (const Edge* e : n->out_edges()) {
g_->AddEdge(*candidate, e->src_output(), e->dst(), e->dst_input());
}
MergeDebugInfo(NodeDebugInfo(*n), *candidate);
g_->RemoveNode(n);
changed = true;
}
}
return changed;
}
bool OptimizeCSE(Graph* g,
const std::function<bool(const Node*)>& consider_fn) {
OptimizerCSE opt(g);
return opt.Optimize(consider_fn);
}
} | #include "tensorflow/core/graph/optimizer_cse.h"
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
class OptimizerCSETest : public ::testing::Test {
public:
OptimizerCSETest() : graph_(OpRegistry::Global()) {}
void InitGraph(const string& s) {
::tensorflow::InitGraph(s, &graph_);
original_ = CanonicalGraphString(&graph_);
}
static bool IncludeNode(const Node* n) { return n->IsOp(); }
static string EdgeId(const Node* n, int index) {
if (index == 0) {
return n->name();
} else if (index == Graph::kControlSlot) {
return strings::StrCat(n->name(), ":control");
} else {
return strings::StrCat(n->name(), ":", index);
}
}
string CanonicalGraphString(Graph* g) {
std::vector<string> nodes;
std::vector<string> edges;
for (const Node* n : g->nodes()) {
if (IncludeNode(n)) {
nodes.push_back(strings::StrCat(n->name(), "(", n->type_string(), ")"));
}
}
for (const Edge* e : g->edges()) {
if (IncludeNode(e->src()) && IncludeNode(e->dst())) {
edges.push_back(strings::StrCat(EdgeId(e->src(), e->src_output()), "->",
EdgeId(e->dst(), e->dst_input())));
}
}
std::sort(nodes.begin(), nodes.end());
std::sort(edges.begin(), edges.end());
return strings::StrCat(absl::StrJoin(nodes, ";"), "|",
absl::StrJoin(edges, ";"));
}
string DoCSE(const std::function<bool(const Node*)>& consider_fn = nullptr) {
string before = CanonicalGraphString(&graph_);
LOG(ERROR) << "Before rewrites: " << before;
OptimizeCSE(&graph_, consider_fn);
string result = CanonicalGraphString(&graph_);
LOG(ERROR) << "After rewrites: " << result;
return result;
}
const string& OriginalGraph() const { return original_; }
Graph graph_;
string original_;
};
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST_F(OptimizerCSETest, Simple) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_ThreeEquivalent) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_WithFixups) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['C', 'D'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul);E(Mul)|"
"A->C;B->C:1;C->E;C->E:1");
}
TEST_F(OptimizerCSETest, Simple_Commutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
static bool IsNotMultiply(const Node* n) { return n->type_string() != "Mul"; }
TEST_F(OptimizerCSETest, Simple_Filtered) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(IsNotMultiply), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_NotCommutative) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['B', 'A'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Ops) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Sub' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] attr { key: 'shape'"
" value { shape: { dim: { size: 37 name: 'SAME_NAME' } } } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, Simple_SameOps_SameAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(),
"A(Input);B(Input);C(Mul)|"
"A->C;B->C:1");
}
TEST_F(OptimizerCSETest, SameConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);D(Mul)|"
"A->D;A->D:1");
}
TEST_F(OptimizerCSETest, DifferentConstants) {
InitGraph(
"node { name: 'A' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 0 } } } }"
"node { name: 'B' op: 'Const' "
" attr { key: 'dtype' value { type: DT_INT32 } }"
" attr { key: 'value' value {"
" tensor { dtype: DT_INT32 tensor_shape { dim { size: 1 } } "
" int_val: 100000 } } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_INT32 } }"
" input: ['A', 'B'] }");
EXPECT_EQ(DoCSE(),
"A(Const);B(Const);D(Mul)|"
"A->D;B->D:1");
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs1) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_INT32 } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 4 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, SameOps_DifferentAttrs2) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 'a' value { i: 3 } }"
" attr { key: 't' value { type: DT_FLOAT } } }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B']"
" attr { key: 't' value { type: DT_INT32 } }"
" attr { key: 'a' value { i: 3 } } }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, NotEquivalent_Inputs) {
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Input'}"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'E' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'C'] }");
EXPECT_EQ(DoCSE(), OriginalGraph());
}
TEST_F(OptimizerCSETest, Constant_Dedup) {
Tensor a(DT_FLOAT, TensorShape({1}));
a.flat<float>()(0) = 1.0;
Tensor b(DT_DOUBLE, TensorShape({1}));
b.flat<double>()(0) = 1.0;
Tensor c(DT_FLOAT, TensorShape({1, 1}));
c.flat<float>()(0) = 1.0;
Tensor d(DT_FLOAT, TensorShape({1}));
d.flat<float>()(0) = 2.0;
Graph g(OpRegistry::Global());
for (const auto& val : {a, b, c, d, d, c, b, a}) {
test::graph::Constant(&g, val);
}
GraphDef gdef;
test::graph::ToGraphDef(&g, &gdef);
InitGraph(tsl::LegacyUnredactedDebugString(gdef));
EXPECT_EQ(OriginalGraph(),
"n/_0(Const);n/_1(Const);n/_2(Const);n/_3(Const);"
"n/_4(Const);n/_5(Const);n/_6(Const);n/_7(Const)|");
std::vector<string> nodes = str_util::Split(DoCSE(), ";|");
std::set<string> node_set(nodes.begin(), nodes.end());
EXPECT_EQ(node_set.count("n/_0(Const)") + node_set.count("n/_7(Const)"), 1);
EXPECT_EQ(node_set.count("n/_1(Const)") + node_set.count("n/_6(Const)"), 1);
EXPECT_EQ(node_set.count("n/_2(Const)") + node_set.count("n/_5(Const)"), 1);
EXPECT_EQ(node_set.count("n/_3(Const)") + node_set.count("n/_4(Const)"), 1);
}
void BM_CSE(::testing::benchmark::State& state) {
const int op_nodes = state.range(0);
string s;
for (int in = 0; in < 10; in++) {
s += strings::Printf("node { name: 'in%04d' op: 'Input'}", in);
}
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int op = 0; op < op_nodes; op++) {
s += strings::Printf(
"node { name: 'op%04d' op: 'Mul' attr { key: 'T' value { "
"type: DT_FLOAT } } input: ['in%04d', 'in%04d' ] }",
op, rnd.Uniform(10), rnd.Uniform(10));
}
bool first = true;
for (auto i : state) {
state.PauseTiming();
Graph* graph = new Graph(OpRegistry::Global());
InitGraph(s, graph);
int N = graph->num_node_ids();
if (first) {
state.SetLabel(strings::StrCat("Per graph node. Nodes: ", N));
first = false;
}
{
state.ResumeTiming();
OptimizeCSE(graph, nullptr);
state.PauseTiming();
}
delete graph;
state.ResumeTiming();
}
}
BENCHMARK(BM_CSE)->Arg(1000)->Arg(10000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/optimizer_cse.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/optimizer_cse_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52bb704e-dee6-462a-9567-d84486186958 | cpp | tensorflow/tensorflow | graph_debug_info_builder | tensorflow/core/graph/graph_debug_info_builder.cc | tensorflow/core/graph/graph_debug_info_builder_test.cc | #include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tsl/platform/path.h"
namespace tensorflow {
static const char* kFilenameToIgnorePrefix = "<embedded";
std::string StackFrameToString(const StackFrame& frame,
int shared_prefix_length) {
std::string out = absl::StrFormat(
"File \"%s\", line %d, in %s",
absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)
? frame.file_name
: frame.file_name.substr(shared_prefix_length),
frame.line_number, frame.function_name);
return out;
}
std::string ToStringHelper(absl::Span<const StackFrame> stack_frames,
int shared_prefix_length) {
return absl::StrJoin(
stack_frames, "\n", [&](std::string* out, const StackFrame& frame) {
absl::StrAppend(out, StackFrameToString(frame, shared_prefix_length));
});
}
FrozenStackTrace::FrozenStackTrace(absl::Span<StackFrame const> frames,
absl::Span<StackFrame const> user_frames)
: frames_(frames.begin(), frames.end()),
user_frames_(user_frames.begin(), user_frames.end()) {
if (user_frames.empty()) {
user_frames_ = frames_;
}
}
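// Reconstructs a stack trace from a GraphDebugInfo proto. Two encodings are
// handled: legacy traces inline their FileLineCol frames directly, while
// newer traces reference shared frames in `frames_by_id` by id; unknown file
// indices and missing frame ids degrade gracefully rather than fail.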
FrozenStackTrace::FrozenStackTrace(
const GraphDebugInfo::StackTrace& stack_trace,
const GraphDebugInfo& debug_info) {
auto push_frame = [this,
&debug_info](const GraphDebugInfo::FileLineCol& frame) {
int file_index = frame.file_index();
std::string file_name =
(file_index >= 0 && file_index < debug_info.files_size())
? debug_info.files(file_index)
: "<UNKNOWN_FILE_NAME>";
frames_.push_back(StackFrame(file_name, frame.line(), frame.func()));
};
if (!stack_trace.file_line_cols().empty()) {
for (const GraphDebugInfo::FileLineCol& frame :
stack_trace.file_line_cols()) {
push_frame(frame);
}
} else {
for (const uint64_t frame_id : stack_trace.frame_id()) {
if (debug_info.frames_by_id().contains(frame_id)) {
push_frame(debug_info.frames_by_id().at(frame_id));
} else {
LOG_FIRST_N(ERROR, 5) << "No matching frame for id:" << frame_id;
}
}
}
}
absl::Span<StackFrame const> FrozenStackTrace::ToFrames() const {
return frames_;
}
std::vector<StackFrame> FrozenStackTrace::ToUncachedFrames() const {
return frames_;
}
StackFrame FrozenStackTrace::LastUserFrame() const { return frames_.back(); }
std::vector<StackFrame> FrozenStackTrace::GetUserFrames(int limit) const {
std::vector<StackFrame> result;
if (limit < 0 || limit > user_frames_.size()) {
limit = user_frames_.size();
}
result.reserve(limit);
for (int i = 0; i < limit; ++i) {
result.push_back(user_frames_[i]);
}
return result;
}
std::string FrozenStackTrace::ToString(const TracePrintingOptions& opts) const {
int shared_prefix_length = 0;
if (opts.filter_common_prefix) {
std::vector<std::string> prefix_file_names;
for (const StackFrame& frame : frames_) {
if (!absl::StrContains(frame.file_name, kFilenameToIgnorePrefix)) {
prefix_file_names.push_back(frame.file_name);
}
}
shared_prefix_length = tsl::io::CommonPathPrefix(prefix_file_names).size();
}
if (!opts.drop_internal_frames) {
return ToStringHelper(frames_, shared_prefix_length);
}
std::vector<StackFrame> non_internal_frames;
for (const StackFrame& frame : frames_) {
if (!IsInternalFrameForFilename(frame.file_name)) {
non_internal_frames.push_back(frame);
}
}
return ToStringHelper(non_internal_frames, shared_prefix_length);
}
GraphDebugInfoBuilder::GraphDebugInfoBuilder()
: debug_info_(std::make_unique<GraphDebugInfo>()) {}
void GraphDebugInfoBuilder::AccumulateStackTracesMap(
const StackTracesMap& stack_traces_map, absl::string_view key_suffix,
const GraphDebugInfoBuilder::Options& options) {
trace_to_index_.reserve(trace_to_index_.size() + stack_traces_map.size());
for (const auto& [node_name, stack_trace] : stack_traces_map) {
if (stack_trace == nullptr) continue;
std::string trace_key = absl::StrCat(node_name, key_suffix);
AccumulateStackTrace(stack_trace, trace_key, options);
}
}
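// Registers one stack trace under `traces_key`. Traces already seen are
// deduplicated through `trace_to_index_`, so a trace shared by many nodes is
// serialized into the proto once and each node name maps to the shared
// trace id.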
void GraphDebugInfoBuilder::AccumulateStackTrace(
std::shared_ptr<AbstractStackTrace> trace, absl::string_view traces_key,
const GraphDebugInfoBuilder::Options& options) {
int trace_index = 0;
StackTracePointer p{trace};
auto found = trace_to_index_.find(p);
if (found != trace_to_index_.end()) {
trace_index = found->second;
} else {
trace_index = debug_info_->traces_by_id().size();
trace_to_index_[p] = trace_index;
GraphDebugInfo::StackTrace& stack_trace_proto =
(*debug_info_->mutable_traces_by_id())[trace_index];
if (options.user_frames) {
frame_to_index_.reserve(
frame_to_index_.size() +
trace->GetUserFrames(options.user_frames_limit).size());
for (const auto& stack_frame :
trace->GetUserFrames(options.user_frames_limit)) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
} else {
frame_to_index_.reserve(frame_to_index_.size() +
trace->ToFrames().size());
for (const auto& stack_frame : trace->ToFrames()) {
AppendToStackTraceProto(stack_frame, stack_trace_proto);
}
}
}
(*debug_info_->mutable_name_to_trace_id())[traces_key] = trace_index;
}
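// Interns a single frame: identical frames share one FileLineCol entry via
// `frame_to_index_`, and file names are likewise deduplicated into the
// `files` list through `file_name_to_index_`.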
void GraphDebugInfoBuilder::AppendToStackTraceProto(
const StackFrame& stack_frame,
GraphDebugInfo::StackTrace& stack_trace_proto) {
int frame_index = 0;
auto found = frame_to_index_.find(stack_frame);
if (found != frame_to_index_.end()) {
frame_index = found->second;
} else {
frame_index = debug_info_->frames_by_id().size();
frame_to_index_[stack_frame] = frame_index;
GraphDebugInfo::FileLineCol& frame =
(*debug_info_->mutable_frames_by_id())[frame_index];
auto file_index = file_name_to_index_.find(stack_frame.file_name);
if (file_index != file_name_to_index_.end()) {
frame.set_file_index(file_index->second);
} else {
frame.set_file_index(new_name_index_);
file_name_to_index_[stack_frame.file_name] = new_name_index_;
*debug_info_->add_files() = stack_frame.file_name;
new_name_index_++;
}
frame.set_line(stack_frame.line_number);
frame.set_func(stack_frame.function_name);
}
stack_trace_proto.add_frame_id(frame_index);
}
void GraphDebugInfoBuilder::AppendGraphDebugInfo(
absl::string_view prefix, const GraphDebugInfo& new_info) {
for (const auto& pair : new_info.name_to_trace_id()) {
auto trace = new_info.traces_by_id().at(pair.second);
auto frozen = std::make_shared<FrozenStackTrace>(trace, new_info);
std::string key =
prefix.empty() ? pair.first : absl::StrCat(pair.first, "@", prefix);
AccumulateStackTrace(frozen, key, GraphDebugInfoBuilder::Options{});
}
}
GraphDebugInfo GraphDebugInfoBuilder::Build() const { return *debug_info_; }
absl::Status GraphDebugInfoBuilder::AppendGraphDebugInfoStr(
absl::string_view prefix, absl::string_view new_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(new_info_str.data(), new_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
AppendGraphDebugInfo(prefix, debug_info);
return absl::OkStatus();
}
std::string GraphDebugInfoBuilder::ToGraphDebugInfoStr() const {
return Build().SerializeAsString();
}
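// Rebuilds a name -> stack trace map from a GraphDebugInfo proto. Traces
// referenced by id are materialized once as shared FrozenStackTraces;
// entries in the legacy inline `traces` map, if present, overwrite any entry
// of the same name.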
StackTracesMap LoadTracesFromDebugInfo(const GraphDebugInfo& debug_info) {
StackTracesMap traces;
absl::flat_hash_map<uint64_t, std::shared_ptr<AbstractStackTrace>>
traces_by_id;
traces_by_id.reserve(debug_info.traces_by_id_size());
for (const auto& [id, frames] : debug_info.traces_by_id()) {
traces_by_id[id] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
traces.reserve(debug_info.name_to_trace_id_size() + debug_info.traces_size());
for (const auto& [name, trace_id] : debug_info.name_to_trace_id()) {
if (!traces_by_id.contains(trace_id)) {
LOG_FIRST_N(ERROR, 5) << "No matching trace for id:" << trace_id;
continue;
}
traces[name] = traces_by_id[trace_id];
}
for (const auto& [name, frames] : debug_info.traces()) {
traces[name] = std::make_shared<FrozenStackTrace>(frames, debug_info);
}
return traces;
}
absl::StatusOr<StackTracesMap> LoadTracesFromDebugInfoStr(
absl::string_view debug_info_str) {
GraphDebugInfo debug_info;
if (!debug_info.ParseFromArray(debug_info_str.data(),
debug_info_str.size())) {
return absl::InvalidArgumentError("Failed to parse GraphDebugInfo proto.");
}
return LoadTracesFromDebugInfo(debug_info);
}
GraphDebugInfo StackTracesMapToGraphDebugInfo(const StackTracesMap& map,
bool user_frames) {
GraphDebugInfoBuilder builder;
GraphDebugInfoBuilder::Options options;
options.user_frames = user_frames;
options.user_frames_limit = -1;
builder.AccumulateStackTracesMap(map, "", options);
return builder.Build();
}
} | #include "tensorflow/core/graph/graph_debug_info_builder.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
using ::testing::Ne;
using ::testing::UnorderedElementsAre;
class TestStackTrace : public AbstractStackTrace {
public:
explicit TestStackTrace(std::vector<StackFrame> frames)
: frames_(std::move(frames)) {}
absl::Span<StackFrame const> ToFrames() const override { return frames_; }
std::vector<StackFrame> ToUncachedFrames() const override { return frames_; }
std::vector<StackFrame> GetUserFrames(int limit) const override {
return frames_;
}
StackFrame LastUserFrame() const override { return frames_.back(); }
string ToString(const TracePrintingOptions& opts) const override {
auto frame = LastUserFrame();
return absl::StrCat(frame.file_name, ":", frame.line_number, ":",
frame.function_name);
}
std::vector<StackFrame> frames_;
};
TEST(GraphDebugInfoBuilderTest, AccumulateStackTrace) {
auto stack_trace = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTrace(stack_trace, "alpha_beta");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.traces_by_id_size(), Eq(1));
EXPECT_THAT(debug_info.name_to_trace_id().find("alpha_beta"),
Ne(debug_info.name_to_trace_id().end()));
auto actual_stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("alpha_beta"));
EXPECT_THAT(actual_stack_trace.frame_id_size(), Eq(2))
<< debug_info.DebugString();
}
TEST(GraphDebugInfoBuilderTest, AccumulateStackTracesMap) {
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"}});
stack_traces["scale"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 10, "function_foo"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
stack_traces["y"] = std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
{"dummy_file_alpha.cc", 20, "function_bar"},
{"dummy_file_beta.cc", 30, "function_sop"},
});
GraphDebugInfoBuilder builder;
builder.AccumulateStackTracesMap(stack_traces, "@func");
GraphDebugInfo debug_info = builder.Build();
EXPECT_THAT(debug_info.files(), UnorderedElementsAre("dummy_file_alpha.cc",
"dummy_file_beta.cc"));
EXPECT_THAT(debug_info.name_to_trace_id_size(), Eq(3));
EXPECT_THAT(debug_info.name_to_trace_id().find("scale@func"),
Ne(debug_info.name_to_trace_id().end()));
auto stack_trace = debug_info.traces_by_id().at(
debug_info.name_to_trace_id().at("scale@func"));
EXPECT_THAT(stack_trace.frame_id_size(), Eq(2));
std::vector<GraphDebugInfo::FileLineCol> file_line_cols;
for (auto& frame_id : stack_trace.frame_id()) {
file_line_cols.push_back(debug_info.frames_by_id().at(frame_id));
}
auto file_line_col_0 = file_line_cols[0];
auto file_line_col_1 = file_line_cols[1];
EXPECT_THAT(std::vector<int>(
{file_line_col_0.file_index(), file_line_col_1.file_index()}),
UnorderedElementsAre(0, 1));
EXPECT_THAT(file_line_col_0.line(), Eq(10));
EXPECT_THAT(file_line_col_0.func(), Eq("function_foo"));
EXPECT_THAT(file_line_col_1.line(), Eq(30));
EXPECT_THAT(file_line_col_1.func(), Eq("function_sop"));
}
TEST(GraphDebugInfoBuilderTest, AppendGraphDebugInfo) {
GraphDebugInfo a;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["two"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 20, "function_bar"}});
stack_traces["scale"] = std::make_shared<TestStackTrace>(
std::vector<StackFrame>{{"dummy_file_alpha.cc", 10, "function_foo"}});
builder.AccumulateStackTracesMap(stack_traces, "");
a = builder.Build();
}
GraphDebugInfo b;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["y"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "");
b = builder.Build();
}
GraphDebugInfo c;
{
GraphDebugInfoBuilder builder;
StackTracesMap stack_traces;
stack_traces["z"] =
std::make_shared<TestStackTrace>(std::vector<StackFrame>{
{"dummy_file_alpha.cc", 15, "function_flex"},
});
builder.AccumulateStackTracesMap(stack_traces, "@func3");
c = builder.Build();
}
GraphDebugInfoBuilder builder;
builder.AppendGraphDebugInfo("func1", a);
builder.AppendGraphDebugInfo("func2", b);
builder.AppendGraphDebugInfo("", c);
GraphDebugInfo combined = builder.Build();
EXPECT_EQ(combined.name_to_trace_id().size(), 4);
std::vector<std::string> keys{"two@func1", "scale@func1", "y@func2",
"z@func3"};
for (const auto& key : keys) {
EXPECT_THAT(combined.name_to_trace_id().find(key),
Ne(combined.name_to_trace_id().end()));
}
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyMap) {
StackTracesMap map;
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_size(), 0);
}
TEST(StackTracesMapToGraphDebugInfoTest, EmptyFrames) {
StackTracesMap map;
std::vector<StackFrame> frames;
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
EXPECT_EQ(generated.files_size(), 0);
EXPECT_EQ(generated.traces_by_id_size(), 1);
EXPECT_TRUE(generated.name_to_trace_id().contains("dummy_name"));
}
TEST(StackTracesMapToGraphDebugInfoTest, RoundTripStackTraces) {
StackTracesMap map;
std::vector<StackFrame> frames = {
StackFrame({"dummy_file_name", 10, "dummy_function_name"}),
StackFrame({"dummy_file_name", 20, "other_function_name"})};
auto stack_trace = std::make_shared<FrozenStackTrace>(frames);
map.insert({"dummy_name", stack_trace});
GraphDebugInfo generated = StackTracesMapToGraphDebugInfo(map);
StackTracesMap output = LoadTracesFromDebugInfo(generated);
for (auto [name, trace] : output) {
auto orig_trace = map[name];
EXPECT_NE(orig_trace, nullptr);
EXPECT_EQ(orig_trace->ToFrames(), trace->ToFrames());
}
}
TEST(StackTracesTest, ToFrames) {
StackTracesMap map;
std::vector<StackFrame> frames = {
StackFrame({"dummy_file_name", 10, "dummy_function_name"}),
StackFrame({"other_file_name", 20, "other_function_name"})};
auto stack_trace = TestStackTrace(frames);
EXPECT_EQ(stack_trace.ToFrames().size(), 2);
auto uncached_frames = stack_trace.ToUncachedFrames();
EXPECT_EQ(uncached_frames.size(), 2);
EXPECT_EQ(frames[0], uncached_frames[0]);
EXPECT_EQ(frames[1], uncached_frames[1]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_debug_info_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_debug_info_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f0470b6-f9db-44fe-a6aa-ed8d231c295a | cpp | tensorflow/tensorflow | validate | tensorflow/core/graph/validate.cc | tensorflow/core/graph/validate_test.cc | #include "tensorflow/core/graph/validate.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace graph {
Status ValidateGraphDef(const GraphDef& graph_def,
const OpRegistryInterface& op_registry) {
Status s;
const int version = graph_def.versions().producer();
for (const NodeDef& node_def : graph_def.node()) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(node_def.op(), &op_def));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));
TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, version));
}
return s;
}
Status ValidateGraphDefAgainstOpRegistry(
const GraphDef& graph_def, const OpRegistryInterface& op_registry) {
GraphDef copy(graph_def);
TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(©, op_registry, 0));
return ValidateGraphDef(copy, op_registry);
}
Status ValidateGraphDefAgainstOpList(const GraphDef& graph_def,
const OpList& op_list) {
OpListOpRegistry registry(&op_list);
return ValidateGraphDefAgainstOpRegistry(graph_def, registry);
}
void GetOpListForValidation(OpList* op_list, const OpRegistry& op_registry) {
op_registry.Export(false, op_list);
RemoveDescriptionsFromOpList(op_list);
}
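// Cycle detection via a Kahn-style topological sweep: nodes whose pending
// input count reaches zero are processed, and any node left unprocessed at
// the end lies on a cycle. Merge nodes discount inputs fed by NextIteration
// so that well-formed while-loop back edges are not flagged as cycles.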
Status ValidateGraphHasNoCycle(const Graph& graph) {
std::vector<const Node*> ready;
std::vector<int> pending_count(graph.num_node_ids(), 0);
for (int i = 0; i < graph.num_node_ids(); ++i) {
const Node* n = graph.FindNodeId(i);
if (n == nullptr) continue;
pending_count[i] = n->in_edges().size();
if (n->IsMerge()) {
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsNextIteration()) {
pending_count[i]--;
}
}
}
if (pending_count[i] == 0) {
ready.push_back(n);
}
}
int processed = 0;
while (!ready.empty()) {
const Node* node = ready.back();
ready.pop_back();
++processed;
for (const Edge* out : node->out_edges()) {
const int output_id = out->dst()->id();
pending_count[output_id]--;
if (pending_count[output_id] == 0) {
ready.push_back(out->dst());
}
}
}
if (processed < graph.num_nodes()) {
std::vector<string> nodes_in_cycle;
for (int i = 0; i < pending_count.size() && nodes_in_cycle.size() < 3;
++i) {
if (pending_count[i] != 0) {
nodes_in_cycle.push_back(graph.FindNodeId(i)->name());
}
}
return errors::InvalidArgument(
"Graph is invalid, contains a cycle with ",
graph.num_nodes() - processed,
" nodes, including: ", absl::StrJoin(nodes_in_cycle, ", "));
}
return absl::OkStatus();
}
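// Rejects a GraphDef containing two nodes with the same name; the first
// duplicate found is reported via AlreadyExists.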
Status VerifyNoDuplicateNodeNames(const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> nodes;
for (const auto& node : graph.node()) {
if (nodes.contains(node.name())) {
return errors::AlreadyExists("Node already exists: ", node.name());
}
nodes.insert(node.name());
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/graph/validate.h"
#include <string>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("FloatInput").Output("o: float");
REGISTER_OP("Int32Input").Output("o: int32");
TEST(ValidateGraphDefTest, TestValidGraph) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { "
" name: 'B' op: 'Cast' "
" attr { key: 'SrcT' value { type: DT_FLOAT } }"
" input: ['A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithOpOnlyInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("UniqueSnowflake").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'UniqueSnowflake' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDefAgainstOpList(graph_def, op_list));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithGlobalOpNotInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("NotAnywhere").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'FloatInput' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
ASSERT_FALSE(graph::ValidateGraphDefAgainstOpList(graph_def, op_list).ok());
}
REGISTER_OP("HasDocs").Doc("This is in the summary.");
TEST(GetOpListForValidationTest, ShouldStripDocs) {
bool found_float = false;
bool found_int32 = false;
bool found_has_docs = false;
OpList op_list;
graph::GetOpListForValidation(&op_list);
for (const OpDef& op_def : op_list.op()) {
if (op_def.name() == "FloatInput") {
EXPECT_FALSE(found_float);
found_float = true;
}
if (op_def.name() == "Int32Input") {
EXPECT_FALSE(found_int32);
found_int32 = true;
}
if (op_def.name() == "HasDocs") {
EXPECT_FALSE(found_has_docs);
found_has_docs = true;
EXPECT_TRUE(op_def.summary().empty());
}
}
EXPECT_TRUE(found_float);
EXPECT_TRUE(found_int32);
EXPECT_TRUE(found_has_docs);
}
TEST(VerifyNoDuplicateNodeNames, NoDuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::VerifyNoDuplicateNodeNames(graph_def));
}
TEST(VerifyNoDuplicateNodeNames, DuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'A' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
EXPECT_EQ(graph::VerifyNoDuplicateNodeNames(graph_def).code(),
tensorflow::error::ALREADY_EXISTS);
}
TEST(ValidateGraphHasNoCycleTest, NoCyclePasses) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
TEST(ValidateGraphHasNoCycleTest, NoCycleWithMergePasses) {
const string graph_def_str =
R"EOF(
node { name: 'A' op: 'FloatInput' }
node { name: 'merge' op: 'Merge' input: [ 'A:0', 'next:0' ]
attr { key: "N" value: { i: 2 } }
attr { key: "T" value: { type: DT_FLOAT } } }
node { name: 'B' op: 'Mul'
attr { key: 'T' value { type: DT_FLOAT } }
input: [ 'merge:0', 'merge:0' ] }
node { name: 'next' op: 'NextIteration' input: ['B:0']
attr { key: "T" value: { type: DT_FLOAT } } }
)EOF";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
Node* AddNodeFromNodeDef(Graph& graph, const string& name,
const string& node_type, int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
TEST(ValidateGraphHasNoCycleTest, CycleFails) {
Graph graph(OpRegistry::Global());
Node* a = AddNodeFromNodeDef(graph, "A", "FloatInput", 0);
Node* c = AddNodeFromNodeDef(graph, "B", "Mul", 2);
graph.AddEdge(a, 0, c, 0);
graph.AddEdge(c, 0, c, 1);
EXPECT_THAT(
graph::ValidateGraphHasNoCycle(graph),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Graph is invalid, contains a cycle")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
737f2727-23b2-402e-a0a0-5a2b7d539d6c | cpp | tensorflow/tensorflow | edgeset | tensorflow/core/graph/edgeset.cc | tensorflow/core/graph/edgeset_test.cc | #include "tensorflow/core/graph/edgeset.h"
namespace tensorflow {
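// EdgeSet keeps up to kInline edges directly in ptrs_. On overflow the
// contents spill into a heap-allocated FlatSet and the inline slots are
// retagged (ptrs_[0] = this marks the "set" representation, ptrs_[1] holds
// the set pointer), which get_set() presumably inspects to choose between
// the inline and set code paths.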
std::pair<EdgeSet::const_iterator, bool> EdgeSet::insert(value_type value) {
RegisterMutation();
const_iterator ci;
ci.Init(this);
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == value) {
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, false);
}
}
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == nullptr) {
ptrs_[i] = value;
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, true);
}
}
s = new gtl::FlatSet<const Edge*>;
s->insert(reinterpret_cast<const Edge**>(std::begin(ptrs_)),
reinterpret_cast<const Edge**>(std::end(ptrs_)));
ptrs_[0] = this;
ptrs_[1] = s;
}
auto p = s->insert(value);
ci.tree_iter_ = p.first;
return std::make_pair(ci, p.second);
}
EdgeSet::size_type EdgeSet::erase(key_type key) {
RegisterMutation();
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == key) {
size_t n = size();
ptrs_[i] = ptrs_[n - 1];
ptrs_[n - 1] = nullptr;
return 1;
}
}
return 0;
} else {
return s->erase(key);
}
}
} | #include "tensorflow/core/graph/edgeset.h"
#include <set>
#include <vector>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class EdgeSetTest : public ::testing::Test {
public:
EdgeSetTest() : edges_(nullptr) {}
~EdgeSetTest() override { delete[] edges_; }
void MakeEdgeSet(int n) {
if (edges_) {
delete[] edges_;
}
edges_ = new Edge[n];
eset_.clear();
model_.clear();
for (int i = 0; i < n; i++) {
eset_.insert(&edges_[i]);
model_.insert(&edges_[i]);
}
}
void CheckSame() {
EXPECT_EQ(model_.size(), eset_.size());
EXPECT_EQ(model_.empty(), eset_.empty());
std::vector<const Edge*> modelv(model_.begin(), model_.end());
std::vector<const Edge*> esetv(eset_.begin(), eset_.end());
std::sort(modelv.begin(), modelv.end());
std::sort(esetv.begin(), esetv.end());
EXPECT_EQ(modelv.size(), esetv.size());
for (size_t i = 0; i < modelv.size(); i++) {
EXPECT_EQ(modelv[i], esetv[i]) << i;
}
}
static constexpr int kInline = 64 / sizeof(const void*);
Edge nonexistent_;
Edge* edges_;
EdgeSet eset_;
std::set<const Edge*> model_;
};
namespace {
TEST_F(EdgeSetTest, Ops) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
CheckSame();
EXPECT_EQ((n == 0), eset_.empty());
EXPECT_EQ(n, eset_.size());
eset_.clear();
model_.clear();
CheckSame();
eset_.insert(&edges_[0]);
model_.insert(&edges_[0]);
CheckSame();
}
}
TEST_F(EdgeSetTest, Exists) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
for (int pos = 0; pos < n; pos++) {
auto p = eset_.insert(&edges_[pos]);
EXPECT_FALSE(p.second);
EXPECT_EQ(&edges_[pos], *p.first);
EXPECT_EQ(1, eset_.erase(&edges_[pos]));
model_.erase(&edges_[pos]);
CheckSame();
}
}
}
TEST_F(EdgeSetTest, DoesNotExist) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
EXPECT_EQ(0, eset_.erase(&nonexistent_));
auto p = eset_.insert(&nonexistent_);
EXPECT_TRUE(p.second);
EXPECT_EQ(&nonexistent_, *p.first);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/edgeset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/edgeset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4eeac038-fc93-4cc4-828c-47132cc75b65 | cpp | tensorflow/tensorflow | collective_order | tensorflow/core/graph/collective_order.cc | tensorflow/core/graph/collective_order_test.cc | #include "tensorflow/core/graph/collective_order.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
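// Single reverse-DFS pass that records every CollectiveReduce node and, for
// each graph node, the set of collective instance keys it transitively
// depends on. The reserve() below guarantees no rehash occurs while
// `node_deps` is held, keeping that reference valid as child entries are
// inserted into the same map.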
Status DiscoverDataDependencies(
const Graph* graph, std::vector<Node*>* collective_nodes,
std::vector<int32>* instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies) {
Status s;
auto node_leave = [collective_nodes, instance_keys, data_dependencies,
&s](Node* node) {
int32_t instance_key;
bool enter_node =
node->IsCollective() && node->type_string() == "CollectiveReduce";
if (enter_node) {
Status get_attr_status =
GetNodeAttr(node->attrs(), "instance_key", &instance_key);
s.Update(get_attr_status);
collective_nodes->push_back(node);
instance_keys->push_back(instance_key);
VLOG(2) << "collective node " << node->DebugString();
}
data_dependencies->reserve(data_dependencies->size() + 1 +
node->out_edges().size());
const auto& node_deps = (*data_dependencies)[node];
for (const Edge* out_edge : node->out_edges()) {
auto& child_deps = (*data_dependencies)[out_edge->dst()];
child_deps.insert(node_deps.begin(), node_deps.end());
if (enter_node && s.ok()) {
child_deps.insert(instance_key);
}
}
};
ReverseDFS(*graph, nullptr, node_leave);
return s;
}
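// For each pair of CollectiveReduce nodes on the same device with no data
// dependency in either direction between their instance keys, adds an
// ordering edge from the higher instance key to the lower one, then prunes
// edges already implied transitively so a minimal set of control
// dependencies remains.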
Status CreateControlDependencies(
const std::vector<Node*>& collective_nodes,
const std::vector<int32>& instance_keys,
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>>* data_dependencies,
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>* dependency_edges) {
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> all_paths;
for (int i = 0; i < collective_nodes.size() - 1; i++) {
if (!collective_nodes[i]->IsCollective() ||
collective_nodes[i]->type_string() != "CollectiveReduce") {
return errors::Internal("Unexpected node ",
collective_nodes[i]->DebugString());
}
const auto& deps_i = (*data_dependencies)[collective_nodes[i]];
for (int j = i + 1; j < collective_nodes.size(); j++) {
if (collective_nodes[i]->requested_device() !=
collective_nodes[j]->requested_device()) {
continue;
}
if (instance_keys[i] == instance_keys[j]) {
return errors::Internal("Unexpected same instance_key ",
instance_keys[i],
" on 2 nodes with the same device ",
collective_nodes[i]->requested_device());
}
const auto& deps_j = (*data_dependencies)[collective_nodes[j]];
if (deps_i.find(instance_keys[j]) == deps_i.end() &&
deps_j.find(instance_keys[i]) == deps_j.end()) {
int src_idx = instance_keys[i] > instance_keys[j] ? i : j;
int dst_idx = instance_keys[i] > instance_keys[j] ? j : i;
Node* src_node = collective_nodes[src_idx];
Node* dst_node = collective_nodes[dst_idx];
VLOG(1) << "Adding control dependency from node " << src_node->name()
<< " instance " << instance_keys[src_idx] << " to node "
<< dst_node->name() << " instance " << instance_keys[dst_idx];
(*dependency_edges)[src_node].insert(dst_node);
auto& src_paths = all_paths[src_node];
src_paths.insert(dst_node);
for (Node* downstream_node : all_paths[dst_node]) {
src_paths.insert(downstream_node);
}
}
}
}
for (int i = 0; i < collective_nodes.size(); ++i) {
Node* node = collective_nodes[i];
auto& neighbor_set = (*dependency_edges)[node];
std::vector<Node*> neighbor_list(neighbor_set.begin(), neighbor_set.end());
for (int j = 0; j < neighbor_list.size(); ++j) {
Node* n1 = neighbor_list[j];
if (n1 == nullptr) continue;
auto& n1_paths = all_paths[n1];
for (int k = 0; k < neighbor_list.size(); ++k) {
Node* n2 = neighbor_list[k];
if (j == k || n2 == nullptr) continue;
if (n1_paths.find(n2) != n1_paths.end()) {
neighbor_set.erase(n2);
neighbor_list[k] = nullptr;
}
}
}
}
return absl::OkStatus();
}
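// Materializes the computed ordering either as explicit control edges
// (kEdges) or as a "wait_for" attribute listing the instance keys each
// collective must wait on (kAttrs).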
Status InsertControlDependencies(
Graph* graph, GraphCollectiveOrder order_type,
const absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>>&
dependency_edges) {
if (order_type == GraphCollectiveOrder::kEdges) {
for (const auto& pair : dependency_edges) {
Node* src_node = pair.first;
for (Node* dst_node : pair.second) {
graph->AddControlEdge(src_node, dst_node);
}
}
} else if (order_type == GraphCollectiveOrder::kAttrs) {
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> wait_for;
for (const auto& pair : dependency_edges) {
int32_t src_instance;
TF_RETURN_IF_ERROR(
GetNodeAttr(pair.first->attrs(), "instance_key", &src_instance));
for (Node* dst_node : pair.second) {
wait_for[dst_node].insert(src_instance);
}
}
for (const auto& pair : wait_for) {
std::vector<int32> wait_for_list(pair.second.begin(), pair.second.end());
pair.first->ClearAttr("wait_for");
pair.first->AddAttr("wait_for", wait_for_list);
}
} else {
return errors::Internal("Unexpected GraphCollectiveOrder type ",
static_cast<int>(order_type));
}
return absl::OkStatus();
}
}
Status OrderCollectives(Graph* graph, GraphCollectiveOrder order_type) {
std::vector<Node*> collective_nodes;
std::vector<int32> instance_keys;
absl::flat_hash_map<Node*, absl::flat_hash_set<int32>> data_dependencies;
TF_RETURN_IF_ERROR(DiscoverDataDependencies(
graph, &collective_nodes, &instance_keys, &data_dependencies));
if (collective_nodes.empty()) return absl::OkStatus();
absl::flat_hash_map<Node*, absl::flat_hash_set<Node*>> dependency_edges;
TF_RETURN_IF_ERROR(CreateControlDependencies(
collective_nodes, instance_keys, &data_dependencies, &dependency_edges));
return InsertControlDependencies(graph, order_type, dependency_edges);
}
} | #include "tensorflow/core/graph/collective_order.h"
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::UnorderedElementsAreArray;
REGISTER_OP("TestParams").Output("o: float");
void VerifyGraph(const Graph& graph,
const std::vector<string>& expected_collective_nodes,
const std::vector<std::pair<string, string>>&
expected_collective_control_edges) {
std::vector<string> actual_collective_nodes;
std::vector<std::pair<string, string>> actual_collective_control_edges;
for (const Node* src : graph.nodes()) {
if (!src->IsCollective()) {
continue;
}
actual_collective_nodes.push_back(src->name());
for (const Edge* edge : src->out_edges()) {
VLOG(2) << "collective edge " << edge->src()->name() << " -> "
<< edge->dst()->name();
if (!edge->IsControlEdge() || edge->dst()->name() == "_SINK") {
continue;
}
actual_collective_control_edges.emplace_back(src->name(),
edge->dst()->name());
}
}
EXPECT_THAT(actual_collective_nodes,
UnorderedElementsAreArray(expected_collective_nodes));
EXPECT_THAT(actual_collective_control_edges,
UnorderedElementsAreArray(expected_collective_control_edges));
}
void VerifyAttrs(
const Graph& graph,
const std::unordered_map<string, std::vector<int32>> wait_for_map) {
for (const Node* node : graph.nodes()) {
if (node->IsCollective() ||
wait_for_map.find(node->name()) == wait_for_map.end()) {
continue;
}
std::vector<int32> wait_for_actual;
TF_EXPECT_OK(GetNodeAttr(node->attrs(), "wait_for", &wait_for_actual));
auto wait_for_expected = wait_for_map.at(node->name());
EXPECT_THAT(wait_for_actual, UnorderedElementsAreArray(wait_for_expected));
}
}
Node* CollectiveReduceNode(GraphDefBuilder* builder, Node* input,
const string& name, const string& device,
int instance_key) {
Node* collective_node =
ops::UnaryOp("CollectiveReduce", input,
builder->opts()
.WithName(name)
.WithDevice(device)
.WithAttr("T", DT_FLOAT)
.WithAttr("group_size", 2)
.WithAttr("group_key", 1)
.WithAttr("instance_key", instance_key)
.WithAttr("merge_op", "Add")
.WithAttr("final_op", "Id")
.WithAttr("subdiv_offsets", {1}));
return collective_node;
}
std::unique_ptr<Graph> InitGraph() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const string dev1 = "/job:localhost/replica:0/task:0/device:CPU:1";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* b = ops::SourceOp("TestParams",
builder.opts().WithName("b").WithDevice(dev1));
Node* c1_0 = CollectiveReduceNode(&builder, a, "c1_0", dev0, 1);
Node* c1_1 = CollectiveReduceNode(&builder, b, "c1_1", dev1, 1);
Node* id0 = ops::UnaryOp(
"Identity", c1_0,
builder.opts().WithName("id0").WithDevice(dev0).WithAttr("T", DT_FLOAT));
Node* id1 = ops::UnaryOp(
"Identity", c1_1,
builder.opts().WithName("id1").WithDevice(dev1).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id0, "c2_0", dev0, 2);
CollectiveReduceNode(&builder, id1, "c2_1", dev1, 2);
CollectiveReduceNode(&builder, id0, "c3_0", dev0, 3);
CollectiveReduceNode(&builder, id1, "c3_1", dev1, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1_0", "c1_1", "c2_0", "c2_1", "c3_0", "c3_1"},
{{"c3_0", "c2_0"}, {"c3_1", "c2_1"}});
}
TEST(CollectiveOrderTest, SimpleOrderAttr) {
std::unique_ptr<Graph> graph = InitGraph();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c2_0", {3}}, {"c2_1", {3}}});
}
std::unique_ptr<Graph> InitGraph2() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* a = ops::SourceOp("TestParams",
builder.opts().WithName("a").WithDevice(dev0));
Node* c1 = CollectiveReduceNode(&builder, a, "c1", dev0, 1);
CollectiveReduceNode(&builder, c1, "c4", dev0, 4);
Node* id = ops::UnaryOp(
"Identity", c1,
builder.opts().WithName("id").WithDevice(dev0).WithAttr("T", DT_FLOAT));
CollectiveReduceNode(&builder, id, "c2", dev0, 2);
CollectiveReduceNode(&builder, id, "c3", dev0, 3);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, SimpleOrder2) {
std::unique_ptr<Graph> graph = InitGraph2();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kEdges));
VerifyGraph(*graph, {"c1", "c2", "c3", "c4"}, {{"c4", "c3"}, {"c3", "c2"}});
}
std::unique_ptr<Graph> InitGraphForPruning() {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
const string dev0 = "/job:localhost/replica:0/task:0/device:CPU:0";
Node* w = ops::SourceOp("TestParams",
builder.opts().WithName("w").WithDevice(dev0));
Node* x = ops::SourceOp("TestParams",
builder.opts().WithName("x").WithDevice(dev0));
Node* y = ops::SourceOp("TestParams",
builder.opts().WithName("y").WithDevice(dev0));
Node* z = ops::SourceOp("TestParams",
builder.opts().WithName("z").WithDevice(dev0));
CollectiveReduceNode(&builder, w, "c1", dev0, 1);
CollectiveReduceNode(&builder, x, "c2", dev0, 2);
CollectiveReduceNode(&builder, y, "c3", dev0, 3);
CollectiveReduceNode(&builder, z, "c4", dev0, 4);
std::unique_ptr<Graph> graph = absl::make_unique<Graph>(OpRegistry::Global());
Status s = GraphDefBuilderToGraph(builder, graph.get());
if (!s.ok()) {
LOG(FATAL) << "Error building graph " << s;
}
return graph;
}
TEST(CollectiveOrderTest, Pruning) {
std::unique_ptr<Graph> graph = InitGraphForPruning();
TF_EXPECT_OK(OrderCollectives(graph.get(), GraphCollectiveOrder::kAttrs));
VerifyAttrs(*graph, {{"c3", {4}}, {"c2", {3}}, {"c1", {2}}});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/collective_order.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/collective_order_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76cfe482-6e50-4a45-9dcc-84e407a2fbd2 | cpp | tensorflow/tensorflow | costmodel | tensorflow/core/graph/costmodel.cc | tensorflow/core/graph/costmodel_test.cc | #include "tensorflow/core/graph/costmodel.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
const Microseconds kDefaultTimeEstimate(1);
const Microseconds kMinTimeEstimate(1);
}
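// Derives min_count_ as half the median of the non-zero execution counts;
// the *Estimate accessors below treat nodes executed fewer times than this
// threshold as too infrequent to trust.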
void CostModel::SuppressInfrequent() {
if (count_.empty()) return;
std::vector<int32> non_zero;
for (auto v : count_) {
if (v > 0) non_zero.push_back(v);
}
const size_t sz = non_zero.size();
if (sz > 0) {
std::nth_element(non_zero.begin(), non_zero.begin() + sz / 2,
non_zero.end());
int32_t median_value = non_zero[sz / 2];
min_count_ = median_value / 2;
VLOG(1) << "num non_zero vals: " << non_zero.size() << " median_value "
<< median_value;
} else {
min_count_ = 1;
}
}
void CostModel::MergeFromLocal(const Graph& g, const CostModel& cm) {
CHECK(is_global_);
CHECK(!cm.is_global());
for (const Node* n : g.nodes()) {
const int local_id = cm.Id(n);
const int global_id = Id(n);
if (local_id < 0 || global_id < 0) continue;
int num_slots = cm.slot_bytes_[local_id].size();
Ensure(global_id, num_slots);
count_[global_id] += cm.count_[local_id];
time_[global_id] += cm.time_[local_id];
if (num_slots > 0) {
if (slot_bytes_[global_id].empty()) {
slot_bytes_[global_id].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[global_id].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[global_id][s];
auto other_v = cm.slot_bytes_[local_id][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
void CostModel::MergeFromGlobal(const CostModel& cm) {
CHECK(is_global_);
CHECK(cm.is_global());
const int num_nodes = cm.count_.size();
for (int i = num_nodes - 1; i >= 0; --i) {
int num_slots = cm.slot_bytes_[i].size();
Ensure(i, num_slots);
count_[i] += cm.count_[i];
time_[i] += cm.time_[i];
if (num_slots > 0) {
if (slot_bytes_[i].empty()) {
slot_bytes_[i].resize(num_slots);
} else {
CHECK_EQ(num_slots, slot_bytes_[i].size());
}
for (int s = 0; s < num_slots; ++s) {
auto& current_v = slot_bytes_[i][s];
auto other_v = cm.slot_bytes_[i][s];
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
void CostModel::MergeFromStats(const NodeNameToCostIdMap& map,
const StepStats& ss) {
CHECK(is_global_);
for (auto& ds : ss.dev_stats()) {
for (auto& ns : ds.node_stats()) {
NodeNameToCostIdMap::const_iterator iter = map.find(ns.node_name());
if (iter == map.end()) continue;
int32_t global_id = iter->second;
Ensure(global_id, ns.output_size());
int64_t elapsed_micros =
ns.op_end_rel_micros() - ns.op_start_rel_micros();
count_[global_id]++;
time_[global_id] += elapsed_micros;
for (auto& no : ns.output()) {
int si = no.slot();
if (static_cast<size_t>(si) >= slot_bytes_[global_id].size()) {
slot_bytes_[global_id].resize(1 + si);
}
auto& current_v = slot_bytes_[global_id][si];
auto other_v =
no.tensor_description().allocation_description().requested_bytes();
if (current_v < 0) {
current_v = other_v;
} else if (other_v > 0) {
current_v += other_v;
}
}
}
}
}
void CostModel::Ensure(int id, int num_outputs) {
if (slot_bytes_.size() <= static_cast<size_t>(id)) {
slot_bytes_.resize(id + 1);
count_.resize(id + 1);
time_.resize(id + 1);
max_mem_usage_.resize(id + 1);
max_exec_time_.resize(id + 1);
output_port_alloc_ids_.resize(id + 1);
}
if (num_outputs > 0) {
auto perslot = &slot_bytes_[id];
auto output_port_alloc_ids = &output_port_alloc_ids_[id];
auto max_mem_usage = &max_mem_usage_[id];
CHECK_LE(perslot->size(), num_outputs);
DCHECK_EQ(output_port_alloc_ids->size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_mem.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_shape.size(), perslot->size());
DCHECK_EQ(max_mem_usage->output_port_type.size(), perslot->size());
perslot->resize(num_outputs, Bytes(-1));
output_port_alloc_ids->resize(num_outputs, -1);
max_mem_usage->output_port_mem.resize(num_outputs, Bytes(-1));
max_mem_usage->output_port_shape.resize(num_outputs, unknown_shape_);
max_mem_usage->output_port_type.resize(num_outputs, DT_INVALID);
}
}
void CostModel::SetNumOutputs(const Node* node, int num_outputs) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, 0);
auto perslot = &slot_bytes_[id];
if (!perslot->empty()) {
CHECK_EQ(num_outputs, perslot->size())
<< "Cannot resize slot_bytes, node=" << node->name();
}
Ensure(id, num_outputs);
}
void CostModel::RecordCount(const Node* node, int count) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
count_[id] += count;
}
int32 CostModel::TotalCount(const Node* node) const {
const int id = Id(node);
if (id < 0) return 0;
return (static_cast<size_t>(id) < slot_bytes_.size()) ? count_[id] : 0;
}
void CostModel::RecordSize(const Node* node, int slot, Bytes bytes) {
const int id = Id(node);
if (id < 0) return;
CHECK_LT(id, slot_bytes_.size());
auto perslot = &slot_bytes_[id];
CHECK_LT(slot, perslot->size());
auto v = &(*perslot)[slot];
if (*v >= 0) {
*v += bytes;
} else {
*v = bytes;
}
}
Bytes CostModel::TotalBytes(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= slot_bytes_.size() ||
slot_bytes_[id].size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return slot_bytes_[id][slot];
}
Bytes CostModel::SizeEstimate(const Node* node, int slot) const {
int32_t count = TotalCount(node);
if (count < min_count_) return Bytes(0);
return TotalBytes(node, slot) / std::max(1, count);
}
void CostModel::RecordTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
DCHECK(node->IsOp()) << node->DebugString();
Ensure(id, node->num_outputs());
time_[id] += time;
}
Microseconds CostModel::TotalTime(const Node* node) const {
DCHECK(node->IsOp()) << node->DebugString();
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= time_.size() ||
time_[id] < Microseconds(0)) {
return Microseconds(0);
}
return time_[id];
}
Microseconds CostModel::TimeEstimate(const Node* node) const {
int32_t count = TotalCount(node);
if (count <= min_count_) return kMinTimeEstimate;
return std::max(kMinTimeEstimate, TotalTime(node) / std::max(1, count));
}
void CostModel::CheckInitialized(const Graph& graph) const {
for (const Node* n : graph.op_nodes()) {
CHECK(static_cast<size_t>(n->id()) < time_.size() &&
time_[n->id()] >= Microseconds(0))
<< ": no time estimate for " << n->DebugString();
CHECK(static_cast<size_t>(n->id()) < slot_bytes_.size())
<< ": no size estimate for " << n->DebugString();
const auto& perslot = slot_bytes_[n->id()];
for (size_t i = 0; i < perslot.size(); i++) {
CHECK_GE(perslot[i], Bytes(0)) << ": no size estimate for output# " << i
<< " of " << n->DebugString();
}
}
}
void CostModel::RecordMaxMemorySize(const Node* node, int output_slot,
Bytes bytes,
const TensorShapeProto& tensor_shape,
const DataType& dtype) {
const int id = Id(node);
if (id < 0) return;
if (output_slot >= node->num_outputs()) {
LOG(ERROR) << "Unexpected output slot for node " << node->DebugString()
<< ". Got " << output_slot << " but its num_outputs is "
<< node->num_outputs();
return;
}
Ensure(id, node->num_outputs());
auto& current_max = max_mem_usage_[id].output_port_mem[output_slot];
if (bytes.value() < 0) {
bytes = MinTensorMemoryUsage(tensor_shape, dtype);
}
if (bytes.value() > current_max.value()) {
current_max = bytes.value();
max_mem_usage_[id].output_port_shape[output_slot] = tensor_shape;
max_mem_usage_[id].output_port_type[output_slot] = dtype;
}
}
Bytes CostModel::MaxMemorySize(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_mem.size() <= static_cast<size_t>(slot)) {
return Bytes(0);
}
return max_mem_usage_[id].output_port_mem[slot];
}
const TensorShapeProto& CostModel::MaxMemoryShape(const Node* node,
int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_shape.size() <=
static_cast<size_t>(slot)) {
return unknown_shape_;
}
return max_mem_usage_[id].output_port_shape[slot];
}
DataType CostModel::MaxMemoryType(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size() ||
max_mem_usage_[id].output_port_type.size() <= static_cast<size_t>(slot)) {
return DT_INVALID;
}
return max_mem_usage_[id].output_port_type[slot];
}
Bytes CostModel::TempMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].temp_memory_size;
}
Bytes CostModel::PersistentMemorySize(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_mem_usage_.size()) {
return Bytes(0);
}
return max_mem_usage_[id].persistent_memory_size;
}
void CostModel::RecordMemoryStats(const Node* node,
const MemoryStats& memory_stats) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_mem_usage_[id].temp_memory_size = memory_stats.temp_memory_size();
max_mem_usage_[id].persistent_memory_size =
memory_stats.persistent_memory_size();
for (int64_t alloc_id : memory_stats.persistent_tensor_alloc_ids()) {
if (alloc_id > 0) {
persistent_alloc_ids_.insert(alloc_id);
}
}
}
void CostModel::RecordMaxExecutionTime(const Node* node, Microseconds time) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
max_exec_time_[id] = std::max(max_exec_time_[id], time);
}
Microseconds CostModel::MaxExecutionTime(const Node* node) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= max_exec_time_.size()) {
return Microseconds(0);
}
return max_exec_time_[id];
}
void CostModel::RecordAllocationId(const Node* node, int output_slot,
int64_t alloc_id) {
const int id = Id(node);
if (id < 0) return;
Ensure(id, node->num_outputs());
output_port_alloc_ids_[id][output_slot] = alloc_id;
}
int64_t CostModel::AllocationId(const Node* node, int slot) const {
const int id = Id(node);
if (id < 0 || static_cast<size_t>(id) >= output_port_alloc_ids_.size() ||
output_port_alloc_ids_[id].size() <= static_cast<size_t>(slot)) {
return -1;
}
return output_port_alloc_ids_[id][slot];
}
bool CostModel::IsPersistentTensor(const Node* node, int64_t alloc_id) const {
return persistent_alloc_ids_.count(alloc_id) > 0;
}
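// Transfer-time model: size / bandwidth plus a fixed latency floor.
// estimated_gbps * 1000 / 8 converts gigabits per second to bytes per
// microsecond (1 Gbps ~= 125 bytes/usec), so e.g. copying 1 MB at 1 Gbps
// with 1 ms latency comes out to roughly 8000 + 1000 microseconds.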
Microseconds CostModel::CopyTimeEstimate(Bytes b, double network_latency_millis,
double estimated_gbps) {
int64_t copy_bytes = b.value();
const double bytes_per_usec = estimated_gbps * 1000.0 / 8;
const double min_micros = network_latency_millis * 1000.0;
return Microseconds(
static_cast<int64_t>(copy_bytes / bytes_per_usec + min_micros));
}
Microseconds CostModel::ComputationTimeEstimate(int64_t math_ops) {
return Microseconds(math_ops / 1000);
}
void CostModel::IncrementUpdateTimes() { update_times_++; }
int32 CostModel::GetUpdateTimes() const { return update_times_; }
namespace {
static void AddNodesToCostModel(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
const int num_outputs = n->num_outputs();
cost_model->SetNumOutputs(n, num_outputs);
for (int output = 0; output < num_outputs; output++) {
cost_model->RecordSize(n, output, Bytes(1));
}
}
}
static void AssignSizes(const Graph& g, CostModel* cost_model) {
for (const Edge* e : g.edges()) {
if (e->IsControlEdge()) {
continue;
}
const Node* src = e->src();
Bytes size(1);
cost_model->RecordSize(src, e->src_output(), size);
}
}
static Microseconds TimeEstimateForNode(CostModel* cost_model, Node* n) {
CHECK(n->IsOp());
VLOG(2) << "Node " << n->id() << ": " << n->name()
<< " type_string: " << n->type_string();
if (IsConstant(n) || IsVariable(n)) {
return Microseconds(0);
}
return kDefaultTimeEstimate;
}
static void EstimateComputationCosts(const Graph& g, CostModel* cost_model) {
for (Node* n : g.nodes()) {
if (!n->IsOp()) continue;
cost_model->RecordTime(n, TimeEstimateForNode(cost_model, n));
}
}
}
void CostModel::InitFromGraph(const Graph& g) {
const int num_node_ids = g.num_node_ids();
slot_bytes_.reserve(num_node_ids);
count_.reserve(num_node_ids);
time_.reserve(num_node_ids);
max_mem_usage_.reserve(num_node_ids);
max_exec_time_.reserve(num_node_ids);
output_port_alloc_ids_.reserve(num_node_ids);
AddNodesToCostModel(g, this);
AssignSizes(g, this);
EstimateComputationCosts(g, this);
CheckInitialized(g);
}
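// Serializes per-node costs into a CostGraphDef. GlobalId(n, offset) rebases
// node ids so several graphs can be appended to one proto. An output sharing
// an allocation id with one of the node's inputs is marked as an alias of
// that input, and non-aliased persistent tensors (e.g. variables) are
// reported with size 0 since they are not per-step allocations.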
void CostModel::AddToCostGraphDef(const Graph* graph,
CostGraphDef* cost_graph) const {
std::vector<const Edge*> inputs;
std::vector<const Edge*> control_inputs;
int offset = cost_graph->node_size();
for (const Node* n : graph->nodes()) {
CostGraphDef::Node* cnode = cost_graph->add_node();
cnode->set_name(n->name());
cnode->set_device(n->assigned_device_name());
cnode->set_id(GlobalId(n, offset));
inputs.clear();
inputs.resize(n->num_inputs(), nullptr);
control_inputs.clear();
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
control_inputs.push_back(e);
} else {
inputs[e->dst_input()] = e;
}
}
std::sort(control_inputs.begin(), control_inputs.end(),
[this](Edge const* a, Edge const* b) {
return Id(a->src()) < Id(b->src());
});
for (const Edge* e : inputs) {
CostGraphDef::Node::InputInfo* input_info = cnode->add_input_info();
input_info->set_preceding_node(GlobalId(e->src(), offset));
input_info->set_preceding_port(e->src_output());
}
for (int i = 0; i < n->num_outputs(); i++) {
CostGraphDef::Node::OutputInfo* output_info = cnode->add_output_info();
int64_t alloc_id = AllocationId(n, i);
int64_t alias_to_input = -1;
for (const Edge* e : inputs) {
int64_t input_alloc_id = AllocationId(e->src(), e->src_output());
if (input_alloc_id == alloc_id) {
alias_to_input = e->dst_input();
break;
}
}
output_info->set_alias_input_port(alias_to_input);
output_info->set_dtype(MaxMemoryType(n, i));
*output_info->mutable_shape() = MaxMemoryShape(n, i);
if (alias_to_input < 0 && IsPersistentTensor(n, alloc_id)) {
output_info->set_size(0);
} else {
output_info->set_size(MaxMemorySize(n, i).value());
}
}
for (const Edge* e : control_inputs) {
cnode->add_control_input(GlobalId(e->src(), offset));
}
cnode->set_temporary_memory_size(TempMemorySize(n).value());
cnode->set_persistent_memory_size(PersistentMemorySize(n).value());
cnode->set_compute_cost(MaxExecutionTime(n).value());
cnode->set_is_final(n->IsSend());
}
}
void CostModel::WriteSummaryToLog() const {
LOG(INFO) << " min_count_=" << min_count_;
for (size_t i = 0; i < count_.size(); ++i) {
LOG(INFO) << "Node " << i << " count " << count_[i] << " total time "
<< time_[i] << " avg time "
<< (time_[i] / (std::max(1, count_[i])));
}
}
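// Lower-bounds a tensor's memory from its static shape: unknown rank maps to
// Bytes(-1), and unknown or zero dimensions are clamped to 1 so that
// partially known shapes still produce a conservative estimate.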
Bytes CostModel::MinTensorMemoryUsage(const TensorShapeProto& tensor_shape,
const DataType& dtype) {
if (tensor_shape.unknown_rank()) {
return Bytes(-1);
}
size_t num_coefficients = 1;
for (const TensorShapeProto::Dim& dim : tensor_shape.dim()) {
num_coefficients *= std::max<int64_t>(dim.size(), 1);
}
return Bytes(num_coefficients * DataTypeSize(dtype));
}
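// Worked example: a fully known [2, 5, 10] DT_FLOAT tensor yields
// 2 * 5 * 10 * sizeof(float) = 400 bytes. A shape of unknown rank maps to
// Bytes(-1), and each dimension contributes max(size, 1) to the product, so
// zero-sized dimensions do not zero out the estimate.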
} | #include "tensorflow/core/graph/costmodel.h"
#include <memory>
#include <string>
#include <unordered_map>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/common_runtime/costmodel_manager.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
using ::testing::Not;
MATCHER_P(ShapeProtoEquals, other, "") {
if (arg.unknown_rank()) {
return other.unknown_rank();
}
if (arg.dim_size() != other.dim_size()) {
return false;
}
for (int i = 0; i < arg.dim_size(); ++i) {
if (arg.dim(i).size() != other.dim(i).size()) {
return false;
}
}
return true;
}
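// ShapeProtoEquals is a custom gMock matcher: two TensorShapeProtos match when
// both have unknown rank, or when they agree on rank and every dimension size.
// The tests below use it directly and negated via ::testing::Not.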
static void InitGraph(const string& s, Graph* graph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(s, &graph_def)) << s;
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, graph));
}
static void InitModelFromGraph(const Graph& graph, CostModel& cm) {
for (const auto& node : graph.nodes()) {
cm.SetNumOutputs(node, node->num_outputs());
}
}
static std::unique_ptr<Graph> CreateBasicTestGraph() {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A' op: 'Input'}"
"node { name: 'B' op: 'Input'}"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }"
"node { name: 'D' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }",
graph.get());
return graph;
}
Node* FindNode(const Graph& graph, std::string name) {
for (const auto& node : graph.nodes()) {
if (node->name() == name) {
return node;
}
}
return nullptr;
}
Node* AddNode(Graph& graph, const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
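// Test helper: builds a NodeDef of `node_type` with `num_inputs` dummy
// DT_FLOAT inputs named node_0, node_1, ... and adds it to `graph`. The input
// names are placeholders; this helper creates no edges.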
static void GenerateStepStats(Graph* graph, StepStats* step_stats,
const string& device_name) {
DeviceStepStats* device_stepstats = step_stats->add_dev_stats();
device_stepstats->set_device(device_name);
for (const auto& node_def : graph->nodes()) {
NodeExecStats* node_stats = device_stepstats->add_node_stats();
node_stats->set_node_name(node_def->name());
}
}
REGISTER_OP("Input").Output("o: float").SetIsStateful();
TEST(CostModelTest, WorksWithManager) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto graph1 = std::make_unique<Graph>(OpRegistry::Global());
auto graph2 = std::make_unique<Graph>(OpRegistry::Global());
InitGraph(
"node { name: 'A1' op: 'Input'}"
"node { name: 'B1' op: 'Input'}"
"node { name: 'C1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }"
"node { name: 'D1' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A1', 'B1'] }",
graph1.get());
InitGraph(
"node { name: 'A2' op: 'Input'}"
"node { name: 'B2' op: 'Input'}"
"node { name: 'C2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }"
"node { name: 'D2' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A2', 'B2'] }",
graph2.get());
StepStats step_stats;
GenerateStepStats(graph1.get(), &step_stats, "DummyDevice1");
GenerateStepStats(graph2.get(), &step_stats, "DummyDevice2");
StepStatsCollector collector(&step_stats);
std::unordered_map<string, const Graph*> device_map;
device_map["DummyDevice1"] = graph1.get();
device_map["DummyDevice2"] = graph2.get();
CostModelManager cost_model_manager;
collector.BuildCostModel(&cost_model_manager, device_map);
CostGraphDef cost_graph_def;
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph1.get(), &cost_graph_def));
TF_ASSERT_OK(
cost_model_manager.AddToCostGraphDef(graph2.get(), &cost_graph_def));
ASSERT_EQ(cost_graph_def.node_size(), 12);
absl::flat_hash_map<int32, const CostGraphDef::Node> ids;
for (auto node : cost_graph_def.node()) {
int32_t index = node.id();
auto result = ids.insert({index, node});
EXPECT_TRUE(result.second);
}
}
TEST(CostModelTest, GlobalId) {
auto graph = CreateBasicTestGraph();
CostModel cm_local(false);
CostModel cm_global(true);
constexpr int kOffset = 7;
for (const auto& node : graph->nodes()) {
EXPECT_EQ(cm_local.GlobalId(node, kOffset), node->id() + kOffset);
EXPECT_EQ(cm_global.GlobalId(node, kOffset), node->cost_id());
}
}
TEST(CostModelTest, RecordTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kMicrosPerIter = 1000;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordTime(node, node->id() * Microseconds(kMicrosPerIter));
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalTime(node),
Microseconds(node->id() * kIters * kMicrosPerIter));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordCount) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kCountPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
cm.RecordCount(node, node->id() * kCountPerIter);
}
}
for (const auto& node : graph->op_nodes()) {
EXPECT_EQ(cm.TotalCount(node), node->id() * kIters * kCountPerIter);
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalCount(E), 0);
}
TEST(CostModelTest, RecordSize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
constexpr int kIters = 100;
constexpr int kBytesPerIter = 4;
for (int i = 0; i < kIters; ++i) {
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
cm.RecordSize(node, slot, Bytes((node->id() + slot) * kBytesPerIter));
}
}
}
for (const auto& node : graph->op_nodes()) {
for (int slot = 0; slot < node->num_outputs(); ++slot) {
EXPECT_EQ(cm.TotalBytes(node, slot),
Bytes((node->id() + slot) * kIters * kBytesPerIter));
}
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(0));
}
TEST(CostModelTest, SizeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kBytesPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordSize(C, 0, Bytes(kCount * kBytesPerCount));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(kBytesPerCount));
}
TEST(CostModelTest, TimeEstimate) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
constexpr int kMicrosPerCount = 31;
constexpr int kCount = 17;
cm.RecordCount(C, kCount);
cm.RecordTime(C, Microseconds(kCount * kMicrosPerCount));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(kMicrosPerCount));
}
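// Both estimate tests rely on the same arithmetic: the model divides the
// recorded total by the recorded count, so 17 executions totaling 17 * 31
// units recover the per-execution figure of 31 (bytes or microseconds).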
TensorShapeProto CreateTensorShapeProto(absl::Span<const int64_t> dims) {
TensorShapeProto shape;
for (int i = 0; i < dims.size(); ++i) {
shape.add_dim()->set_size(dims[i]);
}
return shape;
}
int64_t Count(const TensorShapeProto& shape) {
int64_t count = 1;
for (int i = 0; i < shape.dim_size(); ++i) {
count *= shape.dim(i).size();
}
return count;
}
TEST(CostModelTest, RecordMaxMemorySize) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
Node* C = FindNode(*graph, "C");
InitModelFromGraph(*graph, cm);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(-1));
{
const TensorShapeProto shape = CreateTensorShapeProto({2, 5, 10});
const DataType dtype = DataType::DT_FLOAT;
const Bytes bytes = Bytes(Count(shape) * sizeof(float));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({3, 6, 11});
const DataType dtype = DataType::DT_DOUBLE;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), bytes);
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({1, 1, 1});
const DataType dtype = DataType::DT_BFLOAT16;
const Bytes bytes = Bytes(Count(shape) * sizeof(double));
cm.RecordMaxMemorySize(C, 0, bytes, shape, dtype);
EXPECT_GT(cm.MaxMemorySize(C, 0), bytes);
EXPECT_NE(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), Not(ShapeProtoEquals(shape)));
}
{
const TensorShapeProto shape = CreateTensorShapeProto({100, 100, 100});
const DataType dtype = DataType::DT_BFLOAT16;
cm.RecordMaxMemorySize(C, 0, Bytes(-1), shape, dtype);
EXPECT_EQ(cm.MaxMemorySize(C, 0), Bytes(Count(shape) * sizeof(bfloat16)));
EXPECT_EQ(cm.MaxMemoryType(C, 0), dtype);
EXPECT_THAT(cm.MaxMemoryShape(C, 0), ShapeProtoEquals(shape));
}
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxMemorySize(E, 0), Bytes(0));
  EXPECT_EQ(cm.MaxMemoryType(E, 0), DataType::DT_INVALID);
TensorShapeProto unknown;
unknown.set_unknown_rank(true);
EXPECT_THAT(cm.MaxMemoryShape(E, 0), ShapeProtoEquals(unknown));
}
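// RecordMaxMemorySize keeps only the running maximum per output slot: the
// larger second record replaces the first, the smaller third record is
// ignored, and passing Bytes(-1) makes the model derive the size from the
// shape and dtype, as the [100, 100, 100] bfloat16 case shows.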
TEST(CostModelTest, RecordMaxExecutionTime) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(0));
cm.RecordMaxExecutionTime(C, Microseconds(13));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(13));
cm.RecordMaxExecutionTime(C, Microseconds(27));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
cm.RecordMaxExecutionTime(C, Microseconds(9));
EXPECT_EQ(cm.MaxExecutionTime(C), Microseconds(27));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.MaxExecutionTime(E), Microseconds(0));
}
TEST(CostModelTest, RecordMemoryStats) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
MemoryStats stats;
stats.set_temp_memory_size(256);
stats.set_persistent_memory_size(16);
stats.add_persistent_tensor_alloc_ids(1);
stats.add_persistent_tensor_alloc_ids(3);
stats.add_persistent_tensor_alloc_ids(5);
stats.add_persistent_tensor_alloc_ids(5);
cm.RecordMemoryStats(C, stats);
EXPECT_EQ(cm.TempMemorySize(C), stats.temp_memory_size());
EXPECT_EQ(cm.PersistentMemorySize(C), stats.persistent_memory_size());
EXPECT_TRUE(cm.IsPersistentTensor(C, 1));
EXPECT_TRUE(cm.IsPersistentTensor(C, 3));
EXPECT_TRUE(cm.IsPersistentTensor(C, 5));
EXPECT_FALSE(cm.IsPersistentTensor(C, 31));
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.TempMemorySize(E), Bytes(0));
EXPECT_EQ(cm.PersistentMemorySize(E), Bytes(0));
}
TEST(CostModelTest, RecordAllocationId) {
auto graph = CreateBasicTestGraph();
CostModel cm(false);
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
cm.RecordAllocationId(C, 0, 13);
EXPECT_EQ(cm.AllocationId(C, 0), 13);
EXPECT_EQ(cm.AllocationId(C, 7), -1);
Node* E = AddNode(*graph, "E", "Mul", 2);
EXPECT_EQ(cm.AllocationId(E, 0), -1);
}
TEST(CostModelTest, CopyTimeEstimate) {
int64_t bytes = 32568;
double latency_ms = 10.2;
double gbps = 2.2;
double bytes_per_usec = gbps * 1000 / 8;
double cost_usecs = (bytes / bytes_per_usec + latency_ms * 1000);
EXPECT_EQ(CostModel::CopyTimeEstimate(Bytes(bytes), latency_ms, gbps),
Microseconds(static_cast<uint64_t>(cost_usecs)));
}
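// Worked numbers for the copy estimate above: 2.2 Gb/s is 2.2 * 1000 / 8 =
// 275 bytes per microsecond, so 32568 bytes take about 118.4 usec to move;
// adding the 10.2 ms (10200 usec) latency gives 10318 usec after truncation.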
TEST(CostModelTest, ComputationTimeEstimate) {
constexpr int64_t kNumMathOps = 32150;
EXPECT_EQ(CostModel::ComputationTimeEstimate(kNumMathOps),
Microseconds(kNumMathOps / 1000));
}
TEST(CostModel, UpdateTimes) {
CostModel cm(false);
EXPECT_EQ(cm.GetUpdateTimes(), 0);
constexpr int kNumUpdates = 111;
for (int i = 0; i < kNumUpdates; ++i) {
cm.IncrementUpdateTimes();
}
EXPECT_EQ(cm.GetUpdateTimes(), kNumUpdates);
}
TEST(CostModel, SuppressInfrequent) {
CostModel cm(false);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Node* A = AddNode(*graph, "A", "Mul", 2);
Node* B = AddNode(*graph, "B", "Mul", 2);
Node* C = AddNode(*graph, "B", "Mul", 2);
InitModelFromGraph(*graph, cm);
cm.RecordCount(A, 1000);
cm.RecordSize(A, 0, Bytes(8 * 1000));
cm.RecordTime(A, Microseconds(8 * 1000));
cm.RecordCount(B, 2000);
cm.RecordSize(B, 0, Bytes(2000 * 10));
cm.RecordTime(B, Microseconds(2000 * 10));
cm.RecordCount(C, 17);
cm.RecordSize(C, 0, Bytes(32 * 17));
cm.RecordTime(C, Microseconds(32 * 17));
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(32));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(32));
cm.SuppressInfrequent();
EXPECT_EQ(cm.SizeEstimate(A, 0), Bytes(8));
EXPECT_EQ(cm.TimeEstimate(A), Microseconds(8));
EXPECT_EQ(cm.SizeEstimate(B, 0), Bytes(10));
EXPECT_EQ(cm.TimeEstimate(B), Microseconds(10));
EXPECT_EQ(cm.SizeEstimate(C, 0), Bytes(0));
EXPECT_EQ(cm.TimeEstimate(C), Microseconds(1));
}
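// SuppressInfrequent zeroes statistics for rarely executed nodes: A and B
// (1000 and 2000 executions) keep their per-execution estimates, while C's 17
// executions fall below the model's internal count threshold, collapsing its
// size estimate to 0 bytes and its time estimate to the 1-usec floor.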
TEST(CostModelTest, MergeFromLocal) {
CostModel cm_global(true);
CostModel cm_local(false);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm_global);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm_global.RecordCount(C, 23);
cm_global.RecordSize(C, 0, Bytes(23));
cm_global.RecordTime(C, Microseconds(123));
cm_global.RecordCount(D, 17);
cm_global.RecordSize(D, 0, Bytes(17));
cm_global.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm_local);
cm_local.RecordCount(E, 37);
cm_local.RecordSize(E, 0, Bytes(37));
cm_local.RecordTime(E, Microseconds(137));
cm_local.RecordCount(F, 41);
cm_local.RecordSize(F, 0, Bytes(41));
cm_local.RecordTime(F, Microseconds(141));
cm_local.RecordCount(C, 1);
cm_local.RecordSize(C, 0, Bytes(1));
cm_local.RecordTime(C, Microseconds(100));
cm_global.MergeFromLocal(*graph, cm_local);
EXPECT_EQ(cm_global.TotalCount(E), cm_local.TotalCount(E));
EXPECT_EQ(cm_global.TotalBytes(E, 0), cm_local.TotalBytes(E, 0));
EXPECT_EQ(cm_global.TotalTime(E), cm_local.TotalTime(E));
EXPECT_EQ(cm_global.TotalCount(F), cm_local.TotalCount(F));
EXPECT_EQ(cm_global.TotalBytes(F, 0), cm_local.TotalBytes(F, 0));
EXPECT_EQ(cm_global.TotalTime(F), cm_local.TotalTime(F));
  EXPECT_EQ(cm_global.TotalCount(C), 24);
EXPECT_EQ(cm_global.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm_global.TotalTime(C), Microseconds(223));
}
TEST(CostModelTest, MergeFromGlobal) {
CostModel cm1(true);
CostModel cm2(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm1);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm1.RecordCount(C, 23);
cm1.RecordSize(C, 0, Bytes(23));
cm1.RecordTime(C, Microseconds(123));
cm1.RecordCount(D, 17);
cm1.RecordSize(D, 0, Bytes(17));
cm1.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
InitModelFromGraph(*graph, cm2);
cm2.RecordCount(E, 37);
cm2.RecordSize(E, 0, Bytes(37));
cm2.RecordTime(E, Microseconds(137));
cm2.RecordCount(F, 41);
cm2.RecordSize(F, 0, Bytes(41));
cm2.RecordTime(F, Microseconds(141));
cm2.RecordCount(C, 1);
cm2.RecordSize(C, 0, Bytes(1));
cm2.RecordTime(C, Microseconds(100));
cm1.MergeFromGlobal(cm2);
EXPECT_EQ(cm1.TotalCount(E), cm2.TotalCount(E));
EXPECT_EQ(cm1.TotalBytes(E, 0), cm2.TotalBytes(E, 0));
EXPECT_EQ(cm1.TotalTime(E), cm2.TotalTime(E));
EXPECT_EQ(cm1.TotalCount(F), cm2.TotalCount(F));
EXPECT_EQ(cm1.TotalBytes(F, 0), cm2.TotalBytes(F, 0));
EXPECT_EQ(cm1.TotalTime(F), cm2.TotalTime(F));
  EXPECT_EQ(cm1.TotalCount(C), 24);
EXPECT_EQ(cm1.TotalBytes(C, 0), Bytes(24));
EXPECT_EQ(cm1.TotalTime(C), Microseconds(223));
}
NodeExecStats CreateNodeExecStats(const Node* node, int64_t time,
int64_t bytes) {
NodeExecStats stats;
stats.set_node_name(node->name());
stats.set_op_start_rel_micros(10);
stats.set_op_end_rel_micros(10 + time);
for (int i = 0; i < node->num_outputs(); ++i) {
NodeOutput* no = stats.add_output();
no->set_slot(i);
no->mutable_tensor_description()
->mutable_allocation_description()
->set_requested_bytes(bytes);
}
return stats;
}
TEST(CostModelTest, MergeFromStats) {
CostModel cm(true);
auto graph = CreateBasicTestGraph();
InitModelFromGraph(*graph, cm);
Node* C = FindNode(*graph, "C");
Node* D = FindNode(*graph, "D");
cm.RecordCount(C, 23);
cm.RecordTime(C, Microseconds(123));
cm.RecordCount(D, 17);
cm.RecordTime(D, Microseconds(117));
Node* E = AddNode(*graph, "E", "Mul", 2);
graph->AddEdge(C, 0, E, 0);
graph->AddEdge(D, 0, E, 1);
Node* F = AddNode(*graph, "F", "Mul", 2);
graph->AddEdge(E, 0, F, 0);
graph->AddEdge(D, 0, F, 1);
StepStats stats;
DeviceStepStats* dstats = stats.add_dev_stats();
*(dstats->add_node_stats()) = CreateNodeExecStats(C, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(D, 10, 10);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(E, 20, 20);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
*(dstats->add_node_stats()) = CreateNodeExecStats(F, 30, 30);
NodeNameToCostIdMap id_map;
for (const auto& node : graph->nodes()) {
id_map.emplace(node->name(), node->cost_id());
}
cm.MergeFromStats(id_map, stats);
EXPECT_EQ(cm.TotalCount(C), 24);
EXPECT_EQ(cm.TotalTime(C), Microseconds(133));
EXPECT_EQ(cm.TotalBytes(C, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(D), 18);
EXPECT_EQ(cm.TotalTime(D), Microseconds(127));
EXPECT_EQ(cm.TotalBytes(D, 0), Bytes(10));
EXPECT_EQ(cm.TotalCount(E), 2);
EXPECT_EQ(cm.TotalTime(E), Microseconds(40));
EXPECT_EQ(cm.TotalBytes(E, 0), Bytes(40));
EXPECT_EQ(cm.TotalCount(F), 2);
EXPECT_EQ(cm.TotalTime(F), Microseconds(60));
EXPECT_EQ(cm.TotalBytes(F, 0), Bytes(60));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/costmodel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/costmodel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e8ed1a7-a21b-45fb-94a1-dd7bf937e305 | cpp | tensorflow/tensorflow | node_builder | tensorflow/core/graph/node_builder.cc | tensorflow/core/graph/node_builder_test.cc | #include "tensorflow/core/graph/node_builder.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
NodeBuilder::NodeOut::NodeOut(Node* n, int32_t i)
: node(n),
error(false),
name(node != nullptr ? node->name() : (error = true, "")),
index(i),
dt(SafeGetOutput(node, i, &error)) {}
NodeBuilder::NodeOut::NodeOut(OutputTensor t) : NodeOut(t.node, t.index) {}
NodeBuilder::NodeOut::NodeOut(StringPiece n, int32_t i, DataType t)
: node(nullptr), error(false), name(n), index(i), dt(t) {}
NodeBuilder::NodeOut::NodeOut()
: node(nullptr), error(true), index(0), dt(DT_FLOAT) {}
NodeBuilder::NodeBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug)
: def_builder_(name, op_name, op_registry, debug) {}
NodeBuilder::NodeBuilder(StringPiece name, const OpDef* op_def)
: def_builder_(name, op_def) {}
NodeBuilder::NodeBuilder(const NodeDefBuilder& def_builder)
: def_builder_(def_builder) {}
NodeBuilder& NodeBuilder::Input(Node* src_node, int src_index) {
inputs_.emplace_back(src_node, src_index);
DataType dt;
if (GetOutputType(src_node, src_index, &dt)) {
def_builder_.Input(src_node->name(), src_index, dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(NodeOut src) {
if (src.error) {
AddIndexError(src.node, src.index);
} else {
inputs_.emplace_back(src.node, src.index);
def_builder_.Input(src.name, src.index, src.dt);
}
return *this;
}
NodeBuilder& NodeBuilder::Input(absl::Span<const NodeOut> src_list) {
std::vector<NodeDefBuilder::NodeOut> srcs;
srcs.reserve(src_list.size());
for (const auto& node_out : src_list) {
if (node_out.error) {
AddIndexError(node_out.node, node_out.index);
} else {
srcs.emplace_back(node_out.name, node_out.index, node_out.dt);
inputs_.emplace_back(node_out.node, node_out.index);
}
}
def_builder_.Input(absl::Span<const NodeDefBuilder::NodeOut>(srcs));
return *this;
}
NodeBuilder& NodeBuilder::ControlInput(Node* src_node) {
control_inputs_.emplace_back(src_node);
def_builder_.ControlInput(src_node->name());
return *this;
}
NodeBuilder& NodeBuilder::ControlInputs(absl::Span<Node* const> src_nodes) {
control_inputs_.insert(control_inputs_.end(), src_nodes.begin(),
src_nodes.end());
for (const Node* src_node : src_nodes) {
def_builder_.ControlInput(src_node->name());
}
return *this;
}
NodeBuilder& NodeBuilder::Device(StringPiece device_spec) {
def_builder_.Device(device_spec);
return *this;
}
NodeBuilder& NodeBuilder::AssignedDevice(StringPiece device) {
assigned_device_ = string(device);
return *this;
}
NodeBuilder& NodeBuilder::XlaCluster(StringPiece xla_cluster) {
def_builder_.Attr("_XlaCluster", xla_cluster);
return *this;
}
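// Finalize is where accumulated input errors surface: the builder validates
// the assembled NodeDef against its OpDef, checks op deprecation against the
// graph's producer version, adds the node, and only then wires up the data
// and control edges recorded by Input()/ControlInput().
//
// Minimal usage sketch (`graph`, `a`, and `b` are assumed to exist, and "Add"
// stands in for any registered two-input op):
//
//   Node* sum = nullptr;
//   TF_CHECK_OK(NodeBuilder("sum", "Add")
//                   .Input(a)
//                   .Input(b)
//                   .Finalize(&graph, &sum));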
absl::StatusOr<Node*> NodeBuilder::Finalize(Graph* graph, bool consume) {
Node* out;
TF_RETURN_IF_ERROR(Finalize(graph, &out, consume));
return out;
}
Status NodeBuilder::Finalize(Graph* graph, Node** created_node, bool consume) {
if (created_node != nullptr) {
*created_node = nullptr;
}
if (!errors_.empty()) {
return errors::InvalidArgument(absl::StrJoin(errors_, "\n"));
}
NodeDef node_def;
TF_RETURN_IF_ERROR(def_builder_.Finalize(&node_def, consume));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, def_builder_.op_def()));
TF_RETURN_IF_ERROR(
CheckOpDeprecation(def_builder_.op_def(), graph->versions().producer()));
TF_ASSIGN_OR_RETURN(Node * node, graph->AddNode(std::move(node_def)));
node->set_assigned_device_name(assigned_device_);
for (size_t i = 0; i < inputs_.size(); ++i) {
if (inputs_[i].node != nullptr) {
graph->AddEdge(inputs_[i].node, inputs_[i].index, node, i);
}
}
for (Node* control_input : control_inputs_) {
graph->AddControlEdge(control_input, node);
}
if (created_node != nullptr) *created_node = node;
return absl::OkStatus();
}
void NodeBuilder::AddIndexError(const Node* node, int i) {
if (node == nullptr) {
errors_.emplace_back(
strings::StrCat("Attempt to add nullptr Node to node with type ",
def_builder_.op_def().name()));
} else {
errors_.emplace_back(strings::StrCat(
"Attempt to add output ", i, " of ", node->name(), " not in range [0, ",
node->num_outputs(), ") to node with type ",
def_builder_.op_def().name(), ". Node: ", FormatNodeForError(*node)));
}
}
bool NodeBuilder::GetOutputType(const Node* node, int i, DataType* dt) {
bool error;
*dt = SafeGetOutput(node, i, &error);
if (error) AddIndexError(node, i);
return !error;
}
} | #include "tensorflow/core/graph/node_builder.h"
#include <string>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("Source").Output("o: out_types").Attr("out_types: list(type)");
REGISTER_OP("Sink").Input("i: T").Attr("T: type");
TEST(NodeBuilderTest, Simple) {
Graph graph(OpRegistry::Global());
Node* source_node;
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
EXPECT_FALSE(NodeBuilder("sink3", "Sink")
.Input(source_node, 2)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink4", "Sink")
.Input(source_node, -1)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink5", "Sink")
.Input({source_node, -1})
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink6", "Sink")
.Input(nullptr)
.Finalize(&graph, nullptr)
.ok());
EXPECT_FALSE(NodeBuilder("sink7", "Sink")
.Input(NodeBuilder::NodeOut(nullptr, 0))
.Finalize(&graph, nullptr)
.ok());
}
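// Note the failure mode exercised above: bad inputs (out-of-range output
// indices, null nodes) are recorded when Input() is called but only reported
// when Finalize() runs, which is why each bad case still chains fluently.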
REGISTER_OP("FullTypeOpBasicType")
.Output("o1: out_type")
.Attr("out_type: type")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_type");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorBasicType) {
Graph graph(OpRegistry::Global());
Node* node;
TF_EXPECT_OK(NodeBuilder("op", "FullTypeOpBasicType")
.Attr("out_type", DT_FLOAT)
.Finalize(&graph, &node));
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 1);
auto ot = ft.args(0);
ASSERT_EQ(ot.type_id(), TFT_ARRAY);
ASSERT_EQ(ot.args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ot.args(0).args().size(), 0);
}
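// The assertions above show the type constructor expanding the "out_type"
// type variable: with out_type = DT_FLOAT, the node's full type becomes
// TFT_PRODUCT[TFT_ARRAY[TFT_FLOAT]], the outer TFT_PRODUCT wrapping one entry
// per output.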
REGISTER_OP("FullTypeOpListType")
.Output("o1: out_types")
.Attr("out_types: list(type)")
.SetTypeConstructor([](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(TFT_ARRAY);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s("out_types");
return absl::OkStatus();
});
TEST(NodeBuilderTest, TypeConstructorListType) {
Graph graph(OpRegistry::Global());
Node* node;
ASSERT_FALSE(NodeBuilder("op", "FullTypeOpListType")
.Attr("out_types", {DT_FLOAT, DT_INT32})
.Finalize(&graph, &node)
.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/node_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/node_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed0939e5-4c7d-4e30-8840-90284cbc9a53 | cpp | tensorflow/tensorflow | graph | tensorflow/core/graph/graph.cc | tensorflow/core/graph/graph_test.cc | #include "tensorflow/core/graph/graph.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_properties.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/while_context.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
Node::NodeClass Node::GetNodeClassForOp(const std::string& ts) {
static const absl::flat_hash_map<std::string, Node::NodeClass>*
kNodeClassTable =
#define REF_CLASS(key, value) \
{key, value}, { "Ref" key, value }
new absl::flat_hash_map<std::string, Node::NodeClass>({
REF_CLASS("Switch", NC_SWITCH),
REF_CLASS("_SwitchN", NC_SWITCH),
REF_CLASS("Merge", NC_MERGE),
REF_CLASS("Enter", NC_ENTER),
REF_CLASS("Exit", NC_EXIT),
REF_CLASS("NextIteration", NC_NEXT_ITERATION),
{"LoopCond", NC_LOOP_COND},
{"ControlTrigger", NC_CONTROL_TRIGGER},
{"_Send", NC_SEND},
{"_HostSend", NC_HOST_SEND},
{"_Recv", NC_RECV},
{"_HostRecv", NC_HOST_RECV},
{"Const", NC_CONSTANT},
{"HostConst", NC_CONSTANT},
{"Variable", NC_VARIABLE},
{"VariableV2", NC_VARIABLE},
REF_CLASS("Identity", NC_IDENTITY),
{"GetSessionHandle", NC_GET_SESSION_HANDLE},
{"GetSessionHandleV2", NC_GET_SESSION_HANDLE},
{"GetSessionTensor", NC_GET_SESSION_TENSOR},
{"DeleteSessionTensor", NC_DELETE_SESSION_TENSOR},
{"Size", NC_METADATA},
{"Shape", NC_METADATA},
{"Rank", NC_METADATA},
{"_ScopedAllocator", NC_SCOPED_ALLOCATOR},
{"CollectiveReduce", NC_COLLECTIVE},
{"CollectiveBcastSend", NC_COLLECTIVE},
{"CollectiveBcastRecv", NC_COLLECTIVE},
{"CollectiveGather", NC_COLLECTIVE},
{"FakeParam", NC_FAKE_PARAM},
{"PartitionedCall", NC_PARTITIONED_CALL},
{"StatefulPartitionedCall", NC_PARTITIONED_CALL},
{"SymbolicGradient", NC_SYMBOLIC_GRADIENT},
{"If", NC_IF},
{"StatelessIf", NC_IF},
{"While", NC_WHILE},
{"StatelessWhile", NC_WHILE},
{"Case", NC_CASE},
{"StatelessCase", NC_CASE},
{"_Arg", NC_ARG},
{"_DeviceArg", NC_ARG},
{"_Retval", NC_RETVAL},
{"_DeviceRetval", NC_RETVAL},
{"_XlaMerge", NC_MERGE},
});
#undef REF_CLASS
auto it = kNodeClassTable->find(ts);
if (it != kNodeClassTable->end()) {
return it->second;
} else {
return NC_OTHER;
}
}
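// The static table above classifies ops into NodeClass values; the REF_CLASS
// macro registers both the plain op name and its "Ref"-prefixed variant, and
// any op not listed falls back to NC_OTHER.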
std::string Node::DebugString() const {
std::string ret = strings::StrCat("{name:'", name(), "' id:", id_);
if (IsSource()) {
strings::StrAppend(&ret, " source}");
} else if (IsSink()) {
strings::StrAppend(&ret, " sink}");
} else {
strings::StrAppend(&ret, " op device:", "{requested: '", requested_device(),
"', assigned: '", assigned_device_name(), "'}", " def:{",
SummarizeNode(*this), "}}");
}
return ret;
}
Node::Node()
: id_(-1),
cost_id_(-1),
class_(NC_UNINITIALIZED),
props_(nullptr),
assigned_device_name_index_(0),
while_ctx_(nullptr) {}
void Node::Initialize(int id, int cost_id,
std::shared_ptr<NodeProperties> props,
Node::NodeClass node_class) {
DCHECK_EQ(id_, -1);
DCHECK(in_edges_.empty());
DCHECK(out_edges_.empty());
id_ = id;
cost_id_ = cost_id;
props_ = std::move(props);
class_ = node_class;
}
void Node::Clear() {
in_edges_.clear();
out_edges_.clear();
id_ = -1;
cost_id_ = -1;
class_ = NC_UNINITIALIZED;
props_.reset();
assigned_device_name_index_ = 0;
}
void Node::UpdateProperties() {
DataTypeVector inputs;
DataTypeVector outputs;
Status status =
InOutTypesForNode(props_->node_def, *(props_->op_def), &inputs, &outputs);
if (!status.ok()) {
LOG(ERROR) << "Failed at updating node: " << status;
return;
}
if (props_->input_types != inputs || props_->output_types != outputs) {
if (TF_PREDICT_TRUE(props_.use_count() == 1)) {
props_->input_types = inputs;
props_->input_types_slice = props_->input_types;
props_->output_types = outputs;
props_->output_types_slice = props_->output_types;
} else {
props_ = std::make_shared<NodeProperties>(
props_->op_def, std::move(props_->node_def), inputs, outputs);
}
}
}
void Node::ClearTypeInfo() {
if (props_->node_def.has_experimental_type()) {
MaybeCopyOnWrite();
props_->node_def.clear_experimental_type();
}
}
Status Node::ShrinkTypeInfo(const absl::flat_hash_map<int, int>& index_mapping,
const string& type_attr_name,
bool update_full_type) {
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(def(), type_attr_name, &dtypes));
std::vector<DataType> new_dtypes;
new_dtypes.reserve(index_mapping.size());
for (int i = 0; i < dtypes.size(); ++i) {
if (index_mapping.contains(i)) {
new_dtypes.emplace_back(dtypes[i]);
}
}
ClearAttr(type_attr_name);
AddAttr(type_attr_name, new_dtypes);
if (!update_full_type || !def().has_experimental_type()) {
return absl::OkStatus();
}
FullTypeDef ft = def().experimental_type();
if (ft.type_id() != TFT_PRODUCT) {
return errors::Internal(
"In ShrinkTypeInfo, full type information does not start with "
"TFT_PRODUCT\n",
ft.DebugString());
}
if (ft.args_size() != dtypes.size()) {
return errors::Internal("In ShrinkTypeInfo, ft.args_size() ",
ft.args_size(), " != dtypes.size() ",
dtypes.size());
}
FullTypeDef new_ft;
new_ft.set_type_id(TFT_PRODUCT);
for (int i = 0; i < ft.args_size(); ++i) {
if (index_mapping.contains(i)) {
(*new_ft.add_args()) = ft.args(i);
}
}
MaybeCopyOnWrite();
*(mutable_def()->mutable_experimental_type()) = new_ft;
return absl::OkStatus();
}
const std::string& Node::name() const { return props_->node_def.name(); }
const std::string& Node::type_string() const { return props_->node_def.op(); }
const NodeDef& Node::def() const { return props_->node_def; }
const OpDef& Node::op_def() const { return *props_->op_def; }
NodeDef* Node::mutable_def() { return &props_->node_def; }
int32 Node::num_inputs() const { return props_->input_types.size(); }
DataType Node::input_type(int32_t i) const { return props_->input_types[i]; }
const DataTypeVector& Node::input_types() const { return props_->input_types; }
int32 Node::num_outputs() const { return props_->output_types.size(); }
DataType Node::output_type(int32_t o) const { return props_->output_types[o]; }
const DataTypeVector& Node::output_types() const {
return props_->output_types;
}
AttrSlice Node::attrs() const { return AttrSlice(def()); }
const protobuf::RepeatedPtrField<std::string>& Node::requested_inputs() const {
return def().input();
}
const std::string& Node::requested_device() const { return def().device(); }
gtl::iterator_range<NeighborIter> Node::out_nodes() const {
return gtl::make_range(NeighborIter(out_edges_.begin(), false),
NeighborIter(out_edges_.end(), false));
}
gtl::iterator_range<NeighborIter> Node::in_nodes() const {
return gtl::make_range(NeighborIter(in_edges_.begin(), true),
NeighborIter(in_edges_.end(), true));
}
void Node::MaybeCopyOnWrite() {
if (!(props_.use_count() == 1)) {
props_ = std::make_shared<NodeProperties>(*props_);
}
}
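// NodeProperties may be shared between copied nodes, so every mutator below
// calls MaybeCopyOnWrite() first: if the properties are referenced by more
// than one node, a private copy is made before the edit, keeping the copies
// independent.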
AttrValue* Node::AddAttrHelper(const std::string& name) {
MaybeCopyOnWrite();
return &((*props_->node_def.mutable_attr())[name]);
}
void Node::ClearAttr(const std::string& name) {
MaybeCopyOnWrite();
(*props_->node_def.mutable_attr()).erase(name);
}
void Node::set_name(std::string name) {
MaybeCopyOnWrite();
props_->node_def.set_name(std::move(name));
}
void Node::set_requested_device(const std::string& device) {
MaybeCopyOnWrite();
props_->node_def.set_device(device);
}
void Node::set_original_node_names(const std::vector<std::string>& names) {
MaybeCopyOnWrite();
props_->node_def.mutable_experimental_debug_info()
->clear_original_node_names();
if (!names.empty()) {
*props_->node_def.mutable_experimental_debug_info()
->mutable_original_node_names() = {names.begin(), names.end()};
}
}
void Node::set_original_func_names(const std::vector<std::string>& names) {
MaybeCopyOnWrite();
props_->node_def.mutable_experimental_debug_info()
->clear_original_func_names();
if (!names.empty()) {
*props_->node_def.mutable_experimental_debug_info()
->mutable_original_func_names() = {names.begin(), names.end()};
}
}
Status Node::input_edge(int idx, const Edge** e) const {
if (idx < 0 || idx >= num_inputs()) {
return errors::InvalidArgument("Invalid input_edge index: ", idx, ", Node ",
name(), " only has ", num_inputs(),
" inputs.");
}
for (const Edge* edge : in_edges()) {
if (edge->dst_input() == idx) {
*e = edge;
return absl::OkStatus();
}
}
return errors::NotFound("Could not find input edge ", idx, " for ", name());
}
Status Node::input_edges(std::vector<const Edge*>* input_edges) const {
input_edges->clear();
input_edges->resize(num_inputs(), nullptr);
for (const Edge* edge : in_edges()) {
if (edge->IsControlEdge()) continue;
if (edge->dst_input() < 0 || edge->dst_input() >= num_inputs()) {
return errors::Internal("Invalid edge input number ", edge->dst_input());
}
if ((*input_edges)[edge->dst_input()] != nullptr) {
return errors::Internal("Duplicate edge input number: ",
edge->dst_input());
}
(*input_edges)[edge->dst_input()] = edge;
}
for (int i = 0; i < num_inputs(); ++i) {
if ((*input_edges)[i] == nullptr) {
return errors::InvalidArgument("Missing edge input number: ", i);
}
}
return absl::OkStatus();
}
Status Node::input_node(int idx, Node** n) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
if (e == nullptr) {
*n = nullptr;
} else {
*n = e->src();
}
return absl::OkStatus();
}
Status Node::input_node(int idx, const Node** const_n) const {
Node* n;
TF_RETURN_IF_ERROR(input_node(idx, &n));
*const_n = n;
return absl::OkStatus();
}
Status Node::input_tensor(int idx, OutputTensor* t) const {
const Edge* e;
TF_RETURN_IF_ERROR(input_edge(idx, &e));
DCHECK(e != nullptr);
*t = OutputTensor(e->src(), e->src_output());
return absl::OkStatus();
}
NodeDebugInfo::NodeDebugInfo(const Node& n) : NodeDebugInfo(n.def()) {}
NodeDebugInfo::NodeDebugInfo(const NodeDef& ndef)
: NodeDebugInfo(ndef.name(), ndef.has_experimental_debug_info(),
ndef.experimental_debug_info()) {}
NodeDebugInfo::NodeDebugInfo(
StringPiece node_name, bool has_experimental_debug_info,
const NodeDef_ExperimentalDebugInfo& experimental_debug_info)
: name(node_name) {
if (has_experimental_debug_info) {
const auto& node_names = experimental_debug_info.original_node_names();
original_node_names.assign(node_names.begin(), node_names.end());
const auto& func_names = experimental_debug_info.original_func_names();
original_func_names.assign(func_names.begin(), func_names.end());
}
}
bool InputTensor::operator==(const InputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 InputTensor::Hash::operator()(InputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
bool OutputTensor::operator==(const OutputTensor& other) const {
return node == other.node && index == other.index;
}
uint64 OutputTensor::Hash::operator()(OutputTensor const& s) const {
return Hash64Combine(std::hash<const Node*>()(s.node),
std::hash<int>()(s.index));
}
Graph::Graph(const OpRegistryInterface* ops)
: ops_(ops, FunctionDefLibrary()),
versions_(new VersionDef),
      arena_(8 << 10) {
versions_->set_producer(TF_GRAPH_DEF_VERSION);
versions_->set_min_consumer(TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
device_names_.push_back("");
DCHECK_EQ(0, InternDeviceName(""));
NodeDef def;
def.set_name("_SOURCE");
def.set_op("NoOp");
Status status;
Node* source = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(source->id(), kSourceId);
def.set_name("_SINK");
Node* sink = AddNode(def, &status);
TF_CHECK_OK(status);
CHECK_EQ(sink->id(), kSinkId);
AddControlEdge(source, sink);
}
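// Every Graph begins life with two NoOp bookkeeping nodes, _SOURCE (id 0) and
// _SINK (id 1), joined by a control edge, with producer and min-consumer
// versions taken from the TF_GRAPH_DEF_VERSION constants.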
Graph::Graph(const FunctionLibraryDefinition& flib_def)
: Graph(flib_def.default_registry()) {
if (flib_def.num_functions() > 0 && versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
Status s = ops_.AddLibrary(flib_def);
CHECK(s.ok()) << s.message();
}
Graph::~Graph() {
for (Node* node : nodes_) {
if (node != nullptr) {
node->~Node();
}
}
for (Node* node : free_nodes_) {
node->~Node();
}
}
std::unique_ptr<Graph> Graph::Clone() {
std::unique_ptr<Graph> new_graph(new Graph(flib_def()));
new_graph->Copy(*this);
return new_graph;
}
void Graph::Clear() {
for (Node* n : nodes()) {
if (!n->IsSource() && !n->IsSink()) RemoveNode(n);
}
}
const VersionDef& Graph::versions() const { return *versions_; }
void Graph::set_versions(const VersionDef& versions) { *versions_ = versions; }
void Graph::Copy(const Graph& src) {
SetConstructionContext(src.GetConstructionContextInternal());
for (Node* n : nodes()) {
CHECK(n->IsSource() || n->IsSink()) << "*dest must be empty";
}
set_versions(src.versions());
gtl::FlatMap<const Node*, Node*> node_map;
node_map.reserve(src.num_nodes());
node_map[src.source_node()] = source_node();
node_map[src.sink_node()] = sink_node();
for (Node* n : src.op_nodes()) {
auto copy = CopyNode(n);
copy->in_edges_.reserve(n->in_edges().size());
copy->out_edges_.reserve(n->out_edges().size());
node_map[n] = copy;
}
edges_.reserve(src.num_edges());
for (const Edge* e : src.edges()) {
Node* src_copy = node_map[e->src()];
Node* dst_copy = node_map[e->dst()];
AddEdge(src_copy, e->src_output(), dst_copy, e->dst_input());
}
}
absl::StatusOr<Node*> Graph::AddNode(NodeDef node_def) {
Status s;
Node* out = AddNode(std::move(node_def), &s);
TF_RETURN_IF_ERROR(s);
return out;
}
Node* Graph::AddNode(NodeDef node_def, Status* status) {
const OpRegistrationData* op_reg_data;
status->Update(ops_.LookUp(node_def.op(), &op_reg_data));
if (!status->ok()) return nullptr;
DataTypeVector inputs;
DataTypeVector outputs;
status->Update(
InOutTypesForNode(node_def, op_reg_data->op_def, &inputs, &outputs));
if (!status->ok()) {
*status = AttachDef(*status, node_def);
return nullptr;
}
Node::NodeClass node_class = op_reg_data->is_function_op
? Node::NC_FUNCTION_OP
: Node::GetNodeClassForOp(node_def.op());
if (node_def.has_experimental_type()) {
VLOG(3) << "AddNode: node has type set, skipping type constructor "
<< node_def.name();
} else {
if (op_reg_data->type_ctor != nullptr) {
VLOG(3) << "AddNode: found type constructor for " << node_def.name();
Status s =
full_type::SpecializeType(AttrSlice(node_def), op_reg_data->op_def,
*(node_def.mutable_experimental_type()));
if (!s.ok()) {
*status = errors::InvalidArgument("type error: ", s.ToString());
VLOG(3) << "AddNode: type inference failed for " << node_def.name()
<< ": " << s;
return nullptr;
}
} else {
VLOG(3) << "AddNode: no type constructor for " << node_def.name();
}
}
Node* node = AllocateNode(
std::make_shared<NodeProperties>(&op_reg_data->op_def,
std::move(node_def), inputs, outputs),
nullptr, node_class);
return node;
}
Node* Graph::CopyNode(const Node* node) {
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
Node* copy = AllocateNode(node->props_, node, node->class_);
copy->set_assigned_device_name(node->assigned_device_name());
const OpDef* op_def;
TF_CHECK_OK(ops_.LookUpOpDef(node->type_string(), &op_def));
if (op_def != node->props_->op_def) {
copy->MaybeCopyOnWrite();
copy->props_->op_def = op_def;
}
copy->SetStackTrace(node->GetStackTrace());
return copy;
}
void Graph::RemoveNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
DCHECK(!node->IsSource());
DCHECK(!node->IsSink());
for (const Edge* e : node->in_edges_) {
CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
node->in_edges_.clear();
for (const Edge* e : node->out_edges_) {
CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
node->out_edges_.clear();
ReleaseNode(node);
}
const Edge* Graph::AddEdge(Node* source, int x, Node* dest, int y) {
TF_DCHECK_OK(IsValidNode(source)) << source->DebugString();
TF_DCHECK_OK(IsValidNode(dest)) << dest->DebugString();
if (source == source_node() || dest == sink_node() || x == kControlSlot ||
y == kControlSlot) {
DCHECK_EQ(x, kControlSlot) << source->DebugString();
DCHECK_EQ(y, kControlSlot) << dest->DebugString();
}
Edge* e = nullptr;
if (free_edges_.empty()) {
e = new (arena_.Alloc(sizeof(Edge))) Edge;
} else {
e = free_edges_.back();
free_edges_.pop_back();
}
e->id_ = edges_.size();
e->src_ = source;
e->dst_ = dest;
e->src_output_ = x;
e->dst_input_ = y;
CHECK(source->out_edges_.insert(e).second);
CHECK(dest->in_edges_.insert(e).second);
edges_.push_back(e);
++num_edges_;
return e;
}
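// Edge objects are arena-allocated and recycled through free_edges_, but each
// AddEdge assigns a fresh id at the end of edges_ (removed slots stay null).
// The DCHECKs above require that any edge touching the source or sink node,
// or using kControlSlot on either end, is a control edge on both ends.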
void Graph::RemoveEdge(const Edge* e) {
TF_DCHECK_OK(IsValidNode(e->src_)) << e->src_->DebugString();
TF_DCHECK_OK(IsValidNode(e->dst_)) << e->dst_->DebugString();
CHECK_EQ(e->src_->out_edges_.erase(e), size_t{1});
CHECK_EQ(e->dst_->in_edges_.erase(e), size_t{1});
CHECK_EQ(e, edges_[e->id_]);
CHECK_GT(num_edges_, 0);
edges_[e->id_] = nullptr;
RecycleEdge(e);
--num_edges_;
}
void Graph::RecycleEdge(const Edge* e) {
free_edges_.push_back(const_cast<Edge*>(e));
}
const Edge* Graph::AddControlEdge(Node* source, Node* dest,
bool allow_duplicates) {
if (!allow_duplicates) {
for (const Edge* edge : dest->in_edges()) {
if (edge->IsControlEdge() && edge->src() == source) {
return nullptr;
}
}
}
if (!source->IsSource() && !dest->IsSink() && !allow_duplicates) {
const std::string new_input = strings::StrCat("^", source->name());
bool input_exists = false;
for (const std::string& input : dest->props_->node_def.input()) {
if (input == new_input) {
input_exists = true;
break;
}
}
if (!input_exists) {
dest->MaybeCopyOnWrite();
dest->props_->node_def.add_input(new_input);
}
}
return AddEdge(source, kControlSlot, dest, kControlSlot);
}
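// AddControlEdge keeps the NodeDef view in sync with the graph view: unless
// duplicates are allowed, an existing control edge from the same source makes
// the call a no-op that returns nullptr, and a "^source" entry is appended to
// the destination's NodeDef inputs so the dependency survives serialization.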
void Graph::RemoveControlEdge(const Edge* e) {
if (!e->src_->IsSource() && !e->dst_->IsSink()) {
e->dst_->MaybeCopyOnWrite();
std::string e_src_name = strings::StrCat("^", e->src_->name());
auto* inputs = e->dst_->props_->node_def.mutable_input();
for (auto it = inputs->begin(); it != inputs->end(); ++it) {
if (*it == e_src_name) {
inputs->erase(it);
break;
}
}
}
RemoveEdge(e);
}
namespace {
const Edge* FindEdge(const Node* dst, int index) {
for (const Edge* e : dst->in_edges()) {
if (e->dst_input() == index) return e;
}
return nullptr;
}
}
Status Graph::UpdateEdge(Node* new_src, int new_src_index, Node* dst,
int dst_index) {
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
const Edge* e = FindEdge(dst, dst_index);
if (e == nullptr) {
return errors::InvalidArgument("Couldn't find edge to ",
FormatNodeForError(*dst));
}
RemoveEdge(e);
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
(*dst->props_->node_def.mutable_input())[dst_index] =
strings::StrCat(new_src->name(), ":", new_src_index);
return absl::OkStatus();
}
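// UpdateEdge rewires a single data input in place: the old producer edge at
// dst:dst_index is removed, the new edge is added, and the matching NodeDef
// input string is rewritten to "new_src:new_src_index" so def() stays
// consistent with the graph.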
void Graph::AddInput(NodeDef* dst, StringPiece src_name, int src_slot) {
if (src_slot == Graph::kControlSlot) {
dst->add_input(strings::StrCat("^", src_name));
} else if (src_slot == 0) {
dst->add_input(src_name.data(), src_name.size());
} else {
dst->add_input(strings::StrCat(src_name, ":", src_slot));
}
}
Status Graph::AddWhileInputHack(Node* new_src, int new_src_index, Node* dst) {
if (!dst->IsWhileNode()) {
return errors::Internal(
"dst argument to AddWhileEdgeHack should be a While op, got: ",
dst->DebugString());
}
TF_RETURN_IF_ERROR(IsValidOutputTensor(new_src, new_src_index));
int dst_index = 0;
for (const Edge* edge : dst->in_edges()) {
if (edge->IsControlEdge()) continue;
++dst_index;
}
TF_RETURN_IF_ERROR(IsValidInputTensor(dst, dst_index));
AddEdge(new_src, new_src_index, dst, dst_index);
dst->MaybeCopyOnWrite();
dst->props_->node_def.add_input(
strings::StrCat(new_src->name(), ":", new_src_index));
return absl::OkStatus();
}
Status Graph::AddFunctionLibrary(
const FunctionDefLibrary& fdef_lib,
const FunctionDefLibraryStackTraces& library_traces) {
return AddFunctionLibrary(FunctionDefLibrary(fdef_lib), library_traces);
}
Status Graph::AddFunctionLibrary(
FunctionDefLibrary&& fdef_lib,
const FunctionDefLibraryStackTraces& library_traces) {
if (fdef_lib.function_size() > 0 && versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddLibrary(std::move(fdef_lib), library_traces);
}
Status Graph::AddFunctionLibrary(const FunctionDefLibrary& fdef_lib) {
return AddFunctionLibrary(fdef_lib, {});
}
Status Graph::AddFunctionLibrary(FunctionDefLibrary&& fdef_lib) {
return AddFunctionLibrary(std::move(fdef_lib), {});
}
Status Graph::AddFunctionDef(const FunctionDef& fdef,
const StackTracesMap& stack_traces) {
if (versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddFunctionDef(fdef, stack_traces);
}
Status Graph::AddGradientDef(const GradientDef& gdef) {
if (versions_->min_consumer() < 12) {
versions_->set_min_consumer(12);
}
return ops_.AddGradientDef(gdef);
}
void Graph::ToGraphDef(GraphDef* graph_def, bool include_flib_def,
bool include_debug_info) const {
ToGraphDefSubRange(graph_def, 0, include_flib_def,
include_debug_info);
}
GraphDef Graph::ToGraphDefDebug() const {
GraphDef ret;
ToGraphDef(&ret);
return ret;
}
void Graph::ToGraphDefSubRange(GraphDef* graph_def, int from_node_id,
bool include_flib_def,
bool include_debug_info) const {
graph_def->Clear();
*graph_def->mutable_versions() = versions();
if (include_flib_def) {
*graph_def->mutable_library() = ops_.ToProto();
}
if (include_debug_info) {
*graph_def->mutable_debug_info() = BuildDebugInfo();
}
graph_def->mutable_node()->Reserve(std::max(1, num_nodes() - from_node_id));
  std::vector<const Edge*> inputs;
for (auto id = from_node_id; id < num_node_ids(); ++id) {
const Node* node = FindNodeId(id);
if (node == nullptr || !node->IsOp()) continue;
NodeDef* node_def = graph_def->add_node();
*node_def = node->def();
if (!node->assigned_device_name().empty()) {
node_def->set_device(node->assigned_device_name());
}
inputs.clear();
inputs.resize(node->num_inputs(), nullptr);
for (const Edge* edge : node->in_edges()) {
if (edge->IsControlEdge()) {
inputs.push_back(edge);
} else {
DCHECK(edge->dst_input() < inputs.size())
<< "Edge " << edge->DebugString()
<< " is overflowing the expected number of inputs ("
<< node->num_inputs() << ") for node " << node->DebugString();
CHECK(inputs[edge->dst_input()] == nullptr)
<< "Edge " << edge->src()->name() << "->" << edge->dst()->name()
<< " conflicts with pre-existing input edge "
<< inputs[edge->dst_input()]->src()->name() << "->"
<< inputs[edge->dst_input()]->dst()->name();
inputs[edge->dst_input()] = edge;
}
}
std::sort(inputs.begin() + node->num_inputs(), inputs.end(),
[](const Edge* a, const Edge* b) -> bool {
return a->src()->name() < b->src()->name();
});
node_def->clear_input();
node_def->mutable_input()->Reserve(inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
const Edge* edge = inputs[i];
if (edge == nullptr) {
if (i < node->requested_inputs().size()) {
node_def->add_input(node->requested_inputs()[i]);
} else {
node_def->add_input("");
}
} else {
const Node* src = edge->src();
if (!src->IsOp()) continue;
AddInput(node_def, src->name(), edge->src_output());
}
}
}
}
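// When emitting NodeDefs above, data inputs occupy the first num_inputs()
// slots and control edges are appended after them, sorted by source name so
// serialization is deterministic; a slot with no edge falls back to the
// originally requested input string when one is available.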
std::string Graph::NewName(StringPiece prefix) {
return strings::StrCat(prefix, "/_", name_counter_++);
}
Status Graph::IsValidNode(const Node* node) const {
if (node == nullptr) {
return errors::InvalidArgument("Node is null");
}
const int id = node->id();
if (id < 0) {
return errors::InvalidArgument("node id ", id, " is less than zero");
}
if (static_cast<size_t>(id) >= nodes_.size()) {
return errors::InvalidArgument(
"node id ", id, " is >= than number of nodes in graph ", nodes_.size());
}
if (nodes_[id] != node) {
return errors::InvalidArgument("Node with id ", id,
" is different from the passed in node. "
"Does it belong to a different graph?");
}
return absl::OkStatus();
}
Status Graph::IsValidOutputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_outputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of outputs: ", node->num_outputs(),
") does not have ", "output ", idx);
}
return absl::OkStatus();
}
Status Graph::IsValidInputTensor(const Node* node, int idx) const {
TF_RETURN_IF_ERROR(IsValidNode(node));
if (idx >= node->num_inputs() || idx < 0) {
return errors::OutOfRange("Node '", node->name(), "' (type: '",
node->op_def().name(),
"', num of inputs: ", node->num_inputs(),
") does not have ", "input ", idx);
}
return absl::OkStatus();
}
Node* Graph::AllocateNode(std::shared_ptr<NodeProperties> props,
const Node* cost_node, Node::NodeClass node_class) {
Node* node = nullptr;
if (free_nodes_.empty()) {
node = new (arena_.Alloc(sizeof(Node))) Node;
} else {
node = free_nodes_.back();
free_nodes_.pop_back();
}
node->graph_ = this;
const int id = nodes_.size();
int cost_id = cost_node ? cost_node->cost_id() : id;
node->Initialize(id, cost_id, std::move(props), node_class);
nodes_.push_back(node);
++num_nodes_;
return node;
}
void Graph::ReleaseNode(Node* node) {
TF_DCHECK_OK(IsValidNode(node)) << node->DebugString();
nodes_[node->id()] = nullptr;
free_nodes_.push_back(node);
--num_nodes_;
node->Clear();
}
int Graph::InternDeviceName(const std::string& device_name) {
if (device_name.empty()) {
return 0;
}
int& index_cell = device_names_map_[device_name];
if (index_cell > 0) {
return index_cell;
}
const int index = device_names_map_.size();
index_cell = index;
device_names_.push_back(device_name);
return index;
}
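// Assigned device names are interned: index 0 is reserved for the empty name,
// and each distinct non-empty name is stored once in device_names_, so nodes
// hold a small integer index instead of a full device string.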
Status Graph::AddWhileContext(StringPiece frame_name,
std::vector<Node*> enter_nodes,
std::vector<Node*> exit_nodes,
OutputTensor cond_output,
std::vector<OutputTensor> body_inputs,
std::vector<OutputTensor> body_outputs,
WhileContext** result) {
auto pair = while_ctxs_.insert(std::pair<std::string, WhileContext>(
std::string(frame_name),
WhileContext(frame_name, std::move(enter_nodes), std::move(exit_nodes),
cond_output, std::move(body_inputs),
std::move(body_outputs))));
if (!pair.second) {
*result = nullptr;
return errors::InvalidArgument("WhileContext with frame name '", frame_name,
"' already exists");
}
*result = &pair.first->second;
return absl::OkStatus();
}
std::unordered_map<std::string, Node*> Graph::BuildNodeNameIndex() const {
std::unordered_map<std::string, Node*> result;
for (Node* n : nodes()) {
result[n->name()] = n;
}
return result;
}
void Graph::SetNodeType(StringPiece name, const FullTypeDef& ft) {
for (Node* n : op_nodes()) {
if (n->name() == name) {
NodeDef& node_def = n->props_->node_def;
n->MaybeCopyOnWrite();
*(node_def.mutable_experimental_type()) = ft;
break;
}
}
}
void Graph::NodeType(StringPiece name, const FullTypeDef** result) {
*result = nullptr;
for (Node* n : op_nodes()) {
if (n->name() == name) {
NodeDef& node_def = n->props_->node_def;
*result = &node_def.experimental_type();
break;
}
}
}
GraphDebugInfo Graph::BuildDebugInfo() const {
GraphDebugInfoBuilder builder;
for (const std::string& function_name : flib_def().ListFunctionNames()) {
if (core::RefCountPtr<FunctionRecord> function_record =
flib_def().FindRecord(function_name)) {
builder.AccumulateStackTracesMap(function_record->stack_traces(),
absl::StrCat("@", function_name));
}
}
for (const Node* node : nodes()) {
if (node == nullptr || !node->IsOp()) {
continue;
}
const std::shared_ptr<AbstractStackTrace>& stack_trace =
node->GetStackTrace();
if (stack_trace != nullptr) {
builder.AccumulateStackTrace(stack_trace, node->name());
}
}
return builder.Build();
}
std::string Edge::DebugString() const {
auto src_name = src_ ? src_->name().c_str() : "<NULL>";
auto dst_name = dst_ ? dst_->name().c_str() : "<NULL>";
return strings::Printf("[id=%d %s:%d -> %s:%d]", id_, src_name, src_output_,
dst_name, dst_input_);
}
} | #include "tensorflow/core/graph/graph.h"
#include <memory>
#include <set>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
using ::testing::UnorderedElementsAre;
REGISTER_OP("OneInput").Input("x: float");
REGISTER_OP("OneOutput").Output("y: float");
REGISTER_OP("OneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float");
REGISTER_OP("TwoInputsOneOutput")
.Input("x: float")
.Input("y: float")
.Output("z: float");
class GraphTest : public ::testing::Test {
protected:
GraphTest() : graph_(OpRegistry::Global()) {}
~GraphTest() override {}
static void VerifyNodes(Node* node, const std::vector<Node*>& expected_in,
const std::vector<Node*>& expected_out) {
std::vector<Node*> in;
for (const Edge* e : node->in_edges()) {
in.push_back(e->src());
}
EXPECT_EQ(Stringify(expected_in), Stringify(in));
std::vector<Node*> out;
for (const Edge* e : node->out_edges()) {
out.push_back(e->dst());
}
EXPECT_EQ(Stringify(expected_out), Stringify(out));
}
std::unique_ptr<Edge> BuildEdge(int id = 0, Node* src = nullptr,
Node* dst = nullptr, int x = 0, int y = 0) {
Edge* e = new Edge;
e->id_ = id;
e->src_ = src;
e->dst_ = dst;
e->src_output_ = x;
e->dst_input_ = y;
return absl::WrapUnique(e);
}
void VerifyGraphStats() {
int nodes = 0;
for (const Node* n : graph_.nodes()) {
VLOG(1) << n->id();
++nodes;
}
EXPECT_EQ(nodes, graph_.num_nodes());
int edges = 0;
for (const Edge* e : graph_.edges()) {
VLOG(1) << e->id();
++edges;
}
EXPECT_EQ(edges, graph_.num_edges());
}
Node* AddNodeWithName(const string& name) {
Node* node;
TF_CHECK_OK(NodeBuilder(name, "NoOp").Finalize(&graph_, &node));
return node;
}
Node* FromNodeDef(const string& name, const string& node_type,
int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
void FromGraphDef(const string& gdef_ascii) {
GraphDef gdef;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef));
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef, &graph_));
}
Node* FindNode(const string& name) {
for (Node* node : graph_.nodes()) {
if (node->name() == name) return node;
}
LOG(FATAL) << name;
}
bool ControlEdgeExistsInGraphOrNodeDef(const Node* src, const Node* dst) {
for (const Edge* e : dst->in_edges()) {
if (e->IsControlEdge() && e->src() == src &&
e->src_output() == Graph::kControlSlot &&
e->dst_input() == Graph::kControlSlot) {
return true;
}
}
std::string control_edge_name = strings::StrCat("^", src->name());
for (int i = 0; i < dst->def().input_size(); ++i) {
if (dst->def().input(i) == control_edge_name) {
return true;
}
}
return false;
}
Graph graph_;
private:
static std::vector<string> Stringify(const std::vector<Node*>& nodes) {
std::vector<string> result;
result.reserve(nodes.size());
for (Node* n : nodes) {
result.push_back(n->DebugString());
}
std::sort(result.begin(), result.end());
return result;
}
};
namespace {
TEST_F(GraphTest, Constructor) {
Node* source = graph_.source_node();
EXPECT_NE(source, nullptr);
Node* sink = graph_.sink_node();
EXPECT_NE(sink, nullptr);
VerifyNodes(source, {}, {sink});
VerifyNodes(sink, {source}, {});
EXPECT_EQ(2, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, RemoveThenAdd) {
AddNodeWithName("A");
Node* b = AddNodeWithName("B");
const int b_id = b->id();
AddNodeWithName("C");
EXPECT_EQ(5, graph_.num_node_ids());
graph_.RemoveNode(b);
EXPECT_EQ(5, graph_.num_node_ids());
Node* d = AddNodeWithName("D");
EXPECT_NE(b_id, d->id());
EXPECT_EQ(6, graph_.num_node_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, InNodesAndOutNodes) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
EXPECT_EQ("A", a->name());
VerifyNodes(a, {graph_.source_node()}, {c, graph_.sink_node()});
EXPECT_EQ("C", c->name());
VerifyNodes(c, {a}, {graph_.sink_node()});
EXPECT_EQ("D", d->name());
VerifyNodes(d, {}, {});
VerifyNodes(graph_.source_node(), {}, {a, graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, c, graph_.source_node()}, {});
graph_.RemoveEdge(source_to_a);
VerifyNodes(a, {}, {c, graph_.sink_node()});
VerifyNodes(graph_.source_node(), {}, {graph_.sink_node()});
graph_.RemoveNode(c);
VerifyNodes(a, {}, {graph_.sink_node()});
VerifyNodes(graph_.sink_node(), {a, graph_.source_node()}, {});
EXPECT_EQ(6, graph_.num_node_ids());
EXPECT_EQ(5, graph_.num_edge_ids());
VerifyGraphStats();
}
TEST_F(GraphTest, NodeByIndex) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.AddEdge(a, 0, c, 0);
const Node* a_copy;
TF_ASSERT_OK(c->input_node(0, &a_copy));
EXPECT_EQ(a, a_copy);
const Edge* e;
TF_ASSERT_OK(c->input_edge(0, &e));
EXPECT_EQ(0, e->dst_input());
EXPECT_EQ(a, e->src());
EXPECT_EQ(c, e->dst());
EXPECT_EQ(0, e->src_output());
Node* t = FromNodeDef("T", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, t, 0);
graph_.AddEdge(t, 0, t, 1);
const Node* t_0;
const Node* t_1;
TF_ASSERT_OK(t->input_node(0, &t_0));
EXPECT_EQ(a, t_0);
TF_ASSERT_OK(t->input_node(1, &t_1));
EXPECT_EQ(t, t_1);
TF_ASSERT_OK(t->input_edge(1, &e));
EXPECT_EQ(1, e->dst_input());
EXPECT_EQ(t, e->src());
std::vector<const Edge*> t_input_edges;
TF_ASSERT_OK(t->input_edges(&t_input_edges));
ASSERT_EQ(2, t_input_edges.size());
EXPECT_EQ(a, t_input_edges[0]->src());
EXPECT_EQ(e, t_input_edges[1]);
EXPECT_FALSE(c->input_node(1, &a_copy).ok());
EXPECT_FALSE(c->input_node(-1, &a_copy).ok());
graph_.RemoveNode(a);
Status s = c->input_node(0, &a_copy);
EXPECT_FALSE(s.ok());
Node* a_new = FromNodeDef("A_new", "OneOutput", 0);
Node* b_new = FromNodeDef("B_new", "OneOutput", 0);
graph_.AddEdge(a_new, 0, c, 0);
const Edge* a_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &a_new_c_edge));
graph_.AddEdge(b_new, 0, c, 0);
const Edge* b_new_c_edge;
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
graph_.RemoveEdge(a_new_c_edge);
TF_ASSERT_OK(c->input_edge(0, &b_new_c_edge));
std::vector<const Edge*> c_input_edges;
TF_ASSERT_OK(c->input_edges(&c_input_edges));
ASSERT_EQ(1, c_input_edges.size());
EXPECT_EQ(b_new_c_edge, c_input_edges[0]);
}
TEST_F(GraphTest, NodeIteration) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = AddNodeWithName("B");
Node* c = FromNodeDef("C", "OneInput", 1);
graph_.RemoveNode(b);
Node* d = AddNodeWithName("D");
const Edge* source_to_a = graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.RemoveEdge(source_to_a);
graph_.RemoveNode(c);
std::set<string> expected;
expected.insert(graph_.source_node()->DebugString());
expected.insert(a->DebugString());
expected.insert(d->DebugString());
expected.insert(graph_.sink_node()->DebugString());
std::set<string> actual;
for (int id = 0; id < graph_.num_node_ids(); ++id) {
Node* node = graph_.FindNodeId(id);
if (node != nullptr) {
actual.insert(node->DebugString());
}
}
EXPECT_EQ(expected, actual);
actual.clear();
for (Node* node : graph_.nodes()) {
actual.insert(node->DebugString());
}
EXPECT_EQ(expected, actual);
VerifyGraphStats();
}
static void CheckType(Node* node, bool b) {
EXPECT_TRUE(b) << node->DebugString();
int count = 0;
if (node->IsSource()) count++;
if (node->IsSink()) count++;
if (node->IsOp()) count++;
EXPECT_EQ(1, count) << node->DebugString();
}
TEST_F(GraphTest, Type) {
Node* op = AddNodeWithName("A");
CheckType(graph_.source_node(), graph_.source_node()->IsSource());
CheckType(graph_.sink_node(), graph_.sink_node()->IsSink());
CheckType(op, op->IsOp());
VerifyGraphStats();
}
TEST_F(GraphTest, AddAttr) {
Node* n1 = AddNodeWithName("A");
n1->AddAttr("_a", "new_attr");
string attr;
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
Node* n2 = graph_.CopyNode(n1);
n1->AddAttr("_b", "new_attr_2");
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n1->attrs(), "_b", &attr));
EXPECT_EQ("new_attr_2", attr);
EXPECT_EQ(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_a", &attr));
EXPECT_EQ("new_attr", attr);
EXPECT_NE(absl::OkStatus(), GetNodeAttr(n2->attrs(), "_b", &attr));
}
static string EdgeIter(const Graph& g) {
std::vector<std::pair<int, int> > edges;
for (const Edge* e : g.edges()) {
edges.push_back(std::make_pair(e->src()->id(), e->dst()->id()));
}
std::sort(edges.begin(), edges.end());
string result;
for (auto& p : edges) {
strings::StrAppend(&result, p.first, "->", p.second, ";");
}
return result;
}
TEST_F(GraphTest, EdgeIteration) {
EXPECT_EQ("0->1;", EdgeIter(graph_));
Node* a = FromNodeDef("A", "OneInputTwoOutputs", 1);
Node* b = FromNodeDef("B", "OneInput", 1);
EXPECT_EQ("0->1;", EdgeIter(graph_));
graph_.AddEdge(a, 0, b, 0);
EXPECT_EQ("0->1;2->3;", EdgeIter(graph_));
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(b, graph_.sink_node());
EXPECT_EQ("0->1;0->2;2->3;3->1;", EdgeIter(graph_));
graph_.AddEdge(a, 1, a, 0);
EXPECT_EQ("0->1;0->2;2->2;2->3;3->1;", EdgeIter(graph_));
VerifyGraphStats();
}
TEST_F(GraphTest, NewName) {
string a1 = graph_.NewName("A");
string a2 = graph_.NewName("A");
string b1 = graph_.NewName("B");
EXPECT_NE(a1, a2);
EXPECT_NE(a1, b1);
EXPECT_NE(a2, b1);
EXPECT_TRUE(absl::StartsWith(a1, "A")) << a1;
}
TEST_F(GraphTest, IsValidNode) {
Node* g1_node1;
TF_CHECK_OK(NodeBuilder("g1_node1", "NoOp").Finalize(&graph_, &g1_node1));
Graph graph2(OpRegistry::Global());
Node* g2_node1;
Node* g2_node2;
TF_CHECK_OK(NodeBuilder("g2_node1", "NoOp").Finalize(&graph2, &g2_node1));
TF_CHECK_OK(NodeBuilder("g2_node2", "NoOp").Finalize(&graph2, &g2_node2));
Status s = graph_.IsValidNode(nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node is null"), s.message());
s = graph_.IsValidNode(g2_node2);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("node id 3 is >= than number of nodes in graph 3"),
s.message());
s = graph_.IsValidNode(g2_node1);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_EQ(string("Node with id 2 is different from the passed in node. "
"Does it belong to a different graph?"),
s.message());
}
TEST_F(GraphTest, AddControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge != nullptr);
EXPECT_EQ(edge->src(), c);
EXPECT_EQ(edge->src_output(), Graph::kControlSlot);
EXPECT_EQ(edge->dst(), a);
EXPECT_EQ(edge->dst_input(), Graph::kControlSlot);
ASSERT_EQ(a->def().input_size(), 1);
EXPECT_EQ(a->def().input(0), "^C");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(a, b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
  edge = graph_.AddControlEdge(a, b, /*allow_duplicates=*/true);
EXPECT_TRUE(edge != nullptr);
ASSERT_EQ(b->def().input_size(), 2);
EXPECT_EQ(b->def().input(0), "A:0");
EXPECT_EQ(b->def().input(1), "^A");
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge != nullptr);
EXPECT_EQ(b->def().input_size(), 2);
edge = graph_.AddControlEdge(graph_.source_node(), b);
EXPECT_TRUE(edge == nullptr);
EXPECT_EQ(b->def().input_size(), 2);
}
TEST_F(GraphTest, RemoveControlEdge) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
Node* a = FindNode("A");
Node* b = FindNode("B");
Node* c = FindNode("C");
const Edge* edge_1 = graph_.AddControlEdge(c, a);
const Edge* edge_2 = graph_.AddControlEdge(a, b);
ASSERT_TRUE(edge_1 != nullptr);
ASSERT_TRUE(edge_2 != nullptr);
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_1);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(ControlEdgeExistsInGraphOrNodeDef(a, b));
graph_.RemoveControlEdge(edge_2);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(a, b));
const Edge* edge_3 = graph_.AddControlEdge(c, a);
const Edge* edge_4 = graph_.AddControlEdge(c, a);
ASSERT_TRUE(edge_3 != nullptr);
ASSERT_TRUE(edge_4 == nullptr);
graph_.RemoveControlEdge(edge_3);
ASSERT_TRUE(!ControlEdgeExistsInGraphOrNodeDef(c, a));
}
TEST_F(GraphTest, UpdateEdge) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInputTwoOutputs", 1);
Node* c = FromNodeDef("C", "OneInputTwoOutputs", 1);
Node* d = FromNodeDef("D", "OneInput", 1);
graph_.AddControlEdge(graph_.source_node(), a);
graph_.AddControlEdge(a, graph_.sink_node());
graph_.AddEdge(a, 0, c, 0);
graph_.AddControlEdge(c, graph_.sink_node());
graph_.AddEdge(c, 0, b, 0);
graph_.AddEdge(c, 1, d, 0);
EXPECT_EQ("0->1;0->2;2->1;2->4;4->1;4->3;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, b, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;4->1;4->5;", EdgeIter(graph_));
TF_EXPECT_OK(graph_.UpdateEdge(a, 0, d, 0));
EXPECT_EQ("0->1;0->2;2->1;2->3;2->4;2->5;4->1;", EdgeIter(graph_));
Status s = graph_.UpdateEdge(a, 1, d, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of outputs: 1) does not have output 1");
s = graph_.UpdateEdge(c, 0, a, 0);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
"Node 'A' (type: 'OneOutput', num of inputs: 0) does not have input 0");
}
TEST_F(GraphTest, InputEdges) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "TwoInputsOneOutput", 2);
graph_.AddEdge(a, 0, b, 0);
std::vector<const Edge*> edges;
EXPECT_EQ(error::INVALID_ARGUMENT, b->input_edges(&edges).code());
graph_.AddEdge(a, 0, b, 1);
TF_EXPECT_OK(b->input_edges(&edges));
}
TEST_F(GraphTest, EdgeDebugString) {
Node* a = FromNodeDef("A", "OneOutput", 0);
Node* b = FromNodeDef("B", "OneInput", 1);
auto e = graph_.AddEdge(a, 0, b, 0);
auto s = e->DebugString();
EXPECT_EQ(s, "[id=1 A:0 -> B:0]");
auto e1 = BuildEdge();
auto s1 = e1->DebugString();
EXPECT_EQ(s1, "[id=0 <NULL>:0 -> <NULL>:0]");
auto e2 = BuildEdge(2, nullptr, b, 1, 1);
auto s2 = e2->DebugString();
EXPECT_EQ(s2, "[id=2 <NULL>:1 -> B:1]");
auto e3 = BuildEdge(3, a, nullptr, 2, 1);
auto s3 = e3->DebugString();
EXPECT_EQ(s3, "[id=3 A:2 -> <NULL>:1]");
}
TEST_F(GraphTest, AddFunctionLibrary) {
FunctionDefLibrary proto;
*proto.add_function() = test::function::XTimesTwo();
*proto.add_function() = test::function::XTimesFour();
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_TRUE(graph_.flib_def().Find("XTimesTwo") != nullptr);
EXPECT_TRUE(graph_.flib_def().Find("XTimesFour") != nullptr);
FunctionDefLibrary error_proto = proto;
*error_proto.mutable_function(0)->add_node_def() =
error_proto.function(0).node_def(0);
Status s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'XTimesTwo' because a different function with "
"the same name already exists.");
error_proto = proto;
error_proto.mutable_function(0)->mutable_signature()->set_name("Add");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot add function 'Add' because an op with the same name "
"already exists.");
GradientDef* grad = proto.add_gradient();
grad->set_function_name("XTimesTwo");
grad->set_gradient_func("Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
TF_EXPECT_OK(graph_.AddFunctionLibrary(proto));
EXPECT_EQ(graph_.flib_def().FindGradient("XTimesTwo"), "Undefined");
error_proto = proto;
error_proto.mutable_gradient(0)->set_gradient_func("Undefined2");
s = graph_.AddFunctionLibrary(error_proto);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Cannot assign gradient function 'Undefined2' to 'XTimesTwo' "
"because it already has gradient function 'Undefined'");
}
TEST_F(GraphTest, BuildNodeNameIndex) {
FromGraphDef(
"node { name: 'A' op: 'OneOutput' }"
"node { name: 'B' op: 'OneInputTwoOutputs' input: [ 'A:0' ] }"
"node { name: 'C' op: 'NoOp' } ");
auto node_name_index = graph_.BuildNodeNameIndex();
EXPECT_EQ(node_name_index.size(), 5);
std::vector<string> node_names{"_SOURCE", "_SINK", "A", "B", "C"};
for (const string& node_name : node_names) {
EXPECT_NE(node_name_index.find(node_name), node_name_index.end());
EXPECT_EQ(node_name_index[node_name], FindNode(node_name));
}
}
TEST_F(GraphTest, Clear) {
const int num_nodes = 10;
const int num_edges_per_node = 2;
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
graph.Clear();
EXPECT_EQ(graph.num_nodes(), 2);
}
TEST_F(GraphTest, NodeFullType) {
FromNodeDef("A", "OneOutput", 0);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(TFT_FLOAT);
graph_.SetNodeType("A", node_t);
const FullTypeDef* ft;
graph_.NodeType("A", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
}
TEST_F(GraphTest, NodeShrinkTypeOutput) {
auto builder = NodeDefBuilder("while", "While");
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("while", node_t);
  TF_CHECK_OK(
      node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "T", /*update_full_type=*/true));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "T", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("while", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 2);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_STRING);
}
TEST_F(GraphTest, NodeShrinkTypeInput) {
auto builder = NodeDefBuilder("if", "If");
builder = builder.Input("cond", 0, DT_BOOL);
builder = builder.Input({NodeDefBuilder::NodeOut("node_0", 0, DT_FLOAT),
NodeDefBuilder::NodeOut("node_1", 0, DT_INT32),
NodeDefBuilder::NodeOut("node_2", 0, DT_INT64),
NodeDefBuilder::NodeOut("node_3", 0, DT_STRING)});
builder = builder.Attr("Tout", "[DT_FLOAT, DT_INT32, DT_INT63, DT_STRING]");
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph_.AddNode(node_def, &s);
TF_CHECK_OK(s);
FullTypeDef node_t;
node_t.set_type_id(TFT_PRODUCT);
for (FullTypeId id : {TFT_FLOAT, TFT_INT32, TFT_INT64, TFT_STRING}) {
FullTypeDef* output_t = node_t.add_args();
output_t->set_type_id(TFT_TENSOR);
output_t->add_args()->set_type_id(id);
}
graph_.SetNodeType("if", node_t);
  TF_CHECK_OK(node->ShrinkTypeInfo({{1, 0}, {3, 1}}, "Tin",
                                   /*update_full_type=*/false));
std::vector<DataType> new_dtypes;
TF_CHECK_OK(GetNodeAttr(node->def(), "Tin", &new_dtypes));
ASSERT_EQ(new_dtypes.size(), 2);
EXPECT_EQ(new_dtypes[0], DT_INT32);
EXPECT_EQ(new_dtypes[1], DT_STRING);
const FullTypeDef* ft;
graph_.NodeType("if", &ft);
EXPECT_NE(ft, nullptr);
EXPECT_EQ(ft->type_id(), TFT_PRODUCT);
ASSERT_EQ(ft->args_size(), 4);
ASSERT_EQ(ft->args(0).args_size(), 1);
EXPECT_EQ(ft->args(0).args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ft->args(1).args_size(), 1);
EXPECT_EQ(ft->args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft->args(2).args_size(), 1);
EXPECT_EQ(ft->args(2).args(0).type_id(), TFT_INT64);
ASSERT_EQ(ft->args(3).args_size(), 1);
EXPECT_EQ(ft->args(3).args(0).type_id(), TFT_STRING);
}
TEST(AddInput, AddsControlSlot) {
auto input_name = "input-name";
auto expected_input_name = absl::StrCat("^", input_name);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, Graph::kControlSlot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
TEST(AddInput, AddsSourceSlotZero) {
auto input_name = "input-name";
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, 0);
EXPECT_EQ(node_def.input(0), input_name);
}
TEST(AddInput, AddsOtherSlots) {
auto input_name = "input-name";
int arbitrary_slot = 37;
auto expected_input_name =
absl::StrCat(input_name, ":", arbitrary_slot);
NodeDef node_def;
tensorflow::Graph::AddInput(&node_def, input_name, arbitrary_slot);
EXPECT_EQ(node_def.input(0), expected_input_name);
}
void BM_InEdgeIteration(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
for (const Node* node : graph.nodes()) {
for (auto e : node->in_edges()) {
sum += e->id();
}
}
}
VLOG(1) << sum;
}
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 2);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 4);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 8);
BENCHMARK(BM_InEdgeIteration)->ArgPair(10, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 6, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 9, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 12, 16);
BENCHMARK(BM_InEdgeIteration)->ArgPair(1 << 15, 16);
void BM_GraphCreation(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
sum += graph.num_node_ids();
}
VLOG(1) << sum;
}
BENCHMARK(BM_GraphCreation)->ArgPair(10, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 2);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 4);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 8);
BENCHMARK(BM_GraphCreation)->ArgPair(10, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 6, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 9, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 12, 16);
BENCHMARK(BM_GraphCreation)->ArgPair(1 << 15, 16);
void BM_ToGraphDef(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
int64_t sum = 0;
for (auto s : state) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
sum += graph_def.node_size();
}
VLOG(1) << sum;
}
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 2);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 4);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 8);
BENCHMARK(BM_ToGraphDef)->ArgPair(10, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 6, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 9, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 12, 16);
BENCHMARK(BM_ToGraphDef)->ArgPair(1 << 15, 16);
void BM_RemoveNode(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
const GraphDef graph_def =
test::CreateGraphDef(num_nodes, num_edges_per_node);
const auto registry = OpRegistry::Global();
GraphConstructorOptions opts;
for (auto s : state) {
state.PauseTiming();
Graph graph(registry);
TF_CHECK_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
state.ResumeTiming();
for (Node* n : graph.op_nodes()) {
graph.RemoveNode(n);
}
}
}
BENCHMARK(BM_RemoveNode)->ArgPair(10, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 2);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 4);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 8);
BENCHMARK(BM_RemoveNode)->ArgPair(10, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 6, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 9, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 12, 16);
BENCHMARK(BM_RemoveNode)->ArgPair(1 << 15, 16);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc588779-d7fa-4d6d-84b8-e9aba3af929e | cpp | tensorflow/tensorflow | graph_partition | tensorflow/core/tfrt/utils/graph_partition.cc | tensorflow/core/tfrt/utils/graph_partition_test.cc | #include "tensorflow/core/tfrt/utils/graph_partition.h"
#include <algorithm>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
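// Bookkeeping for an input/output node captured by a partition: the original
// node, its dtype, its _Arg/_Retval index, and (for inputs) the copy kept in
// the top-level graph to feed the PartitionedCall op.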
struct NodeInfo {
Node* node = nullptr;
DataType data_type;
int index = -1;
Node* node_copy = nullptr;
};
struct CallNodeInputInfo {
int index = -1;
DataType data_type;
Node* input_node = nullptr;
int input_node_index = -1;
Node* arg_node = nullptr;
Node* ret_node = nullptr;
};
struct OutputNodeInfo {
absl::flat_hash_map<std::string, NodeInfo> output_nodes;
std::optional<std::pair<std::string, NodeInfo>> auxiliary_output_node;
};
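// Rewrites `subgraph` in place so it can be converted to a function:
// requested input nodes become _Arg nodes (a copy of each original input is
// added to `graph` to feed the call op), requested output nodes are wired to
// _Retval nodes, and if the partition produces none of the requested outputs,
// a dummy scalar Const output is added so the resulting function still
// returns at least one value.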
Status PrepareSubgraphForFunctionConversion(
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs, const Device* host_device,
const std::string& func_name,
absl::flat_hash_map<std::string, NodeInfo>& input_nodes,
absl::flat_hash_map<std::string, NodeInfo>& output_nodes,
std::optional<std::pair<std::string, NodeInfo>>& auxiliary_output_node,
Graph* subgraph, Graph* graph) {
std::unordered_map<std::string, Node*> name_to_node_map =
subgraph->BuildNodeNameIndex();
int input_index = 0, output_index = 0;
for (const auto& input : inputs) {
int position = -1;
std::string node_name = grappler::ParseNodeName(input, &position);
if (position != 0) {
return errors::Unimplemented(
"Support for input node with multiple output tensors is not "
"implemented.");
}
if (name_to_node_map.count(node_name) == 0) continue;
Node* node = name_to_node_map.at(node_name);
NodeInfo node_info;
node_info.node = node;
node_info.data_type = node->output_type(position);
node_info.index = input_index++;
node_info.node_copy = graph->CopyNode(node);
input_nodes.emplace(node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * arg_node,
NodeBuilder(absl::StrCat("arg_", node_info.index, "/", node->name()),
"_Arg")
.Attr("index", node_info.index)
.Attr("T", node_info.data_type)
.Finalize(subgraph));
CHECK_EQ(node->num_inputs(), 0);
std::vector<const Edge*> out_edges(node->out_edges().begin(),
node->out_edges().end());
for (const Edge* edge : out_edges) {
if (edge->IsControlEdge()) {
subgraph->AddControlEdge(arg_node, edge->dst());
} else {
TF_RETURN_IF_ERROR(
subgraph->UpdateEdge(arg_node, 0, edge->dst(), edge->dst_input()));
}
}
subgraph->RemoveNode(node);
}
for (const auto& output : outputs) {
int position = -1;
std::string node_name = grappler::ParseNodeName(output, &position);
if (position != 0) {
return errors::Unimplemented(
"Support for output node with multiple output tensors is not "
"implemented.");
}
if (name_to_node_map.count(node_name) == 0) continue;
Node* node = name_to_node_map.at(node_name);
NodeInfo node_info;
node_info.node = node;
node_info.data_type = node->output_type(position);
node_info.index = output_index++;
output_nodes.emplace(node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * ret_node,
NodeBuilder(absl::StrCat("ret_", node_info.index, "/", node->name()),
"_Retval")
.Attr("index", node_info.index)
.Attr("T", node_info.data_type)
.Input(NodeBuilder::NodeOut(node->name(), position,
node_info.data_type))
.Finalize(subgraph));
node->set_name(node->name() + "/partition_renamed");
subgraph->AddEdge(node, 0, ret_node, 0);
}
if (output_nodes.empty()) {
const DataType data_type = DT_INT32;
TensorShape const_shape;
Tensor const_tensor(data_type, const_shape);
const_tensor.flat<int>()(0) = 0;
TF_ASSIGN_OR_RETURN(
Node * const_node,
NodeBuilder(absl::StrCat("const/unused/", func_name), "Const")
.AssignedDevice(host_device->name())
.Attr("dtype", data_type)
.Attr("value", const_tensor)
.Finalize(subgraph));
NodeInfo node_info;
node_info.node = const_node;
node_info.data_type = data_type;
node_info.index = output_index++;
auxiliary_output_node.emplace(const_node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * ret_node,
NodeBuilder(
absl::StrCat("ret_", node_info.index, "/", const_node->name()),
"_Retval")
.Attr("index", node_info.index)
.Attr("T", data_type)
.Input(NodeBuilder::NodeOut(const_node->name(), 0, data_type))
.Finalize(subgraph));
subgraph->AddEdge(const_node, 0, ret_node, 0);
}
return absl::OkStatus();
}
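// Converts `subgraph` into a FunctionDef (marked no-inline and tagged with
// its device) registered in `graph`'s library, and adds a PartitionedCall
// node to `graph` that invokes it. All _Send nodes are kept as control
// returns so cross-device transfers are not pruned away.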
absl::StatusOr<Node*> BuildPartitionedCallOp(
const std::string& func_name, const Device* host_device,
const std::string& device,
const absl::flat_hash_map<std::string, NodeInfo>& input_nodes,
const absl::flat_hash_map<std::string, NodeInfo>& output_nodes,
const absl::optional<std::pair<std::string, NodeInfo>>&
auxiliary_output_node,
const std::vector<std::string>& control_outputs, Graph* subgraph,
Graph* graph) {
std::string call_node_name = absl::StrCat("partitioned_call/", func_name);
NodeBuilder call_builder(call_node_name, "PartitionedCall");
call_builder.AssignedDevice(host_device->name());
call_builder.Attr(tensorflow::kNoInlineAttr, true);
std::vector<DataType> input_dtypes(input_nodes.size());
for (const auto& input_node : input_nodes) {
input_dtypes[input_node.second.index] = input_node.second.data_type;
}
call_builder.Attr("Tin", input_dtypes);
CHECK(auxiliary_output_node ? output_nodes.empty() : !output_nodes.empty());
std::vector<DataType> output_dtypes(
auxiliary_output_node ? 1 : output_nodes.size());
if (auxiliary_output_node) {
CHECK_EQ(auxiliary_output_node->second.index, 0);
output_dtypes[auxiliary_output_node->second.index] =
auxiliary_output_node->second.data_type;
} else {
for (const auto& output_node : output_nodes) {
output_dtypes[output_node.second.index] = output_node.second.data_type;
}
}
call_builder.Attr("Tout", output_dtypes);
std::vector<NodeBuilder::NodeOut> call_node_inputs(input_nodes.size());
for (const auto& input_node : input_nodes) {
call_node_inputs[input_node.second.index] =
NodeBuilder::NodeOut(input_node.second.node_copy, 0);
}
call_builder.Input(call_node_inputs);
NameAttrList f;
f.set_name(func_name);
call_builder.Attr("f", f);
TF_ASSIGN_OR_RETURN(Node * call_node, call_builder.Finalize(graph));
absl::flat_hash_set<std::string> control_ret_names(control_outputs.begin(),
control_outputs.end());
for (const Node* node : subgraph->op_nodes()) {
if (node->IsSend()) {
control_ret_names.insert(node->name());
}
}
auto control_ret_node_names =
[&control_ret_names](const Node* node) -> absl::optional<std::string> {
if (control_ret_names.contains(node->name())) {
return node->name();
}
return std::nullopt;
};
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*subgraph, func_name,
control_ret_node_names, &new_fdef));
(*new_fdef.mutable_attr())[tensorflow::kNoInlineAttr].set_b(true);
(*new_fdef.mutable_attr())["device"].set_s(device);
TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddFunctionDef(new_fdef));
return call_node;
}
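// Adds a StatefulPartitionedCall node that gathers the outputs of all
// per-device PartitionedCall ops and forwards them unchanged through an
// IdentityN function, giving downstream consumers a single aggregation point.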
absl::StatusOr<Node*> BuildStatefulPartitionedCallOp(
absl::flat_hash_map<std::string, CallNodeInputInfo>& call_node_input_info,
const absl::flat_hash_map<std::string, Node*>& all_partitioned_call_ops,
const std::string& stateful_call_func_name, const Device* host_device,
Graph* graph) {
std::string call_node_name =
absl::StrCat("stateful_partitioned_call/", stateful_call_func_name);
NodeBuilder call_builder(call_node_name, "StatefulPartitionedCall");
call_builder.Attr(tensorflow::kNoInlineAttr, true);
call_builder.AssignedDevice(host_device->name());
int num_output_nodes = call_node_input_info.size();
std::vector<DataType> input_dtypes(num_output_nodes);
for (const auto& node_info : call_node_input_info) {
CHECK(node_info.second.index < num_output_nodes);
input_dtypes[node_info.second.index] = node_info.second.data_type;
}
call_builder.Attr("Tin", input_dtypes);
call_builder.Attr("Tout", input_dtypes);
std::vector<NodeBuilder::NodeOut> call_node_inputs(num_output_nodes);
for (const auto& node_info : call_node_input_info) {
call_node_inputs[node_info.second.index] = NodeBuilder::NodeOut(
node_info.second.input_node, node_info.second.input_node_index);
}
call_builder.Input(call_node_inputs);
NameAttrList f;
f.set_name(stateful_call_func_name);
call_builder.Attr("f", f);
TF_ASSIGN_OR_RETURN(Node * stateful_call_node, call_builder.Finalize(graph));
auto id_graph = std::make_unique<Graph>(graph->flib_def().default_registry());
std::vector<NodeBuilder::NodeOut> output_tensors(num_output_nodes);
for (auto& node_info : call_node_input_info) {
TF_ASSIGN_OR_RETURN(node_info.second.arg_node,
NodeBuilder(absl::StrCat("arg_", node_info.second.index,
"/", stateful_call_func_name),
"_Arg")
.Attr("index", node_info.second.index)
.Attr("T", node_info.second.data_type)
.Finalize(id_graph.get()));
output_tensors[node_info.second.index] =
NodeBuilder::NodeOut(node_info.second.arg_node, 0);
}
TF_ASSIGN_OR_RETURN(
Node * identity_node,
NodeBuilder(absl::StrCat("identityN", "/", stateful_call_func_name),
"IdentityN")
.AssignedDevice(host_device->name())
.Input(output_tensors)
.Finalize(id_graph.get()));
for (auto& node_info : call_node_input_info) {
TF_ASSIGN_OR_RETURN(
node_info.second.ret_node,
NodeBuilder(absl::StrCat("ret_", node_info.second.index, "/",
stateful_call_func_name),
"_Retval")
.Attr("index", node_info.second.index)
.Attr("T", node_info.second.data_type)
.Input(NodeBuilder::NodeOut(identity_node, node_info.second.index))
.Finalize(id_graph.get()));
id_graph->AddEdge(identity_node, node_info.second.index,
node_info.second.ret_node, 0);
}
FunctionDef id_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*id_graph, stateful_call_func_name, &id_fdef));
(*id_fdef.mutable_attr())[tensorflow::kNoInlineAttr].set_b(true);
TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddFunctionDef(id_fdef));
return stateful_call_node;
}
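// Returns true iff the graph's op nodes are assigned to more than one device.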
bool HasMultipleDevices(const Graph* graph) {
bool has_multiple_devices = false;
std::optional<std::string> location;
for (const Node* node : graph->op_nodes()) {
if (location) {
if (*location != node->assigned_device_name()) {
has_multiple_devices = true;
break;
}
} else {
location = node->assigned_device_name();
}
}
return has_multiple_devices;
}
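// Turns a device string into a token usable in a function name by replacing
// every ':' with '_'.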
std::string GetNameFromDevice(const std::string& device) {
std::string ret = device;
for (int i = 0; i < ret.size(); ++i) {
if (ret[i] == ':') ret[i] = '_';
}
return ret;
}
}
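// Partitions `graph` by assigned device, wraps each partition (together with
// its inter-device _Send/_Recv ops) in a PartitionedCall, and aggregates the
// per-device outputs behind a single StatefulPartitionedCall. Graphs placed
// entirely on one device are returned unchanged.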
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const std::string& graph_func_name, const DeviceSet& device_set,
const Device* host_device, const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
const std::vector<std::string>& control_outputs,
std::unique_ptr<Graph> graph) {
if (!HasMultipleDevices(graph.get())) {
return graph;
}
auto new_graph = std::make_unique<Graph>(graph->flib_def());
FunctionDefLibrary flib = graph->flib_def().ToProto();
std::unordered_map<string, std::unique_ptr<Graph>> partitions;
TF_RETURN_IF_ERROR(
PartitionFunctionGraph(device_set, std::move(graph), &partitions));
absl::flat_hash_map<std::string, Node*> all_partitioned_call_ops;
std::map<std::string, OutputNodeInfo> device_to_output_info_map;
for (auto& partition : partitions) {
const string& device = partition.first;
VLOG(1) << "Process the partitioin on device: " << device;
Graph* subgraph = partition.second.get();
TF_RETURN_IF_ERROR(subgraph->AddFunctionLibrary(flib));
FunctionNameGenerator name_generator(
&new_graph->flib_def(), absl::StrCat(graph_func_name, "-partition-",
GetNameFromDevice(device)));
std::string func_name = name_generator.GetName();
absl::flat_hash_map<std::string, NodeInfo> input_nodes;
OutputNodeInfo& output_node_info = device_to_output_info_map[device];
absl::flat_hash_map<std::string, NodeInfo>& output_nodes =
output_node_info.output_nodes;
std::optional<std::pair<std::string, NodeInfo>>& auxiliary_output_node =
output_node_info.auxiliary_output_node;
TF_RETURN_IF_ERROR(PrepareSubgraphForFunctionConversion(
inputs, outputs, host_device, func_name, input_nodes, output_nodes,
auxiliary_output_node, subgraph, new_graph.get()));
TF_ASSIGN_OR_RETURN(
Node * call_node,
BuildPartitionedCallOp(func_name, host_device, device, input_nodes,
output_nodes, auxiliary_output_node,
control_outputs, subgraph, new_graph.get()));
all_partitioned_call_ops[device] = call_node;
}
int input_index = 0;
absl::flat_hash_map<std::string, CallNodeInputInfo> call_node_input_info;
auto get_call_node_input_info = [&](const std::string& device,
const std::string& node_name,
const NodeInfo& node_info) {
CHECK(!call_node_input_info.contains(node_name));
CallNodeInputInfo& info = call_node_input_info[node_name];
info.index = input_index++;
info.data_type = node_info.data_type;
info.input_node = all_partitioned_call_ops.at(device);
info.input_node_index = node_info.index;
};
for (const auto& entry : device_to_output_info_map) {
const std::string& device = entry.first;
const OutputNodeInfo& output_info = entry.second;
for (const auto& node_info : output_info.output_nodes) {
get_call_node_input_info(device, node_info.first, node_info.second);
}
if (output_info.auxiliary_output_node) {
get_call_node_input_info(device, output_info.auxiliary_output_node->first,
output_info.auxiliary_output_node->second);
}
}
FunctionNameGenerator name_generator(
&new_graph->flib_def(),
absl::StrCat(graph_func_name, "/output_aggregator"));
std::string stateful_call_func_name = name_generator.GetName();
TF_ASSIGN_OR_RETURN(
Node * stateful_call_node,
BuildStatefulPartitionedCallOp(
call_node_input_info, all_partitioned_call_ops,
stateful_call_func_name, host_device, new_graph.get()));
for (const auto& node_info : call_node_input_info) {
TF_RETURN_IF_ERROR(NodeBuilder(node_info.first, "Identity")
.Input(NodeBuilder::NodeOut(stateful_call_node,
node_info.second.index))
.Attr("T", node_info.second.data_type)
.AssignedDevice(host_device->name())
.Finalize(new_graph.get(), nullptr));
}
CHECK_GT(stateful_call_node->num_outputs(), 0);
for (const auto& control_output : control_outputs) {
TF_RETURN_IF_ERROR(NodeBuilder(control_output, "Identity")
.Input(NodeBuilder::NodeOut(stateful_call_node, 0))
.Attr("T", stateful_call_node->output_type(0))
.AssignedDevice(host_device->name())
.Finalize(new_graph.get(), nullptr));
}
return new_graph;
}
}
} | #include "tensorflow/core/tfrt/utils/graph_partition.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::IsEmpty;
using ::testing::NotNull;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphPartitionTest : public grappler::GrapplerTest {
public:
void SetUp() override {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 2});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&devices));
device0_ = devices[0].get();
device1_ = devices[1].get();
device_mgr_ = std::make_unique<DynamicDeviceMgr>(std::move(devices));
for (auto d : device_mgr_->ListDevices()) {
device_set_.AddDevice(d);
}
}
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
DeviceSet device_set_;
};
TEST_F(GraphPartitionTest, InsertTransferOpsWithOneDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice(device0_->name());
auto input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
auto id_x = ops::Identity(scope.WithOpName("identity"), input);
auto output = ops::Identity(scope.WithOpName("output"), id_x);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
GraphDef original_graphdef;
TF_ASSERT_OK(scope.ToGraphDef(&original_graphdef));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Graph> new_graph,
InsertTransferOps("test_graph", device_set_, device0_,
{"input"},
{"output"}, {},
std::move(graph)));
GraphDef new_graphdef;
new_graph->ToGraphDef(&new_graphdef);
CompareGraphs(original_graphdef, new_graphdef);
}
TEST_F(GraphPartitionTest, InsertTransferOpsWithTwoDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope();
Scope scope0 = scope.WithDevice(device0_->name());
Scope scope1 = scope.WithDevice(device1_->name());
auto input = ops::Placeholder(scope0.WithOpName("input"), DT_FLOAT);
Output id_x = ops::Identity(scope0.WithOpName("id_x"), input);
Output id_y = ops::Identity(scope1.WithOpName("id_y"), input);
auto output = ops::IdentityN(scope0.WithOpName("output"), {id_x, id_y});
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Graph> new_graph,
InsertTransferOps("test_graph", device_set_, device0_,
{"input"},
{"output"}, {},
std::move(graph)));
GraphDef new_graphdef;
new_graph->ToGraphDef(&new_graphdef);
NodeDef* input_node = nullptr;
NodeDef* output_node = nullptr;
NodeDef* stateful_partitioned_call_node = nullptr;
std::vector<NodeDef*> partitioned_call_nodes;
for (NodeDef& node : *new_graphdef.mutable_node()) {
if (node.op() == "PartitionedCall") {
partitioned_call_nodes.push_back(&node);
} else if (node.op() == "StatefulPartitionedCall") {
stateful_partitioned_call_node = &node;
} else if (node.name() == "input") {
input_node = &node;
} else if (node.name() == "output") {
output_node = &node;
}
}
ASSERT_THAT(input_node, NotNull());
ASSERT_THAT(output_node, NotNull());
ASSERT_THAT(partitioned_call_nodes, SizeIs(2));
ASSERT_THAT(stateful_partitioned_call_node, NotNull());
EXPECT_THAT(stateful_partitioned_call_node->input(),
UnorderedElementsAre(partitioned_call_nodes[0]->name(),
partitioned_call_nodes[1]->name()));
absl::flat_hash_map<std::string, FunctionDef> func_name_to_func;
EXPECT_THAT(new_graphdef.library().function(), SizeIs(3));
for (const FunctionDef& fdef : new_graphdef.library().function()) {
ASSERT_TRUE(fdef.attr().contains(tensorflow::kNoInlineAttr));
EXPECT_TRUE(fdef.attr().at(tensorflow::kNoInlineAttr).b());
func_name_to_func[fdef.signature().name()] = fdef;
}
for (NodeDef* node : partitioned_call_nodes) {
ASSERT_TRUE(node->attr().contains("f"));
ASSERT_TRUE(func_name_to_func.contains(node->attr().at("f").func().name()));
const FunctionDef& fdef =
func_name_to_func.at(node->attr().at("f").func().name());
ASSERT_TRUE(fdef.attr().contains("device"));
if (fdef.attr().at("device").s() == device0_->name()) {
EXPECT_THAT(node->input(), UnorderedElementsAre(input_node->name()));
} else if (fdef.attr().at("device").s() == device1_->name()) {
EXPECT_THAT(node->input(), IsEmpty());
}
ASSERT_TRUE(node->attr().contains(tensorflow::kNoInlineAttr));
EXPECT_TRUE(node->attr().at(tensorflow::kNoInlineAttr).b());
int send_count = 0, recv_count = 0;
for (const NodeDef& node : fdef.node_def()) {
if (node.op() == "_Send")
++send_count;
else if (node.op() == "_Recv")
++recv_count;
}
EXPECT_EQ(send_count, 1);
EXPECT_EQ(recv_count, 1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/graph_partition.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/graph_partition_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7b82235c-15b2-46d9-a376-934867b12139 | cpp | tensorflow/tensorflow | control_flow | tensorflow/core/graph/control_flow.cc | tensorflow/core/graph/control_flow_test.cc | #include "tensorflow/core/graph/control_flow.h"
#include <deque>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
struct Frame {
string name;
Frame* parent = nullptr;
const Node* loop_cond = nullptr;
};
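// Verifies the frame annotations computed by BuildControlFlowInfo: each frame
// must have a consistent parent frame, and each loop may contain at most one
// LoopCond node (LoopCounter nodes added for parallel iterations are exempt).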
Status ValidateControlFlowInfo(const Graph* graph,
const std::vector<ControlFlowInfo>& cf_info) {
std::unordered_map<string, Frame> frames;
for (const Node* node : graph->op_nodes()) {
const ControlFlowInfo& cf = cf_info[node->id()];
if (!cf.frame || !cf.parent_frame) {
continue;
}
Frame& frame = frames[cf.frame_name];
Frame* parent = &frames[cf_info[cf.parent_frame->id()].frame_name];
if (frame.parent == nullptr) {
frame.parent = parent;
frame.name = cf.frame_name;
} else if (frame.parent != parent) {
return errors::Internal(
"Invalid loop structure: Mismatched parent frames for \"",
cf.frame_name, "\": \"", parent->name, "\" vs \"", frame.parent->name,
"\". The node giving this error: ", FormatNodeForError(*node),
". This is an internal bug, please file a bug report with "
"instructions on how to reproduce the error.");
}
if (IsLoopCond(node)) {
if (frame.loop_cond &&
!absl::StrContains(frame.loop_cond->name(), "LoopCounter") &&
!absl::StrContains(node->name(), "LoopCounter")) {
return errors::InvalidArgument(
"Invalid loop structure: Loop \"", cf.frame_name,
"\" has more than one LoopCond node: ", FormatNodeForError(*node),
" and ", FormatNodeForError(*frame.loop_cond),
". This is an internal bug, please file a bug report with "
"instructions on how to reproduce the error.");
}
frame.loop_cond = node;
}
}
return absl::OkStatus();
}
}
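// Assigns a frame and parent frame to every node reachable from SOURCE via a
// breadth-first traversal: an Enter node opens the frame named by its
// "frame_name" attr, and an Exit node returns to the parent frame. Nodes that
// are never reached are optionally reported in `unreachable_nodes`.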
Status BuildControlFlowInfo(const Graph* g, std::vector<ControlFlowInfo>* info,
std::vector<string>* unreachable_nodes) {
info->clear();
info->resize(g->num_node_ids());
std::vector<const Node*> parent_nodes;
parent_nodes.resize(g->num_node_ids());
const Node* src_node = g->source_node();
ControlFlowInfo& src_info = (*info)[src_node->id()];
src_info.frame = src_node;
src_info.parent_frame = src_node;
string frame_name;
std::deque<const Node*> ready;
ready.push_back(src_node);
while (!ready.empty()) {
const Node* curr_node = ready.front();
ready.pop_front();
const ControlFlowInfo& curr_info = (*info)[curr_node->id()];
const Node* frame = curr_info.frame;
const Node* parent = curr_info.parent_frame;
frame_name = curr_info.frame_name;
if (IsExit(curr_node)) {
const ControlFlowInfo& parent_info = (*info)[parent->id()];
frame = parent_info.frame;
parent = parent_info.parent_frame;
frame_name = parent_info.frame_name;
}
for (const Edge* out_edge : curr_node->out_edges()) {
const Node* out = out_edge->dst();
int out_id = out->id();
ControlFlowInfo* out_info = &(*info)[out_id];
const Node* out_parent = out_info->parent_frame;
bool is_visited = (parent_nodes[out_id] != nullptr);
if (!out->IsOp()) continue;
if (!is_visited) {
parent_nodes[out->id()] = curr_node;
ready.push_back(out);
}
if (IsEnter(out)) {
if (is_visited) {
const string& parent_frame = (*info)[out_parent->id()].frame_name;
if (parent_frame != frame_name) {
return errors::InvalidArgument(
FormatNodeForError(*out),
" has inputs from different frames. The input ",
FormatNodeForError(*curr_node), " is in frame '", frame_name,
"'. The input ", FormatNodeForError(*parent_nodes[out->id()]),
" is in frame '", parent_frame, "'.");
}
} else {
out_info->frame = out;
out_info->parent_frame = frame;
TF_RETURN_IF_ERROR(
GetNodeAttr(out->attrs(), "frame_name", &out_info->frame_name));
if (out_info->frame_name.empty()) {
return errors::InvalidArgument("The Enter ",
FormatNodeForError(*out),
" must have a frame name.");
}
}
} else {
if (is_visited) {
if (out_info->frame_name != frame_name) {
return errors::InvalidArgument(
FormatNodeForError(*out),
" has inputs from different frames. The input ",
FormatNodeForError(*curr_node), " is in frame '", frame_name,
"'. The input ", FormatNodeForError(*parent_nodes[out->id()]),
" is in frame '", out_info->frame_name, "'.");
}
} else {
out_info->frame = frame;
out_info->parent_frame = parent;
out_info->frame_name = frame_name;
}
}
}
}
if (unreachable_nodes) {
for (const Node* node : g->op_nodes()) {
if (!parent_nodes[node->id()]) {
unreachable_nodes->push_back(node->name());
}
}
}
TF_RETURN_IF_ERROR(ValidateControlFlowInfo(g, *info));
return absl::OkStatus();
}
} | #include "tensorflow/core/graph/control_flow.h"
#include <string>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status LessThanTenCond(const Scope& scope, const std::vector<Output>& inputs,
Output* output) {
*output = ops::Less(scope, inputs[0], 10);
return scope.status();
}
Status AddOneBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(scope, {inputs[0], 1}));
return scope.status();
}
Status NestedLoopBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
return ops::BuildWhileLoop(scope.NewSubScope("inner"), inputs,
LessThanTenCond, AddOneBody, "inner_loop",
outputs);
}
TEST(ValidateControlFlowTest, InputsFromDifferentFrames) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("outer"), inputs,
LessThanTenCond, NestedLoopBody,
"outer_loop", &outputs));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<ControlFlowInfo> info;
Status status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(
absl::StrContains(status.message(), "has inputs from different frames"))
<< status.message();
EXPECT_TRUE(
absl::StrContains(status.message(), "{{node outer/body/inner/Merge}}"))
<< status.message();
EXPECT_TRUE(
absl::StrContains(status.message(), "{{node outer/body/inner/Enter}}"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node outer/Switch}}"))
<< status.message();
}
TEST(ValidateControlFlowTest, MismatchedParentFrames) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope, inputs, LessThanTenCond, AddOneBody,
"test_loop", &outputs));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Node* enter_1 = nullptr;
for (Node* node : graph->op_nodes()) {
if (IsEnter(node)) {
enter_1 = node;
}
}
ASSERT_TRUE(enter_1 != nullptr);
NodeDef enter;
enter.set_name("Enter2");
enter.set_op("Enter");
(*enter.mutable_attr())["T"].set_type(DT_INT32);
(*enter.mutable_attr())["frame_name"].set_s("test_loop");
*enter.add_input() = "Enter";
Status status;
Node* enter_2 = graph->AddNode(enter, &status);
TF_ASSERT_OK(status);
graph->AddControlEdge(enter_1, enter_2);
std::vector<ControlFlowInfo> info;
status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "Mismatched parent frames"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node Enter2}}"))
<< status.message();
}
TEST(ValidateControlFlowTest, TwoLoopCond) {
Scope scope = Scope::NewRootScope().ExitOnError();
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope, DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope, inputs, LessThanTenCond, AddOneBody,
"test_loop", &outputs));
outputs.clear();
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("sub"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs, false));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<ControlFlowInfo> info;
Status status = BuildControlFlowInfo(graph.get(), &info);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(
absl::StrContains(status.message(), "more than one LoopCond node"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node sub/LoopCond}}"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node LoopCond}}"))
<< status.message();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/control_flow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/control_flow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09c2b85b-3754-48db-92c3-816f545d6444 | cpp | tensorflow/tensorflow | graph_def_builder | tensorflow/core/graph/graph_def_builder.cc | tensorflow/core/graph/graph_def_builder_test.cc | #include "tensorflow/core/graph/graph_def_builder.h"
#include <utility>
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
GraphDefBuilder::Options::Options(Graph* graph, Status* status)
: graph_(graph), status_(status) {}
GraphDefBuilder::Options::~Options() {}
GraphDefBuilder::Options GraphDefBuilder::Options::WithName(
StringPiece name) const {
return Options(*this).WithNameImpl(name);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithDevice(
StringPiece device) const {
return Options(*this).WithDeviceImpl(device);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInput(
Node* control_input) const {
return Options(*this).WithControlInputImpl(control_input);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputs(
absl::Span<Node* const> control_inputs) const {
return Options(*this).WithControlInputsImpl(control_inputs);
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithNameImpl(
StringPiece name) {
name_ = string(name);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithDeviceImpl(
StringPiece device) {
device_ = string(device);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputImpl(
Node* control_input) {
control_inputs_.push_back(control_input);
return *this;
}
GraphDefBuilder::Options GraphDefBuilder::Options::WithControlInputsImpl(
absl::Span<Node* const> control_inputs) {
control_inputs_.insert(control_inputs_.end(), control_inputs.begin(),
control_inputs.end());
return *this;
}
Status GraphDefBuilder::ToGraphDef(GraphDef* graph_def) const {
if (status_.ok()) {
graph_.ToGraphDef(graph_def);
*graph_def->mutable_library() = flib_def_.ToProto();
}
return status_;
}
string GraphDefBuilder::Options::GetNameForOp(StringPiece op) const {
if (name_.empty()) return graph_->NewName(op);
return name_;
}
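// Descriptive note (added): applies the accumulated options to `builder` --
// control inputs, device, and attrs -- adds the node to the graph, and folds
// any resulting error into status_ via UpdateStatus.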
Node* GraphDefBuilder::Options::FinalizeBuilder(NodeBuilder* builder) const {
builder->ControlInputs(control_inputs_);
if (!device_.empty()) builder->Device(device_);
for (const auto& attr : attrs_) {
builder->Attr(attr.first, attr.second);
}
Node* returned_node;
UpdateStatus(builder->Finalize(graph_, &returned_node));
return returned_node;
}
void GraphDefBuilder::Options::UpdateStatus(const Status& status) const {
if (status_ == nullptr) {
TF_CHECK_OK(status);
} else {
status_->Update(status);
}
}
namespace ops {
Node* SourceOp(const string& op_name, const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
return opts.FinalizeBuilder(&node_builder);
}
Node* UnaryOp(const string& op_name, NodeOut input,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(input));
return opts.FinalizeBuilder(&node_builder);
}
Node* BinaryOp(const string& op_name, NodeOut a, NodeOut b,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(a)).Input(std::move(b));
return opts.FinalizeBuilder(&node_builder);
}
Node* TernaryOp(const string& op_name, NodeOut a, NodeOut b, NodeOut c,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(a)).Input(std::move(b)).Input(std::move(c));
return opts.FinalizeBuilder(&node_builder);
}
}
} | #include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(GraphDefBuilderTest, Version) {
ASSERT_LT(0, TF_GRAPH_DEF_VERSION);
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Graph graph(OpRegistry::Global());
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, &graph));
ASSERT_EQ(graph.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph.versions().min_consumer(), TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
GraphDef graph_def;
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
ASSERT_EQ(graph_def.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph_def.versions().min_consumer(),
TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/graph_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94996556-fc86-4b9e-8614-170ea8e383c5 | cpp | tensorflow/tensorflow | tensor_id | tensorflow/core/graph/tensor_id.cc | tensorflow/core/graph/tensor_id_test.cc | #include "tensorflow/core/graph/tensor_id.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
TensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {}
SafeTensorId::SafeTensorId(const TensorId& id)
: SafeTensorId(string(id.first), id.second) {}
TensorId ParseTensorName(const string& name) {
return ParseTensorName(StringPiece(name.data(), name.size()));
}
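// Descriptive note (added): parses "name:<digits>" into (name, index) by
// scanning trailing decimal digits. A leading '^' marks a control input
// (index = Graph::kControlSlot); with no suffix the index defaults to 0.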
TensorId ParseTensorName(StringPiece name) {
const char* base = name.data();
const char* p = base + name.size() - 1;
unsigned int index = 0;
unsigned int mul = 1;
while (p > base && (*p >= '0' && *p <= '9')) {
index += ((*p - '0') * mul);
mul *= 10;
p--;
}
TensorId id;
if (p > base && *p == ':' && mul > 1) {
id.first = StringPiece(base, p - base);
id.second = index;
} else if (absl::StartsWith(name, "^")) {
id.first = StringPiece(base + 1);
id.second = Graph::kControlSlot;
} else {
id.first = name;
id.second = 0;
}
return id;
}
bool IsTensorIdControl(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
} | #include "tensorflow/core/graph/tensor_id.h"
#include <vector>
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
string ParseHelper(const string& n) { return ParseTensorName(n).ToString(); }
TEST(TensorIdTest, ParseTensorName) {
EXPECT_EQ(ParseHelper("W1"), "W1:0");
EXPECT_EQ(ParseHelper("W1:0"), "W1:0");
EXPECT_EQ(ParseHelper("weights:0"), "weights:0");
EXPECT_EQ(ParseHelper("W1:1"), "W1:1");
EXPECT_EQ(ParseHelper("W1:17"), "W1:17");
EXPECT_EQ(ParseHelper("xyz1_17"), "xyz1_17:0");
EXPECT_EQ(ParseHelper("^foo"), "^foo");
}
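// Descriptive note (added): draws a value skewed toward small numbers by
// first picking a power-of-two bucket of size at most 2^max_log uniformly,
// then sampling uniformly within that bucket.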
uint32 Skewed(random::SimplePhilox* rnd, int max_log) {
const uint32 space = 1 << (rnd->Rand32() % (max_log + 1));
return rnd->Rand32() % space;
}
void BM_ParseTensorName(::testing::benchmark::State& state) {
const int arg = state.range(0);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<string> names;
for (int i = 0; i < 100; i++) {
string name;
switch (arg) {
case 0: {
size_t len = Skewed(&rnd, 4);
while (name.size() < len) {
name += rnd.OneIn(4) ? '0' : 'a';
}
if (rnd.OneIn(3)) {
strings::StrAppend(&name, ":", rnd.Uniform(12));
}
break;
}
case 1:
name = "W1";
break;
case 2:
name = "t0003";
break;
case 3:
name = "weights";
break;
case 4:
name = "weights:17";
break;
case 5:
name = "^weights";
break;
default:
LOG(FATAL) << "Unexpected arg";
break;
}
names.push_back(name);
}
TensorId id;
int index = 0;
int sum = 0;
for (auto s : state) {
id = ParseTensorName(names[index++ % names.size()]);
sum += id.second;
}
VLOG(2) << sum;
}
BENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);
TEST(TensorIdTest, IsTensorIdControl) {
string input = "^foo";
TensorId tensor_id = ParseTensorName(input);
EXPECT_TRUE(IsTensorIdControl(tensor_id));
input = "foo";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
input = "foo:2";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
}
TEST(TensorIdTest, PortZero) {
for (string input : {"foo", "foo:0"}) {
TensorId tensor_id = ParseTensorName(input);
EXPECT_EQ("foo", tensor_id.node());
EXPECT_EQ(0, tensor_id.index());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ad59b14-c70d-4f5b-b197-26ce368fa44d | cpp | tensorflow/tensorflow | simple_delete | tensorflow/core/graph/regularization/simple_delete.cc | tensorflow/core/graph/regularization/simple_delete_test.cc | #include "tensorflow/core/graph/regularization/simple_delete.h"
#include <cstdint>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/grappler/op_types.h"
namespace tensorflow::graph_regularization {
namespace {
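// Descriptive note (added): strips nondeterministic parts of the GraphDef so
// that two exports of the same model hash identically (see the unit test):
// drops the generated UID suffix from the function names of
// (Stateful)PartitionedCall nodes, clears their serialized config_proto
// attribute, and clears the values of string constants.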
void RegularizeNodes(GraphDef* graph_def) {
for (NodeDef& node : *graph_def->mutable_node()) {
if (grappler::IsPartitionedCall(node) ||
grappler::IsStatefulPartitionedCall(node)) {
std::string function_name = node.attr().find("f")->second.func().name();
absl::StatusOr<int64_t> uid = GetSuffixUID(function_name);
if (uid.ok()) {
node.mutable_attr()->find("f")->second.mutable_func()->set_name(
std::string(
absl::StripSuffix(function_name, std::to_string(*uid))));
}
auto node_config_proto = node.mutable_attr()->find("config_proto");
if (node_config_proto != node.attr().end()) {
node_config_proto->second.mutable_s()->erase();
}
}
if (grappler::IsConstant(node)) {
if (node.attr().at("dtype").type() == DT_STRING) {
node.mutable_attr()->find("value")->second.clear_value();
}
}
}
}
}
void SimpleDelete(GraphDef& graph_def) {
RegularizeNodes(&graph_def);
graph_def.mutable_library()->Clear();
graph_def.mutable_versions()->Clear();
}
} | #include "tensorflow/core/graph/regularization/simple_delete.h"
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::graph_regularization {
namespace {
absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) {
std::string file_path = io::JoinPath(file_dir, "saved_model.pb");
std::string serialized_saved_model;
auto status =
ReadFileToString(Env::Default(), file_path, &serialized_saved_model);
if (!status.ok()) {
return status;
}
SavedModel saved_model_pb;
saved_model_pb.ParseFromString(serialized_saved_model);
return saved_model_pb;
}
TEST(SimpleDeleteTest, TestSimpleDeleteModelSavedTwice) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/graph/regularization/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
MetaGraphDef* metagraph = saved_model_pb.mutable_meta_graphs(0);
GraphDef* graph_def = metagraph->mutable_graph_def();
SimpleDelete(*graph_def);
uint64 hash1 = ComputeHash(*graph_def);
const std::string export_dir2 =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/graph/regularization/testdata", "bert2");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2,
ReadSavedModel(export_dir2));
const MetaGraphDef& metagraph2 = saved_model_pb2.meta_graphs(0);
GraphDef graph_def2 = metagraph2.graph_def();
SimpleDelete(graph_def2);
uint64 hash2 = ComputeHash(graph_def2);
EXPECT_EQ(hash1, hash2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/regularization/simple_delete.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/regularization/simple_delete_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66f6266b-c289-42bd-953b-b20bfab56b36 | cpp | tensorflow/tensorflow | request_cost | tensorflow/core/common_runtime/request_cost.cc | tensorflow/core/common_runtime/request_cost_test.cc | #include "tensorflow/core/common_runtime/request_cost.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace tensorflow {
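// Descriptive note (added): costs accumulate across calls -- recording the
// same cost type twice adds the durations together.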
void RequestCost::RecordCost(
const std::vector<std::pair<absl::string_view, absl::Duration>>& costs) {
absl::MutexLock lock(&mutex_);
for (const auto& cost : costs) {
cost_map_[cost.first] += cost.second;
}
}
void RequestCost::ScaleCosts(int scale_factor) {
absl::MutexLock lock(&mutex_);
for (auto& [cost_type, cost] : cost_map_) {
cost *= scale_factor;
}
}
absl::flat_hash_map<std::string, absl::Duration> RequestCost::GetCosts() const {
absl::MutexLock lock(&mutex_);
return cost_map_;
}
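// Descriptive note (added): unlike costs, metrics overwrite -- the latest
// recorded value for each metric name wins.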
void RequestCost::RecordMetrics(
const std::vector<std::pair<absl::string_view, double>>& metrics) {
absl::MutexLock lock(&mutex_);
for (const auto& metric : metrics) {
metric_map_[metric.first] = metric.second;
}
}
absl::flat_hash_map<std::string, double> RequestCost::GetMetrics() const {
absl::MutexLock lock(&mutex_);
return metric_map_;
}
void RequestCost::RecordBatchMetrics(const BatchMetrics& batch_metrics) {
absl::MutexLock lock(&mutex_);
batch_metrics_.push_back(batch_metrics);
}
void RequestCost::ScaleBatchCosts(int scale_factor) {
absl::MutexLock lock(&mutex_);
for (auto& batch_metrics : batch_metrics_) {
for (auto& [cost_type, cost] : batch_metrics.batch_costs) {
batch_metrics.batch_costs[cost_type] *= scale_factor;
}
}
}
std::vector<RequestCost::BatchMetrics> RequestCost::GetBatchMetrics() const {
absl::MutexLock lock(&mutex_);
return batch_metrics_;
}
} | #include "tensorflow/core/common_runtime/request_cost.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(RequestCostTest, RecordCost) {
RequestCost request_cost;
request_cost.RecordCost(
{{"tpu_v1", absl::Milliseconds(1)}, {"tpu_v2", absl::Milliseconds(2)}});
request_cost.RecordCost({{"tpu_v1", absl::Milliseconds(10)},
{"tpu_v2", absl::Milliseconds(20)},
{"cpu_v1", absl::Milliseconds(30)},
{"cpu_v2", absl::Milliseconds(40)}});
EXPECT_THAT(request_cost.GetCosts(),
UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
Pair("tpu_v2", absl::Milliseconds(22)),
Pair("cpu_v1", absl::Milliseconds(30)),
Pair("cpu_v2", absl::Milliseconds(40))));
request_cost.RecordCost(
{{"cpu_v1", absl::Milliseconds(3)}, {"cpu_v2", absl::Milliseconds(4)}});
EXPECT_THAT(request_cost.GetCosts(),
UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
Pair("tpu_v2", absl::Milliseconds(22)),
Pair("cpu_v1", absl::Milliseconds(33)),
Pair("cpu_v2", absl::Milliseconds(44))));
request_cost.ScaleCosts(2);
EXPECT_THAT(request_cost.GetCosts(),
UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(22)),
Pair("tpu_v2", absl::Milliseconds(44)),
Pair("cpu_v1", absl::Milliseconds(66)),
Pair("cpu_v2", absl::Milliseconds(88))));
}
TEST(RequestCostTest, RecordMetrics) {
RequestCost request_cost;
request_cost.RecordMetrics({{"metric_v1", 1}, {"metric_v2", 3.14}});
EXPECT_THAT(
request_cost.GetMetrics(),
UnorderedElementsAre(Pair("metric_v1", 1), Pair("metric_v2", 3.14)));
request_cost.RecordMetrics({{"metric_v1", 11},
{"metric_v2", 3.14159},
{"other_metric_v1", 3},
{"other_metric_v2", 4}});
EXPECT_THAT(request_cost.GetMetrics(),
UnorderedElementsAre(
Pair("metric_v1", 11), Pair("metric_v2", 3.14159),
Pair("other_metric_v1", 3), Pair("other_metric_v2", 4)));
}
TEST(RequestCostTest, RecordBatchMetrics) {
RequestCost request_cost;
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
8,
8,
0,
{{"gcu", absl::Milliseconds(80)}, {"tpu", absl::Milliseconds(160)}}});
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
4,
2,
1,
{{"gcu", absl::Milliseconds(40)}, {"tpu", absl::Milliseconds(80)}}});
EXPECT_THAT(
request_cost.GetBatchMetrics(),
ElementsAre(
FieldsAre(8, 8, 0,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(80)),
Pair("tpu", absl::Milliseconds(160)))),
FieldsAre(
4, 2, 1,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(40)),
Pair("tpu", absl::Milliseconds(80))))));
request_cost.ScaleBatchCosts(4);
EXPECT_THAT(
request_cost.GetBatchMetrics(),
ElementsAre(
FieldsAre(8, 8, 0,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(320)),
Pair("tpu", absl::Milliseconds(640)))),
FieldsAre(
4, 2, 1,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(160)),
Pair("tpu", absl::Milliseconds(320))))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/request_cost.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/request_cost_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0c0dc3d-b684-49ac-9164-5f32170d6e03 | cpp | tensorflow/tensorflow | graph_runner | tensorflow/core/common_runtime/graph_runner.cc | tensorflow/core/common_runtime/graph_runner_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/single_threaded_cpu_device.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
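// Descriptive note (added): a process-local rendezvous used to feed inputs to
// and fetch outputs from the executor. Tensors are keyed by edge name; each
// key may be sent at most once and entries are never consumed, so a Recv that
// follows the matching Send always finds its tensor.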
class SimpleRendezvous : public RendezvousInterface {
public:
explicit SimpleRendezvous() {}
Status Send(const ParsedKey& parsed, const Args& send_args, const Tensor& val,
const bool is_dead) override {
if (is_dead) {
return errors::Internal("Send of a dead tensor");
}
mutex_lock l(mu_);
string edge_name(parsed.edge_name);
if (table_.count(edge_name) > 0) {
return errors::Internal("Send of an already sent tensor");
}
table_[edge_name] = val;
return absl::OkStatus();
}
void RecvAsync(const ParsedKey& parsed, const Args& recv_args,
DoneCallback done) override {
Tensor tensor;
Status status = absl::OkStatus();
{
string key(parsed.edge_name);
mutex_lock l(mu_);
if (table_.count(key) <= 0) {
status = errors::Internal("Did not find key ", key);
} else {
tensor = table_[key];
}
}
done(status, Args{}, recv_args, tensor, false);
}
void StartAbort(const Status& status) override {}
private:
typedef std::unordered_map<string, Tensor> Table;
mutex mu_;
Table table_ TF_GUARDED_BY(mu_);
};
}
GraphRunner::GraphRunner(Env* env)
: device_deleter_(NewSingleThreadedCpuDevice(env)),
device_(device_deleter_.get()) {}
GraphRunner::GraphRunner(Device* device) : device_(device) {}
GraphRunner::~GraphRunner() {}
Status GraphRunner::Run(Graph* graph, FunctionLibraryRuntime* function_library,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
if (device_ == nullptr) {
return errors::NotFound("Cannot find a device for GraphRunner.");
}
if (function_library && function_library->device() &&
function_library->device()->device_type() != device_->device_type()) {
VLOG(1) << "Cannot run on: " << device_->device_type()
<< " with a function library for a "
<< function_library->device()->device_type() << " device.";
function_library = nullptr;
}
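  // Run on a copy of the graph so that the feed/fetch rewrite below does not
  // mutate the caller's graph. (Comment added for clarity.)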
std::unique_ptr<Graph> graph_to_run(new Graph(graph->op_registry()));
CopyGraph(*graph, graph_to_run.get());
SimpleRendezvous rendez;
std::vector<string> input_names;
for (const auto& in : inputs) {
const string& tensor_name = in.first;
input_names.emplace_back(tensor_name);
string full_key = Rendezvous::CreateKey("/device:CPU:0", 1, "/device:CPU:1",
tensor_name, FrameAndIter(0, 0));
Rendezvous::ParsedKey parsed;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(full_key, &parsed));
TF_RETURN_IF_ERROR(rendez.Send(parsed, Rendezvous::Args(), in.second,
                                   /*is_dead=*/false));
}
subgraph::RewriteGraphMetadata metadata;
TF_RETURN_IF_ERROR(subgraph::RewriteGraphForExecution(
      graph_to_run.get(), input_names, output_names, /*target_nodes=*/{},
      device_->attributes(), /*use_function_convention=*/false, &metadata));
auto runner = [](Executor::Args::Closure c) { c(); };
LocalExecutorParams params;
params.device = device_;
params.function_library = function_library;
const int producer = graph_to_run->versions().producer();
params.create_kernel = [this, function_library, producer](
const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
return CreateNonCachedKernel(device_, function_library, props, producer,
kernel);
};
params.delete_kernel = [](OpKernel* kernel) { delete kernel; };
Executor* executor;
TF_RETURN_IF_ERROR(NewLocalExecutor(params, *graph_to_run, &executor));
std::unique_ptr<Executor> executor_unref(executor);
Executor::Args args;
args.step_id = LogMemory::CONSTANT_FOLDING_STEP_ID;
args.runner = runner;
args.rendezvous = &rendez;
args.collective_executor = nullptr;
CancellationManager cancellation_manager;
args.cancellation_manager = &cancellation_manager;
if (function_library != nullptr) {
args.session_config = function_library->config_proto();
}
TF_RETURN_IF_ERROR(executor->Run(args));
outputs->resize(output_names.size());
for (size_t i = 0; i < output_names.size(); ++i) {
const string& output_key =
Rendezvous::CreateKey("/device:CPU:0", 1, "/device:CPU:1",
output_names[i], FrameAndIter(0, 0));
Rendezvous::ParsedKey parsed;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(output_key, &parsed));
bool is_dead;
Tensor output_tensor;
TF_RETURN_IF_ERROR(
rendez.Recv(parsed, Rendezvous::Args(), &output_tensor, &is_dead));
(*outputs)[i] = tensor::DeepCopy(output_tensor);
}
return absl::OkStatus();
}
} | #include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(GraphRunnerTest, SingleConst) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s = graph_runner.Run(root.graph(), nullptr, {}, {c.name()}, &outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(42.0f), outputs[0]);
}
TEST(GraphRunnerTest, DeepCopy) {
Scope root = Scope::NewRootScope();
auto p1 = ops::Placeholder(root.WithOpName("p1"), DT_FLOAT);
auto p2 = ops::Placeholder(root.WithOpName("p2"), DT_FLOAT);
auto add = ops::Add(root.WithOpName("add"), p1, p2);
Tensor p1_data(DT_FLOAT, TensorShape({}));
Tensor p2_data(DT_FLOAT, TensorShape({}));
p1_data.scalar<float>()() = 1.0f;
p2_data.scalar<float>()() = 2.0f;
std::vector<std::pair<string, Tensor>> inputs = {{"p1:0", p1_data},
{"p2:0", p2_data}};
std::vector<Tensor> outputs;
{
GraphRunner graph_runner(Env::Default());
Status s =
graph_runner.Run(root.graph(), nullptr, inputs, {"add:0"}, &outputs);
TF_ASSERT_OK(s);
}
test::ExpectEqual(test::AsScalar(3.0f), outputs[0]);
}
TEST(GraphRunnerTest, MultiFetchConst) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
auto pi = ops::Const(root, 3.14f);
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s = graph_runner.Run(root.graph(), nullptr, {}, {c.name(), pi.name()},
&outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(42.0f), outputs[0]);
test::ExpectEqual(test::AsScalar(3.14f), outputs[1]);
}
TEST(GraphRunnerTest, FeedAndFetch) {
Scope root = Scope::NewRootScope();
auto p1 = ops::Placeholder(root.WithOpName("p1"), DT_FLOAT);
auto p2 = ops::Placeholder(root.WithOpName("p2"), DT_FLOAT);
auto add = ops::Add(root.WithOpName("add"), p1, p2);
Tensor p1_data(DT_FLOAT, TensorShape({}));
Tensor p2_data(DT_FLOAT, TensorShape({}));
p1_data.scalar<float>()() = 1.0f;
p2_data.scalar<float>()() = 2.0f;
std::vector<std::pair<string, Tensor>> inputs = {{"p1:0", p1_data},
{"p2:0", p2_data}};
GraphRunner graph_runner(Env::Default());
std::vector<Tensor> outputs;
Status s =
graph_runner.Run(root.graph(), nullptr, inputs, {"add:0"}, &outputs);
TF_ASSERT_OK(s);
test::ExpectEqual(test::AsScalar(3.0f), outputs[0]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6563a9cb-dea9-48ee-bc96-d7524605a677 | cpp | tensorflow/tensorflow | replicate_per_replica_nodes | tensorflow/core/common_runtime/replicate_per_replica_nodes.cc | tensorflow/core/common_runtime/replicate_per_replica_nodes_test.cc | #include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <algorithm>
#include <queue>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
constexpr int kOptimizeCrossHostEdgesThreshold = 8;
constexpr int kOptimizeCrossHostDataEdgesThreshold = 2;
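// Descriptive note (added): tracks the per-replica copies (one slot per
// underlying device) of every node assigned to a composite device. Replicas
// are created lazily by ReplicateNode and wired together by the
// ReplicateFrom* methods below.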
class ReplicateHelper {
public:
Status InitializeNode(const Node* node, int num_allowed_devices) {
if (replicated_nodes_map_.find(node) != replicated_nodes_map_.end()) {
return errors::InvalidArgument("Node ", node->name(),
" has been replicated.");
}
std::vector<Node*> replicated_nodes(num_allowed_devices, nullptr);
replicated_nodes_map_.emplace(node, std::move(replicated_nodes));
return absl::OkStatus();
}
Status ReplicateNode(const Node* node,
const std::vector<string>& allowed_devices,
int allowed_device_index, Graph* graph) {
auto& replicated_nodes = replicated_nodes_map_.at(node);
if (replicated_nodes[allowed_device_index] != nullptr) {
return absl::OkStatus();
}
const auto& device = allowed_devices.at(allowed_device_index);
NodeDef node_def = node->def();
const string suffix = strings::StrCat("/R", allowed_device_index);
node_def.set_name(graph->NewName(strings::StrCat(node_def.name(), suffix)));
TF_ASSIGN_OR_RETURN(Node * replicated_node, graph->AddNode(node_def));
replicated_node->set_assigned_device_name(device);
if (replicated_node->IsArg()) {
replicated_node->AddAttr("sub_index", allowed_device_index);
}
replicated_nodes[allowed_device_index] = replicated_node;
return absl::OkStatus();
}
void ReplicateFromRegularDeviceToCompositeDevice(const Edge* edge,
Graph* graph) const {
Node* src = edge->src();
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
for (Node* dst : dst_replicated_nodes) {
if (dst == nullptr) {
continue;
}
graph->AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
}
Status ReplicateFromCompositeDeviceToCompositeDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
if (src_replicated_nodes.size() != dst_replicated_nodes.size()) {
return errors::InvalidArgument(
"Nodes assigned to the same composite device should have the "
"same number of replicated nodes. Found an edge from node ",
edge->src()->name(), " (", src_replicated_nodes.size(),
" replicated nodes) to node ", edge->dst()->name(), " (",
dst_replicated_nodes.size(), " replicated nodes).");
}
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
Node* dst = dst_replicated_nodes.at(i);
if (dst == nullptr) {
continue;
}
TF_RETURN_IF_ERROR(ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
}
return absl::OkStatus();
}
Status ReplicateFromCompositeDeviceToRegularDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
Node* dst = edge->dst();
const string& dst_device = dst->assigned_device_name();
bool found_src_node = false;
for (int i = 0; i < allowed_devices.size(); ++i) {
if (allowed_devices.at(i) == dst_device) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
found_src_node = true;
break;
}
}
if (!found_src_node) {
for (int i = 0; i < allowed_devices.size(); ++i) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
}
if (edge->IsControlEdge()) {
for (Node* replicated_node : src_replicated_nodes) {
          graph->AddControlEdge(replicated_node, dst,
                                /*allow_duplicates=*/true);
}
return absl::OkStatus();
}
if (edge->src()->type_string() == "_Arg") {
NodeDefBuilder pack_builder(
graph->NewName(absl::StrCat(edge->src()->name(), "/Packed")),
"Pack");
const int num_replicas = src_replicated_nodes.size();
pack_builder.Attr("N", num_replicas);
const DataType dtype = edge->src()->output_type(edge->src_output());
pack_builder.Attr("T", dtype);
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.reserve(src_replicated_nodes.size());
for (Node* replicated_node : src_replicated_nodes) {
inputs.emplace_back(NodeDefBuilder::NodeOut{
replicated_node->name(), edge->src_output(), dtype});
}
pack_builder.Input(inputs);
NodeDef pack_def;
TF_RETURN_IF_ERROR(pack_builder.Finalize(&pack_def));
TF_ASSIGN_OR_RETURN(Node * pack_node, graph->AddNode(pack_def));
pack_node->set_assigned_device_name(dst->assigned_device_name());
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
graph->AddEdge(src_replicated_nodes[i], edge->src_output(), pack_node,
i);
}
graph->AddEdge(pack_node, 0, dst, edge->dst_input());
} else {
return errors::InvalidArgument(
"Dst node should be assigned to an allowed device. Found an "
"edge from node ",
edge->src()->name(), " assigned to ",
edge->src()->assigned_device_name(), " to node ", dst->name(),
" assigned to ", dst_device);
}
}
return absl::OkStatus();
}
private:
absl::flat_hash_map<const Node*, std::vector<Node*>> replicated_nodes_map_;
};
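// Descriptive note (added): replicates the cluster of nodes assigned to one
// composite device. `cluster_nodes` maps each such node to its number of
// unprocessed out-edges; once the count reaches zero all consumers have been
// rewired, so the node's in-edges are replicated in turn and the original
// node is removed from the graph.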
Status ReplicateNodesAndEdges(const std::vector<string>& allowed_devices,
absl::flat_hash_map<Node*, int>* cluster_nodes,
ReplicateHelper* helper, Graph* graph) {
std::queue<Node*> nodes_ready_to_delete;
for (auto& pair : *cluster_nodes) {
Node* node = pair.first;
for (const Edge* edge : node->out_edges()) {
Node* dst = edge->dst();
if (dst->assigned_device_name() != node->assigned_device_name()) {
TF_RETURN_IF_ERROR(helper->ReplicateFromCompositeDeviceToRegularDevice(
edge, allowed_devices, graph));
--pair.second;
}
}
if (cluster_nodes->at(node) == 0) {
nodes_ready_to_delete.push(node);
}
}
while (!nodes_ready_to_delete.empty()) {
Node* node = nodes_ready_to_delete.front();
nodes_ready_to_delete.pop();
for (const Edge* edge : node->in_edges()) {
Node* src = edge->src();
if (src->assigned_device_name() != node->assigned_device_name()) {
helper->ReplicateFromRegularDeviceToCompositeDevice(edge, graph);
} else {
TF_RETURN_IF_ERROR(
helper->ReplicateFromCompositeDeviceToCompositeDevice(
edge, allowed_devices, graph));
if (--(*cluster_nodes)[src] == 0) {
nodes_ready_to_delete.push(src);
}
}
}
cluster_nodes->erase(node);
graph->RemoveNode(node);
}
return absl::OkStatus();
}
}
Status ReplicatePerReplicaNodesInFunctionGraph(
const absl::flat_hash_map<string, const std::vector<string>*>&
composite_devices,
Graph* graph) {
VLOG(1) << "Starting ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
std::set<string> composite_device_names;
for (const auto& it : composite_devices) {
composite_device_names.insert(it.first);
}
absl::flat_hash_map<string, absl::flat_hash_map<Node*, int>>
composite_device_to_cluster_nodes;
for (Node* n : graph->op_nodes()) {
if (composite_device_names.find(n->assigned_device_name()) !=
composite_device_names.end()) {
composite_device_to_cluster_nodes[n->assigned_device_name()].emplace(
n, n->out_edges().size());
}
}
if (composite_device_to_cluster_nodes.empty()) {
VLOG(1) << "No nodes with composiste device found.";
return absl::OkStatus();
}
for (auto& it : composite_device_to_cluster_nodes) {
const std::vector<string>& allowed_devices =
*composite_devices.at(it.first);
if (allowed_devices.empty()) {
return errors::InvalidArgument("No allowed device of composite device: ",
it.first);
}
absl::flat_hash_map<Node*, int>& cluster_nodes = it.second;
if (allowed_devices.size() == 1) {
for (const auto& pair : it.second) {
Node* n = pair.first;
n->set_assigned_device_name(allowed_devices.at(0));
if (n->IsArg()) {
n->AddAttr("sub_index", 0);
}
}
continue;
}
ReplicateHelper helper;
for (const auto& pair : cluster_nodes) {
TF_RETURN_IF_ERROR(
helper.InitializeNode(pair.first, allowed_devices.size()));
}
TF_RETURN_IF_ERROR(ReplicateNodesAndEdges(allowed_devices, &cluster_nodes,
&helper, graph));
if (!cluster_nodes.empty()) {
return errors::InvalidArgument(
"There are still ", cluster_nodes.size(),
" nodes on CompositiveDevice ",
cluster_nodes.begin()->first->assigned_device_name());
}
}
  TF_RETURN_IF_ERROR(OptimizeCrossHostControlOutputEdges(
      graph, kOptimizeCrossHostEdgesThreshold));
  TF_RETURN_IF_ERROR(OptimizeCrossHostControlInputEdges(
      graph, kOptimizeCrossHostEdgesThreshold));
  TF_RETURN_IF_ERROR(OptimizeCrossHostDataOutputEdges(
      graph, kOptimizeCrossHostDataEdgesThreshold));
VLOG(1) << "Finished ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <map>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class GraphHelper {
public:
explicit GraphHelper(const Graph& graph) : graph_(graph) {
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node;
}
}
Node* GetNodeByName(const string& name) {
const auto it = nodes_by_name_.find(name);
if (it != nodes_by_name_.end()) {
return it->second;
}
for (const auto& entry : nodes_by_name_) {
if (absl::StartsWith(entry.first, name)) {
return entry.second;
}
}
return nullptr;
}
void SetAssignedDevice(const string& node_name, const string& device_name) {
CHECK_NOTNULL(GetNodeByName(node_name))
->set_assigned_device_name(device_name);
}
void CheckArgNum(const int expected_num) {
int arg_num = 0;
for (Node* node : graph_.op_nodes()) {
if (node->IsArg()) {
arg_num++;
}
}
EXPECT_EQ(arg_num, expected_num);
}
void CheckAssignedDevice(const string& node_name,
const string& expected_device_name) {
EXPECT_EQ(expected_device_name,
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name());
}
void CheckAssignedDevicePrefix(const string& node_name,
const string& expected_device_name) {
auto assigned =
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name();
EXPECT_EQ(assigned.rfind(expected_device_name, 0), 0);
}
private:
const Graph& graph_;
std::map<string, Node*> nodes_by_name_;
};
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto one = ops::Const<int32>(scope.WithOpName("one"), 1);
auto write = ops::AssignVariableOp(scope.WithOpName("write"), arg, one);
auto ret = ops::_Retval(
scope.WithOpName("ret").WithControlDependencies({write}), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("one", "/device:CPU:0");
helper.SetAssignedDevice("write", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 9);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevicePrefix("arg/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("arg/R1", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R1", "/device:TPU");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("one", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDeviceToSingleDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, MultipleCompositeDevices) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_RESOURCE, 0);
Output arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 0);
auto read0 = ops::ReadVariableOp(scope.WithOpName("read0"), arg0, DT_INT32);
auto read1 = ops::ReadVariableOp(scope.WithOpName("read1"), arg1, DT_INT32);
auto identity0 = ops::Identity(scope.WithOpName("identity0"), read0);
auto identity1 = ops::Identity(scope.WithOpName("identity1"), read1);
auto add = ops::Add(scope.WithOpName("add"), identity0, identity1);
auto ret = ops::_Retval(scope.WithOpName("ret"), add, 0);
const std::vector<string> underlying_devices_0 = {"/device:TPU:0",
"/device:TPU:1"};
const std::vector<string> underlying_devices_1 = {"/device:TPU:2",
"/device:TPU:3"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices_0},
{"/device:TPU_COMPOSITE:1", &underlying_devices_1}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("identity0", "/device:TPU:1");
helper.SetAssignedDevice("arg1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("read1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("identity1", "/device:TPU:3");
helper.SetAssignedDevice("add", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("read0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("read1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("identity0", "/device:TPU:1");
helper.CheckAssignedDevice("identity1", "/device:TPU:3");
helper.CheckAssignedDevice("add", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, NestedFunctions) {
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "Func", &fdef));
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("func", "Func", &flib_def)
.Input(arg.name(), 0, DT_RESOURCE)
.Finalize(&def));
Status status;
Node* func = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
scope.graph()->AddEdge(arg.node(), 0, func, 0);
auto ret = ops::_Retval(scope.WithOpName("ret"), Output(func), 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
GraphHelper helper(graph);
EXPECT_EQ(graph.num_op_nodes(), 3);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("func", "/device:CPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("arg/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg/Packed", "/device:CPU:0");
helper.CheckAssignedDevice("func", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
const EdgeSet& packed_in_edges =
helper.GetNodeByName("arg/Packed")->in_edges();
EXPECT_EQ(packed_in_edges.size(), 2);
auto it = packed_in_edges.begin();
EXPECT_EQ(helper.GetNodeByName("arg/R0"), (*it++)->src());
EXPECT_EQ(helper.GetNodeByName("arg/R1"), (*it)->src());
const EdgeSet& func_in_edges = helper.GetNodeByName("func")->in_edges();
EXPECT_EQ(func_in_edges.size(), 1);
EXPECT_EQ(helper.GetNodeByName("arg/Packed"),
(*func_in_edges.begin())->src());
}
}
TEST(ReplicatePerReplicaNodesTest, DeadArgNodes) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_per_replica_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_per_replica_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba632a46-406b-400f-a370-41f10b15368d | cpp | tensorflow/tensorflow | device_resolver_local | tensorflow/core/common_runtime/device_resolver_local.cc | tensorflow/core/common_runtime/device_resolver_local_test.cc | #include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
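// Descriptive note (added): local resolution only consults the process-local
// DeviceMgr; an unknown device name is surfaced as NotFound rather than the
// InvalidArgument that LookupDevice itself returns.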
Status DeviceResolverLocal::GetDeviceAttributes(const string& device,
DeviceAttributes* attributes) {
Device* dev;
Status s = dev_mgr_->LookupDevice(device, &dev);
if (absl::IsInvalidArgument(s)) {
return errors::NotFound(device, " not found");
} else if (!s.ok()) {
return s;
}
*attributes = dev->attributes();
return absl::OkStatus();
}
Status DeviceResolverLocal::GetAllDeviceAttributes(
const string& task, std::vector<DeviceAttributes>* attributes) {
return errors::Internal(
"GetTaskCached is not supposed to be called in local collectives");
}
Status DeviceResolverLocal::UpdateDeviceAttributes(
const std::vector<DeviceAttributes>& attributes) {
return errors::Internal(
"UpdateDeviceAttributes shouldn't be called with local collectives");
}
} | #include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
#define NUM_DEVS 3
class DeviceResolverLocalTest : public ::testing::Test {
protected:
DeviceResolverLocalTest() {
SessionOptions options;
string task_name = "/job:localhost/replica:0/task:0";
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_.reset(new DeviceResolverLocal(device_mgr_.get()));
}
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
};
TEST_F(DeviceResolverLocalTest, GetDeviceAttributesKnown) {
DeviceAttributes attributes;
TF_EXPECT_OK(drl_->GetDeviceAttributes(
"/job:localhost/replica:0/task:0/device:CPU:1", &attributes));
EXPECT_EQ(attributes.name(), "/job:localhost/replica:0/task:0/device:CPU:1");
}
TEST_F(DeviceResolverLocalTest, GetDeviceAttributesUnknown) {
DeviceAttributes attributes;
EXPECT_TRUE(errors::IsNotFound(drl_->GetDeviceAttributes(
"/job:localhost/replica:0/task:0/device:CPU:9", &attributes)));
}
TEST_F(DeviceResolverLocalTest, GetAllDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsInternal(
      drl_->GetAllDeviceAttributes(/*task=*/"", &attributes)));
}
TEST_F(DeviceResolverLocalTest, UpdateDeviceAttributes) {
std::vector<DeviceAttributes> attributes;
EXPECT_TRUE(errors::IsInternal(drl_->UpdateDeviceAttributes(attributes)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_resolver_local.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_resolver_local_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c619a43b-b99b-4230-a09b-95349b0db5b6 | cpp | tensorflow/tensorflow | hierarchical_tree_broadcaster | tensorflow/core/common_runtime/hierarchical_tree_broadcaster.cc | tensorflow/core/common_runtime/hierarchical_tree_broadcaster_test.cc | #include "tensorflow/core/common_runtime/hierarchical_tree_broadcaster.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#define READABLE_KEYS false
namespace tensorflow {
namespace {
string BroadcastBufKey(const string& exec_key, int subdiv, int src_rank,
int dst_rank) {
if (READABLE_KEYS) {
return strings::StrCat("broadcast(", exec_key, "):subdiv(", subdiv,
"):src(", src_rank, "):dst(", dst_rank, ")");
} else {
return strings::StrCat(exec_key, ":", subdiv, ":", src_rank, ":", dst_rank);
}
}
}
HierarchicalTreeBroadcaster::HierarchicalTreeBroadcaster()
: col_ctx_(nullptr),
col_params_(nullptr),
done_(nullptr),
is_source_(false) {}
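// Descriptive note (added): returns the index of the task that owns the
// device with the given global rank, given the per-task device counts.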
int HierarchicalTreeBroadcaster::GetDeviceTask(
int device_rank, const std::vector<int>& dev_per_task) {
int num_tasks = static_cast<int>(dev_per_task.size());
int task_lo = 0;
int task_hi = -1;
for (int ti = 0; ti < num_tasks; ti++) {
task_hi = task_lo + dev_per_task[ti];
if (task_lo <= device_rank && device_rank < task_hi) return ti;
task_lo = task_hi;
}
LOG(FATAL) << "Unexpected device rank " << device_rank << " for " << task_hi
<< " devices";
return -1;
}
Status HierarchicalTreeBroadcaster::InitializeCollectiveParams(
CollectiveParams* col_params) {
CHECK_EQ(col_params->instance.type, BROADCAST_COLLECTIVE);
CHECK_EQ(col_params->instance.impl_details.collective_name,
"HierarchicalTreeBroadcast");
const string& device_name =
col_params->group.members[col_params->default_rank].device.name();
std::vector<int> dev_per_task;
const string* prior_task_name = &col_params->group.members[0].task;
int dev_count = 1;
for (int di = 1; di < col_params->group.group_size; ++di) {
if (col_params->group.members[di].task != *prior_task_name) {
dev_per_task.push_back(dev_count);
dev_count = 1;
prior_task_name = &col_params->group.members[di].task;
} else {
++dev_count;
}
}
dev_per_task.push_back(dev_count);
CHECK_EQ(col_params->group.num_tasks, dev_per_task.size());
if (VLOG_IS_ON(2)) {
string dpt_buf;
for (int dpt : dev_per_task) strings::StrAppend(&dpt_buf, dpt, ";");
VLOG(2) << "HierarchicalTreeBroadcaster::InitializeCollectiveParams device="
<< device_name << " source_rank=" << col_params->source_rank
<< " dev_per_task=" << dpt_buf;
}
int num_tasks = col_params->group.num_tasks;
int num_subdivs = num_tasks + (num_tasks > 1 ? 1 : 0);
int total_num_devices = 0;
for (int num_dev : dev_per_task) total_num_devices += num_dev;
col_params->instance.impl_details.subdiv_permutations.resize(num_subdivs);
col_params->subdiv_rank.reserve(num_subdivs);
col_params->instance.impl_details.subdiv_source_rank.reserve(num_subdivs);
if (num_tasks > 1) {
const int sdi = 0;
std::vector<int>& perm =
col_params->instance.impl_details.subdiv_permutations[sdi];
CHECK_EQ(perm.size(), 0);
int device_count = 0;
int source_task = GetDeviceTask(col_params->source_rank, dev_per_task);
for (int ti = 0; ti < col_params->group.num_tasks; ti++) {
bool participate = false;
if (source_task == ti) {
perm.push_back(col_params->source_rank);
participate =
col_params->group.members[col_params->source_rank].device.name() ==
device_name;
} else {
perm.push_back(device_count);
participate = col_params->group.members[device_count].device.name() ==
device_name;
}
if (participate) col_params->subdiv_rank.push_back(ti);
device_count += dev_per_task[ti];
}
if (col_params->subdiv_rank.empty()) col_params->subdiv_rank.push_back(-1);
col_params->instance.impl_details.subdiv_source_rank.push_back(source_task);
}
VLOG(2) << collective_util::SubdivPermDebugString(*col_params);
int abs_di = 0;
for (int ti = 0; ti < col_params->group.num_tasks; ti++) {
const int sdi = ti + (num_tasks > 1 ? 1 : 0);
std::vector<int>& perm =
col_params->instance.impl_details.subdiv_permutations[sdi];
CHECK_EQ(perm.size(), 0);
bool participate = false;
int subdiv_source = 0;
for (int di = 0; di < dev_per_task[ti]; di++) {
perm.push_back(abs_di);
if (col_params->group.members[abs_di].device.name() == device_name) {
participate = true;
col_params->subdiv_rank.push_back(di);
}
if (abs_di == col_params->source_rank) subdiv_source = di;
abs_di++;
}
if (!participate) col_params->subdiv_rank.push_back(-1);
col_params->instance.impl_details.subdiv_source_rank.push_back(
subdiv_source);
}
for (int sri = 0; sri < num_subdivs; sri++) {
CHECK_GE(col_params->instance.impl_details.subdiv_source_rank[sri], 0);
}
VLOG(2) << collective_util::SubdivPermDebugString(*col_params);
return absl::OkStatus();
}
Status HierarchicalTreeBroadcaster::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
CHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
void HierarchicalTreeBroadcaster::Run(StatusCallback done) {
CHECK(col_ctx_);
CHECK(col_params_);
done_ = std::move(done);
is_source_ = col_params_->is_source;
RunTree();
}
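// Descriptive note (added): ranks within a subdivision form a binary tree
// rooted at the source. Returns the rank this device receives from, or -1 if
// it is the source or does not participate. With source rank 0 the parent of
// rank r is (r - 1) / 2; with a nonzero source, ranks 0 and 1 receive
// directly from the source and the parent of rank r is otherwise (r / 2) - 1.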
int HierarchicalTreeBroadcaster::TreeRecvFrom(const CollectiveParams& cp,
int subdiv) {
DCHECK_LT(subdiv, static_cast<int>(cp.subdiv_rank.size()));
int my_rank = cp.subdiv_rank[subdiv];
if (-1 == my_rank) return -1;
const auto& impl = cp.instance.impl_details;
DCHECK_LT(subdiv, static_cast<int>(impl.subdiv_source_rank.size()));
int source_rank = impl.subdiv_source_rank[subdiv];
if (my_rank == source_rank) return -1;
if (source_rank == 0) {
return (my_rank - 1) / 2;
} else {
int predecessor_rank = (my_rank / 2) - 1;
return (predecessor_rank < 0) ? source_rank : predecessor_rank;
}
}
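// Fills `targets` with the ranks this device sends to in subdivision
// `subdiv`. With source_rank 0 the successors of rank r are 2r + 1 and
// 2r + 2; otherwise they are 2(r + 1) and 2(r + 1) + 1, and a non-zero
// source additionally seeds ranks 0 and 1 directly. Successors outside the
// group or equal to the source rank are skipped.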
void HierarchicalTreeBroadcaster::TreeSendTo(const CollectiveParams& cp,
int subdiv,
std::vector<int>* targets) {
DCHECK_LT(subdiv, static_cast<int>(cp.subdiv_rank.size()));
int my_rank = cp.subdiv_rank[subdiv];
if (-1 == my_rank) return;
const auto& impl = cp.instance.impl_details;
DCHECK_LT(subdiv, static_cast<int>(impl.subdiv_source_rank.size()));
int source_rank = impl.subdiv_source_rank[subdiv];
int group_size = 0;
for (int i = 0; i < impl.subdiv_permutations[subdiv].size(); i++) {
if (impl.subdiv_permutations[subdiv][i] >= 0) {
group_size++;
}
}
targets->clear();
int successor_rank = 0;
if (source_rank == 0) {
successor_rank = (2 * my_rank) + 1;
} else {
successor_rank = (2 * (my_rank + 1));
}
DCHECK_NE(successor_rank, my_rank);
if (cp.is_source && source_rank != 0) {
if (group_size > 1) {
targets->push_back(0);
}
if (group_size > 2 && source_rank != 1) {
targets->push_back(1);
}
}
for (int i = 0; i < 2; ++i) {
if (successor_rank < group_size && successor_rank != source_rank) {
targets->push_back(successor_rank);
}
++successor_rank;
}
}
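// Executes the broadcast subdivision by subdivision. In each subdivision this
// device belongs to, a non-source rank first blocks on a receive from its
// tree parent, then every rank forwards the value to its TreeSendTo targets.
// The source copies its input to its output exactly once (on the sole
// subdivision, or on a later one when there are several), skipping the copy
// when the two tensors already alias. All pending sends and the copy are
// awaited before moving on to the next subdivision.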
void HierarchicalTreeBroadcaster::RunTree() {
int num_subdivs = static_cast<int>(col_params_->subdiv_rank.size());
for (int si = 0; si < num_subdivs; si++) {
int my_rank = col_params_->subdiv_rank[si];
if (-1 == my_rank) continue;
int source_rank = col_params_->instance.impl_details.subdiv_source_rank[si];
if (VLOG_IS_ON(1)) {
string subdiv_buf;
for (int r : col_params_->instance.impl_details.subdiv_permutations[si]) {
strings::StrAppend(&subdiv_buf, r, ",");
}
VLOG(1) << "Running Broadcast tree device=" << col_ctx_->device_name
<< " subdiv=" << si << " perm=" << subdiv_buf
<< " my_rank=" << my_rank << " source_rank=" << source_rank;
}
mutex mu;
int pending_count = 0;
condition_variable all_done;
if (my_rank >= 0 && my_rank != source_rank) {
tsl::profiler::TraceMe activity(
[&] { return strings::StrCat("ReceiveValue:", si); },
tsl::profiler::TraceMeLevel::kInfo);
int recv_from_rank = TreeRecvFrom(*col_params_, si);
Notification note;
DispatchRecv(si, recv_from_rank, my_rank, col_ctx_->output,
[this, &mu, ¬e](const Status& s) {
mutex_lock l(mu);
status_.Update(s);
note.Notify();
});
note.WaitForNotification();
}
{
tsl::profiler::TraceMe activity(
[&] { return strings::StrCat("ForwardValue:", si); },
tsl::profiler::TraceMeLevel::kInfo);
if (my_rank >= 0 && status_.ok()) {
std::vector<int> send_to_ranks;
TreeSendTo(*col_params_, si, &send_to_ranks);
for (int i = 0; i < send_to_ranks.size(); ++i) {
int target_rank = send_to_ranks[i];
{
mutex_lock l(mu);
++pending_count;
}
DispatchSend(si, target_rank, my_rank,
(is_source_ ? col_ctx_->input : col_ctx_->output),
[this, &mu, &pending_count, &all_done](const Status& s) {
mutex_lock l(mu);
status_.Update(s);
--pending_count;
if (pending_count == 0) {
all_done.notify_all();
}
});
}
}
if (status_.ok() && is_source_ && (1 == num_subdivs || 0 != si)) {
VLOG(2) << "copying input to output for device="
<< col_ctx_->device_name << " subdiv=" << si;
if (col_ctx_->input != col_ctx_->output &&
(DMAHelper::base(col_ctx_->input) !=
DMAHelper::base(col_ctx_->output))) {
{
mutex_lock l(mu);
++pending_count;
}
DeviceContext* op_dev_ctx = col_ctx_->op_ctx->op_device_context();
CollectiveRemoteAccessLocal::MemCpyAsync(
op_dev_ctx, op_dev_ctx, col_ctx_->device, col_ctx_->device,
col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), col_ctx_->input,
col_ctx_->output, 0,
[this, &mu, &pending_count, &all_done](const Status& s) {
mutex_lock l(mu);
status_.Update(s);
--pending_count;
if (0 == pending_count) {
all_done.notify_all();
}
});
}
}
{
mutex_lock l(mu);
if (pending_count > 0) {
all_done.wait(l);
}
}
}
}
VLOG(2) << "device=" << col_ctx_->device_name << " return status " << status_;
done_(status_);
}
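// Posts `src_tensor` to the peer that holds `dst_rank` in this subdivision.
// The rank is translated through the subdivision permutation into a global
// device index, and the buffer key encodes (exec_key, subdiv, src_rank,
// dst_rank) so the receiving side can match the transfer.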
void HierarchicalTreeBroadcaster::DispatchSend(int subdiv, int dst_rank,
int src_rank,
const Tensor* src_tensor,
const StatusCallback& done) {
tsl::profiler::ScopedMemoryDebugAnnotation op_annotation(
col_params_->name.data(), col_ctx_->step_id, "dynamic",
src_tensor->dtype(),
[src_tensor]() { return src_tensor->shape().DebugString(); });
string send_buf_key =
BroadcastBufKey(col_ctx_->exec_key, subdiv, src_rank, dst_rank);
int dst_idx =
col_params_->instance.impl_details.subdiv_permutations[subdiv][dst_rank];
VLOG(3) << "DispatchSend " << send_buf_key << " from_device "
<< col_ctx_->device_name << " to_device "
<< col_params_->group.members[dst_idx].device.name()
<< " subdiv=" << subdiv << " dst_rank=" << dst_rank
<< " dst_idx=" << dst_idx;
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->group.members[dst_idx].device.name(),
col_params_->group.members[dst_idx].task, send_buf_key, col_ctx_->device,
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), src_tensor,
col_ctx_->device_locality, col_ctx_->op_ctx->cancellation_manager(),
done);
}
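// Mirror of DispatchSend: receives into `dst_tensor` from the peer that holds
// `src_rank`, using the same buffer-key scheme. The literal 0 passed below is
// the device-to-device stream index.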
void HierarchicalTreeBroadcaster::DispatchRecv(int subdiv, int src_rank,
int dst_rank, Tensor* dst_tensor,
const StatusCallback& done) {
string recv_buf_key =
BroadcastBufKey(col_ctx_->exec_key, subdiv, src_rank, dst_rank);
int src_idx =
col_params_->instance.impl_details.subdiv_permutations[subdiv][src_rank];
VLOG(3) << "DispatchRecv " << recv_buf_key << " from_device "
<< col_params_->group.members[src_idx].device.name() << " to_device "
<< col_ctx_->device_name << " subdiv=" << subdiv
<< " src_rank=" << src_rank << " src_idx=" << src_idx;
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->group.members[src_idx].device.name(),
col_params_->group.members[src_idx].task,
col_params_->group.members[src_idx].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), dst_tensor,
      col_ctx_->device_locality, 0,
col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(HierarchicalTreeBroadcast, HierarchicalTreeBroadcaster);
}
} | #include "tensorflow/core/common_runtime/hierarchical_tree_broadcaster.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class TrivialTest : public ::testing::Test {
protected:
TrivialTest() {}
};
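// Checks TreeRecvFrom/TreeSendTo for a single subdivision: D is the group
// size, S the source rank, R the rank under test, RF the expected rank to
// receive from, and ST the expected list of send targets.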
#define DEF_TL_TEST(D, S, R, RF, ST) \
TEST_F(TrivialTest, TreeLinks_##D##Devs_##S##Source_##R##Rank) { \
auto* cp = new CollectiveParams(); \
core::ScopedUnref unref(cp); \
cp->group.group_size = D; \
cp->instance.impl_details.subdiv_source_rank = {S}; \
cp->instance.impl_details.subdiv_permutations.push_back( \
std::vector<int>(D, 0)); \
cp->subdiv_rank = {R}; \
cp->is_source = (S == R); \
EXPECT_EQ(RF, HierarchicalTreeBroadcaster::TreeRecvFrom(*cp, 0)); \
std::vector<int> expected = ST; \
std::vector<int> send_to; \
HierarchicalTreeBroadcaster::TreeSendTo(*cp, 0, &send_to); \
ASSERT_EQ(expected.size(), send_to.size()); \
for (int i = 0; i < expected.size(); ++i) { \
EXPECT_EQ(expected[i], send_to[i]); \
} \
}
#define V(...) std::vector<int>({__VA_ARGS__})
DEF_TL_TEST(2, 0, 0, -1, V(1))
DEF_TL_TEST(2, 1, 0, 1, V())
DEF_TL_TEST(2, 0, 1, 0, V())
DEF_TL_TEST(2, 1, 1, -1, V(0))
DEF_TL_TEST(3, 0, 0, -1, V(1, 2))
DEF_TL_TEST(3, 0, 1, 0, V())
DEF_TL_TEST(3, 0, 2, 0, V())
DEF_TL_TEST(3, 1, 0, 1, V(2))
DEF_TL_TEST(3, 1, 1, -1, V(0))
DEF_TL_TEST(3, 1, 2, 0, V())
DEF_TL_TEST(3, 2, 0, 2, V())
DEF_TL_TEST(3, 2, 1, 2, V())
DEF_TL_TEST(3, 2, 2, -1, V(0, 1))
DEF_TL_TEST(4, 0, 0, -1, V(1, 2))
DEF_TL_TEST(4, 0, 1, 0, V(3))
DEF_TL_TEST(4, 0, 2, 0, V())
DEF_TL_TEST(4, 0, 3, 1, V())
DEF_TL_TEST(4, 1, 0, 1, V(2, 3))
DEF_TL_TEST(4, 1, 1, -1, V(0))
DEF_TL_TEST(4, 1, 2, 0, V())
DEF_TL_TEST(4, 1, 3, 0, V())
DEF_TL_TEST(4, 2, 0, 2, V(3))
DEF_TL_TEST(4, 2, 1, 2, V())
DEF_TL_TEST(4, 2, 2, -1, V(0, 1))
DEF_TL_TEST(4, 2, 3, 0, V())
DEF_TL_TEST(4, 3, 0, 3, V(2))
DEF_TL_TEST(4, 3, 1, 3, V())
DEF_TL_TEST(4, 3, 2, 0, V())
DEF_TL_TEST(4, 3, 3, -1, V(0, 1))
DEF_TL_TEST(8, 0, 0, -1, V(1, 2))
DEF_TL_TEST(8, 0, 1, 0, V(3, 4))
DEF_TL_TEST(8, 0, 2, 0, V(5, 6))
DEF_TL_TEST(8, 0, 3, 1, V(7))
DEF_TL_TEST(8, 0, 4, 1, V())
DEF_TL_TEST(8, 0, 5, 2, V())
DEF_TL_TEST(8, 0, 6, 2, V())
DEF_TL_TEST(8, 0, 7, 3, V())
DEF_TL_TEST(8, 7, 0, 7, V(2, 3))
DEF_TL_TEST(8, 7, 1, 7, V(4, 5))
DEF_TL_TEST(8, 7, 2, 0, V(6))
DEF_TL_TEST(8, 7, 3, 0, V())
DEF_TL_TEST(8, 7, 4, 1, V())
DEF_TL_TEST(8, 7, 5, 1, V())
DEF_TL_TEST(8, 7, 6, 2, V())
DEF_TL_TEST(8, 7, 7, -1, V(0, 1))
#undef DEF_TL_TEST
#undef V
class HierarchicalTreeBroadcasterTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, dtype, shape, test_env_.get()));
}
}
}
typedef std::function<void(Tensor*)> InitFunc;
void Broadcast(bool forward_input) {
VLOG(2) << "#instances=" << instances_.size();
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, forward_input, &done] {
di->DoBroadcast(forward_input);
++done;
});
}
while (done < instances_.size()) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int tensor_len, int fail_after,
bool forward_input) {
Init(num_workers, num_devices, dtype, TensorShape({tensor_len}),
device_type, fail_after);
for (int di = 0; di < instances_.size(); ++di) {
instances_[di]->InitTensor([di](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
t->flat<T>()(i) = value;
}
});
}
Tensor expected = instances_[0]->input_tensor_;
Broadcast(forward_input);
for (int di = 0; di < instances_.size(); ++di) {
if (!instances_[di]->status_.ok()) {
ASSERT_GT(fail_after, 0);
ASSERT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
++failure_count_;
continue;
}
test::ExpectTensorEqual<T>(expected, instances_[di]->output_tensor_);
}
if (fail_after > 0) {
EXPECT_GT(failure_count_, 0);
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, DataType dtype, const TensorShape& shape,
CollectiveTestEnv* test_env)
: test_env_(test_env), input_tensor_(dtype, shape) {
col_params_ =
CreateCollectiveParams(*test_env_, rank, "HierarchicalTreeBroadcast",
BROADCAST_COLLECTIVE, dtype, shape);
col_params_->is_source = (rank == 0);
col_params_->source_rank = 0;
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
}
void InitTensor(const InitFunc& f) { f(&input_tensor_); }
void DoBroadcast(bool forward_input) {
if (forward_input) {
output_tensor_ = input_tensor_;
} else {
output_tensor_ = Tensor(input_tensor_.dtype(), input_tensor_.shape());
}
status_ = RunCollective(test_env_, col_params_.get(), device_,
&input_tensor_, &output_tensor_);
}
CollectiveTestEnv* test_env_;
Tensor input_tensor_;
Tensor output_tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
int failure_count_ = 0;
};
class HierarchicalTreeBroadcasterInitParamsTest : public ::testing::Test {
protected:
void RunSubdivPermsTest(
CollectiveParams* cp,
const std::vector<std::vector<int>>& expected_subdiv_perms,
const std::vector<int>& expected_subdiv_rank,
const std::vector<int>& expected_subdiv_source_rank) {
cp->instance.impl_details.subdiv_permutations.clear();
cp->subdiv_rank.clear();
cp->instance.impl_details.subdiv_source_rank.clear();
HierarchicalTreeBroadcaster* broadcaster = new HierarchicalTreeBroadcaster;
core::ScopedUnref unref(broadcaster);
TF_CHECK_OK(broadcaster->InitializeCollectiveParams(cp));
EXPECT_EQ(expected_subdiv_perms,
cp->instance.impl_details.subdiv_permutations);
EXPECT_EQ(expected_subdiv_rank, cp->subdiv_rank);
EXPECT_EQ(expected_subdiv_source_rank,
cp->instance.impl_details.subdiv_source_rank);
}
};
TEST_F(HierarchicalTreeBroadcasterInitParamsTest,
InitializeParams1Task8Device) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 1;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "HierarchicalTreeBroadcast",
BROADCAST_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->source_rank = 0;
cp->default_rank = 0;
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3, 4, 5, 6, 7}}, {0}, {0});
cp->source_rank = 2;
cp->default_rank = 2;
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3, 4, 5, 6, 7}}, {2}, {2});
cp->source_rank = 2;
cp->default_rank = 0;
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3, 4, 5, 6, 7}}, {0}, {2});
}
TEST_F(HierarchicalTreeBroadcasterInitParamsTest,
InitializeParams4Tasks8Device) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "HierarchicalTreeBroadcast",
BROADCAST_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->source_rank = 0;
cp->default_rank = 0;
RunSubdivPermsTest(cp.get(),
{{0, 8, 16, 24},
{0, 1, 2, 3, 4, 5, 6, 7},
{8, 9, 10, 11, 12, 13, 14, 15},
{16, 17, 18, 19, 20, 21, 22, 23},
{24, 25, 26, 27, 28, 29, 30, 31}},
{0, 0, -1, -1, -1}, {0, 0, 0, 0, 0});
cp->source_rank = 2;
cp->default_rank = 0;
RunSubdivPermsTest(cp.get(),
{{2, 8, 16, 24},
{0, 1, 2, 3, 4, 5, 6, 7},
{8, 9, 10, 11, 12, 13, 14, 15},
{16, 17, 18, 19, 20, 21, 22, 23},
{24, 25, 26, 27, 28, 29, 30, 31}},
{-1, 0, -1, -1, -1}, {0, 2, 0, 0, 0});
cp->source_rank = 9;
cp->default_rank = 9;
RunSubdivPermsTest(cp.get(),
{{0, 9, 16, 24},
{0, 1, 2, 3, 4, 5, 6, 7},
{8, 9, 10, 11, 12, 13, 14, 15},
{16, 17, 18, 19, 20, 21, 22, 23},
{24, 25, 26, 27, 28, 29, 30, 31}},
{1, -1, 1, -1, -1}, {1, 0, 1, 0, 0});
}
TEST_F(HierarchicalTreeBroadcasterInitParamsTest,
InitializeParams4TasksVariableDevice) {
auto* cp = new CollectiveParams();
core::ScopedUnref unref(cp);
int num_tasks = 4;
cp->group.device_type = DeviceType("GPU");
cp->group.num_tasks = num_tasks;
cp->group.group_size = 0;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->instance.impl_details.collective_name = "HierarchicalTreeBroadcast";
std::vector<int> dev_per_task = {4, 4, 6, 8};
for (int ti = 0; ti < cp->group.num_tasks; ti++) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", ti);
for (int di = 0; di < dev_per_task[ti]; di++) {
CollGroupMember member;
member.device.set_name(strings::StrCat(task_name, "/device:GPU:", di));
member.task = task_name;
cp->group.members.push_back(member);
cp->group.group_size++;
}
}
cp->source_rank = 0;
cp->default_rank = 0;
RunSubdivPermsTest(cp,
{{0, 4, 8, 14},
{0, 1, 2, 3},
{4, 5, 6, 7},
{8, 9, 10, 11, 12, 13},
{14, 15, 16, 17, 18, 19, 20, 21}},
{0, 0, -1, -1, -1}, {0, 0, 0, 0, 0});
cp->source_rank = 2;
cp->default_rank = 0;
RunSubdivPermsTest(cp,
{{2, 4, 8, 14},
{0, 1, 2, 3},
{4, 5, 6, 7},
{8, 9, 10, 11, 12, 13},
{14, 15, 16, 17, 18, 19, 20, 21}},
{-1, 0, -1, -1, -1}, {0, 2, 0, 0, 0});
cp->source_rank = 9;
cp->default_rank = 5;
RunSubdivPermsTest(cp,
{{0, 4, 9, 14},
{0, 1, 2, 3},
{4, 5, 6, 7},
{8, 9, 10, 11, 12, 13},
{14, 15, 16, 17, 18, 19, 20, 21}},
{-1, -1, 1, -1, -1}, {2, 0, 0, 1, 0});
}
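// Instantiates an end-to-end broadcast test: B is the data type, T the device
// type, W the worker count, D the devices per worker, L the tensor length,
// A the fail_after setting for fault injection (0 disables it), and F whether
// the input buffer is forwarded to the output.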
#define DEF_TEST(B, T, W, D, L, A, F) \
TEST_F(HierarchicalTreeBroadcasterTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Len##L##_Abt##A##_Fw##F) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_BOOL: { \
RunTest<bool>(dtype, DEVICE_##T, W, D, L, A, F); \
} break; \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, L, A, F); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, L, A, F); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, L, A, F); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, L, A, F); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 0, false)
DEF_TEST(FLOAT, CPU, 1, 2, 1001, 0, true)
DEF_TEST(FLOAT, CPU, 2, 1, 128, 0, false)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 0, true)
DEF_TEST(FLOAT, CPU, 2, 8, 4095, 0, false)
DEF_TEST(FLOAT, CPU, 4, 4, 1045991, 0, true)
DEF_TEST(BOOL, CPU, 1, 4, 1, 0, false)
DEF_TEST(BOOL, CPU, 2, 4, 1, 0, false)
DEF_TEST(BOOL, CPU, 2, 4, 1001, 0, false)
DEF_TEST(DOUBLE, CPU, 2, 4, 128, 0, false)
DEF_TEST(INT32, CPU, 2, 4, 128, 0, true)
DEF_TEST(INT64, CPU, 2, 4, 128, 0, false)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 1, true)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 5, false)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 0, true)
DEF_TEST(FLOAT, GPU, 1, 2, 33, 0, false)
DEF_TEST(FLOAT, GPU, 1, 3, 64, 0, true)
DEF_TEST(FLOAT, GPU, 1, 8, 1001, 0, false)
DEF_TEST(FLOAT, GPU, 1, 8, 4095, 0, true)
DEF_TEST(FLOAT, GPU, 1, 8, 1045991, 0, false)
DEF_TEST(BOOL, GPU, 1, 4, 1, 0, false)
DEF_TEST(BOOL, GPU, 1, 4, 1001, 0, false)
DEF_TEST(DOUBLE, GPU, 1, 8, 1001, 0, true)
DEF_TEST(INT64, GPU, 1, 8, 1001, 0, false)
DEF_TEST(FLOAT, GPU, 1, 8, 128, 6, true)
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/hierarchical_tree_broadcaster.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/hierarchical_tree_broadcaster_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a098cd4-54c3-4324-ad2c-669555b1b427 | cpp | tensorflow/tensorflow | int32_fulltype | tensorflow/core/common_runtime/int32_fulltype.cc | tensorflow/core/common_runtime/int32_fulltype_test.cc | #include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
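// Rewrites the full type of a single tensor: a TFT_TENSOR carrying TFT_INT32
// becomes TFT_SHAPE_TENSOR, and an unset type is filled in as
// TFT_SHAPE_TENSOR[dtype] when dtype is DT_INT32 (or for any dtype when
// set_only_int32 is false). `node` and `output_idx` are used only to build
// more precise error messages.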
Status Int32FulltypePass::Int32FullTypeForTensor(DataType dtype,
FullTypeDef* tensor_t,
bool set_only_int32,
Node* node, int output_idx) {
if (tensor_t->type_id() == TFT_TENSOR) {
if (tensor_t->args_size() != 1) {
if (node != nullptr) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", node->name(), "' (op='",
node->op_def().name(), "') in '", debug_location_,
"' has TFT_TENSOR output ", output_idx, " which has ",
tensor_t->args_size(), " args instead of 1.\n got:\n",
tensor_t->DebugString()));
} else {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("TFT_TENSOR has ", tensor_t->args_size(),
" args instead of 1.\n got:\n",
tensor_t->DebugString()));
}
}
if (tensor_t->args(0).type_id() == TFT_INT32) {
tensor_t->set_type_id(TFT_SHAPE_TENSOR);
}
} else if ((tensor_t->type_id() == TFT_UNSET) &&
((dtype == DT_INT32) || !set_only_int32)) {
FullTypeDef data_t;
map_dtype_to_tensor(dtype, data_t);
tensor_t->set_type_id(TFT_SHAPE_TENSOR);
(*tensor_t->add_args()) = data_t;
}
return absl::OkStatus();
}
static bool is_host_memory_int32(MemoryType mtype, DataType dtype) {
return (mtype == HOST_MEMORY) && (dtype == DT_INT32);
}
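// Ensures every op node that produces an int32 output resident in host memory
// carries a full type annotation marking that output TFT_SHAPE_TENSOR. An
// existing TFT_PRODUCT annotation is validated and patched per output; nodes
// without one get a fresh annotation covering all outputs.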
Status Int32FulltypePass::ProcessGraph(Graph* graph, bool ints_on_device) {
for (Node* n : graph->op_nodes()) {
auto output_types = n->output_types();
bool needs_annotation = false;
for (const auto& output_type : output_types) {
MemoryType mtype = ints_on_device
? MTypeFromDTypeIntsOnDevice(output_type)
: MTypeFromDType(output_type);
if (is_host_memory_int32(mtype, output_type)) {
needs_annotation = true;
}
}
if (!needs_annotation) {
continue;
}
if (n->def().has_experimental_type()) {
FullTypeDef* node_t = n->mutable_def()->mutable_experimental_type();
if (node_t->type_id() != TFT_PRODUCT) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", n->name(), "' (op='",
n->op_def().name(),
"') does not start with TFT_PRODUCT.\n got:\n",
node_t->DebugString()));
}
if (node_t->args_size() != output_types.size()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Full type for node='", n->name(), "' (op='",
n->op_def().name(), "') has ", node_t->args_size(),
" outputs but output_types has ", output_types.size(),
" outputs.\n got:\n", node_t->DebugString()));
}
for (int i = 0; i < node_t->args_size(); ++i) {
if (MTypeFromDType(output_types[i]) == HOST_MEMORY) {
TF_RETURN_IF_ERROR(
Int32FullTypeForTensor(output_types[i], node_t->mutable_args(i),
true, n, i));
}
}
VLOG(2) << "Full type information in node '" << n->name() << "' (op='"
<< n->op_def().name()
<< "') modified to use TFT_SHAPE_TENSOR for int32.\n"
<< node_t->DebugString();
} else {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
for (const auto& output_type : output_types) {
MemoryType mtype = ints_on_device
? MTypeFromDTypeIntsOnDevice(output_type)
: MTypeFromDType(output_type);
if (is_host_memory_int32(mtype, output_type)) {
FullTypeDef data_t;
map_dtype_to_tensor(output_type, data_t);
FullTypeDef out_t;
out_t.set_type_id(TFT_SHAPE_TENSOR);
(*out_t.add_args()) = data_t;
(*t.add_args()) = out_t;
} else {
t.add_args();
}
}
(*n->mutable_def()->mutable_experimental_type()) = t;
VLOG(2) << "Full type information with TFT_SHAPE_TENSOR for int32 added "
"to node '"
<< n->name() << "' (op='" << n->op_def().name() << "').\n"
<< t.DebugString();
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/int32_fulltype.h"
#include <string>
#include <unordered_map>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace {
REGISTER_OP("FloatInt32").Output("a: float").Output("b: int32");
REGISTER_OP("FloatInt32Int32FT")
.Output("a: float")
.Output("b: int32")
.Output("c: int32");
REGISTER_OP("FloatWithoutInt32").Output("a: float");
REGISTER_OP("StringWithoutInt32").Output("a: string");
class Int32FulltypeTest : public ::testing::Test {
protected:
Int32FulltypeTest() {}
Status BuildGraph(const GraphDefBuilder& builder, Graph* out_graph) {
TF_RETURN_IF_ERROR(GraphDefBuilderToGraph(builder, out_graph));
RebuildNodeNameMap(*out_graph);
return absl::OkStatus();
}
void AddTensorFT(FullTypeDef& t, tensorflow::FullTypeId out_t_id,
tensorflow::FullTypeId data_t_id) {
FullTypeDef out_t;
FullTypeDef data_t;
if (out_t_id != TFT_UNSET) {
data_t.set_type_id(data_t_id);
out_t.set_type_id(out_t_id);
(*out_t.add_args()) = data_t;
}
(*t.add_args()) = out_t;
}
Status Int32FulltypeAnnotate(Graph* graph, bool ints_on_device = false) {
Int32FulltypePass int32_fulltype;
return int32_fulltype.ProcessGraph(graph, ints_on_device);
}
Node* GetNodeByName(const Graph& graph, const string& name) {
const auto search = nodes_by_name_.find(name);
CHECK(search != nodes_by_name_.end()) << "Unknown node name: " << name;
return graph.FindNodeId(search->second);
}
protected:
std::unordered_map<string, int> nodes_by_name_;
private:
void RebuildNodeNameMap(const Graph& graph) {
nodes_by_name_.clear();
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node->id();
}
}
};
TEST_F(Int32FulltypeTest, CreateFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatInt32", b.opts().WithName("float_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 2);
ASSERT_EQ(ft.args(0).type_id(), TFT_UNSET);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, ModifyFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node = ops::SourceOp("FloatInt32Int32FT",
b.opts().WithName("float_int32_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
AddTensorFT(t, TFT_TENSOR, TFT_FLOAT);
AddTensorFT(t, TFT_TENSOR, TFT_INT32);
AddTensorFT(t, TFT_SHAPE_TENSOR, TFT_INT32);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 3);
ASSERT_EQ(ft.args(0).type_id(), TFT_TENSOR);
ASSERT_EQ(ft.args(0).args_size(), 1);
ASSERT_EQ(ft.args(0).args(0).type_id(), TFT_FLOAT);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft.args(2).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(2).args_size(), 1);
ASSERT_EQ(ft.args(2).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, ModifyUnsetFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node = ops::SourceOp("FloatInt32Int32FT",
b.opts().WithName("float_int32_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
AddTensorFT(t, TFT_UNSET, TFT_FLOAT);
AddTensorFT(t, TFT_UNSET, TFT_INT32);
AddTensorFT(t, TFT_UNSET, TFT_INT32);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_int32_int32");
ASSERT_TRUE(node->def().has_experimental_type());
const FullTypeDef& ft = node->def().experimental_type();
ASSERT_EQ(ft.type_id(), TFT_PRODUCT);
ASSERT_EQ(ft.args_size(), 3);
ASSERT_EQ(ft.args(0).type_id(), TFT_UNSET);
ASSERT_EQ(ft.args(0).args_size(), 0);
ASSERT_EQ(ft.args(1).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(1).args_size(), 1);
ASSERT_EQ(ft.args(1).args(0).type_id(), TFT_INT32);
ASSERT_EQ(ft.args(2).type_id(), TFT_SHAPE_TENSOR);
ASSERT_EQ(ft.args(2).args_size(), 1);
ASSERT_EQ(ft.args(2).args(0).type_id(), TFT_INT32);
}
TEST_F(Int32FulltypeTest, NotCreateFTFloat) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatWithoutInt32",
b.opts().WithName("float_without_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "float_without_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, NotCreateFTString) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("StringWithoutInt32",
b.opts().WithName("string_without_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g));
Node* node = GetNodeByName(g, "string_without_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, NotCreateFTIntsOnDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("FloatInt32", b.opts().WithName("float_int32"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Int32FulltypeAnnotate(&g, true));
Node* node = GetNodeByName(g, "float_int32");
ASSERT_FALSE(node->def().has_experimental_type());
}
TEST_F(Int32FulltypeTest, BadTensorFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
FullTypeDef& t = *node->mutable_def()->mutable_experimental_type();
t.add_args()->set_type_id(TFT_UNSET);
t.add_args()->set_type_id(TFT_TENSOR);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("which has 0 args instead of 1."));
}
TEST_F(Int32FulltypeTest, BadFTWithoutProduct) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_FLOAT);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("does not start with TFT_PRODUCT."));
}
TEST_F(Int32FulltypeTest, BadProductFT) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* node =
ops::SourceOp("FloatInt32", b.opts().WithName("float_without_int32"));
node->mutable_def()->mutable_experimental_type()->set_type_id(TFT_PRODUCT);
TF_EXPECT_OK(BuildGraph(b, &g));
}
const auto& status = Int32FulltypeAnnotate(&g);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
::testing::HasSubstr("has 0 outputs but output_types has 2 outputs."));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/int32_fulltype.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/int32_fulltype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a51674b-4549-47f9-b5af-fc0064e0296e | cpp | tensorflow/tensorflow | lower_functional_ops | tensorflow/core/common_runtime/lower_functional_ops.cc | tensorflow/core/common_runtime/lower_functional_ops_test.cc | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_propagation.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
constexpr const char* const kTpuReplicateAttr = "_tpu_replicate";
constexpr const char* const kXlaClusterAttr = "_xla_compile_id";
constexpr const char* const kXlaMustCompileAttr = "_XlaMustCompile";
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
bool match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && match;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
string match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && !match.empty();
}
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
bool MarkedForTpuCompilation(const Node* n) {
return CheckStringAttr(n, kTpuReplicateAttr);
}
bool MarkedForXlaCompilation(const Node* n) {
return CheckStringAttr(n, kXlaClusterAttr) ||
CheckBoolAttr(n, kXlaMustCompileAttr);
}
bool HasArgsOrRetvals(const Graph& g) {
for (const Node* n : g.op_nodes()) {
if (n->IsArg() || n->IsRetval()) return true;
}
return false;
}
const absl::flat_hash_set<std::string>& DevicePropagationOpList() {
static const auto op_list = new absl::flat_hash_set<std::string>(
{"Identity", "IdentityN", "Enter", "Exit", "Switch", "Merge",
"NextIteration"});
return *op_list;
}
bool IsPropagatableDevice(StringPiece device_string) {
DeviceNameUtils::ParsedName device;
return DeviceNameUtils::ParseFullName(device_string, &device) &&
device.type == DEVICE_TPU;
}
}
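// Inlines eligible function calls and lowers If/Case/While nodes into
// Switch/Merge-based control flow, skipping anything marked for TPU or XLA
// compilation, and skipping control-flow lowering entirely when the session
// requests functional control flow (single-threaded executor, TFRT, or the
// explicit disable flag). The node-id loop starts at 2 to skip the reserved
// _SOURCE and _SINK nodes. Afterwards, TPU device assignments are propagated
// onto the pass-through nodes created by lowering.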
Status LowerFunctionalOpsPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.partition_graphs != nullptr) {
return errors::Internal(
"Lowering If/While ops should happen before partitioning.");
}
if (options.graph == nullptr) {
return absl::OkStatus();
}
Graph* g = options.graph->get();
if (g == nullptr) {
return errors::Internal(
"Lowering While op requires a graph to be available.");
}
FunctionLibraryDefinition* flib_def = options.flib_def;
if (flib_def == nullptr) {
return errors::Internal(
"Lowering If op requires a FunctionLibraryDefinition to be available.");
}
const bool lower_function_calls =
options.session_options && options.session_options->config.graph_options()
.optimizer_options()
.do_function_inlining();
bool keep_lowered_nodes_fetchable = !HasArgsOrRetvals(*g);
const bool functional_control_flow =
options.session_options &&
(options.session_options->config.experimental().executor_type() ==
"SINGLE_THREADED_EXECUTOR" ||
options.session_options->config.experimental().use_tfrt() ||
options.session_options->config.experimental()
.disable_functional_ops_lowering());
const auto used_by_xla = [](Node* node) -> bool {
return MarkedForTpuCompilation(node) || MarkedForXlaCompilation(node);
};
const auto lower_control_flow = [&](Node* node) -> bool {
return LowerUsingSwitchMergeIsOn(node) && !used_by_xla(node);
};
int num_node_ids_before_lowering = g->num_node_ids();
for (int i = 2; i < g->num_node_ids(); ++i) {
Node* n = g->FindNodeId(i);
if (n == nullptr) continue;
if (IsFunctionCall(*flib_def, *n) && !used_by_xla(n) &&
(lower_function_calls || LowerAsMultiDeviceFunctionIsOn(n))) {
TF_RETURN_IF_ERROR(RewriteFunctionCallNode(n, g, *flib_def,
keep_lowered_nodes_fetchable));
continue;
}
if (functional_control_flow) continue;
if (n->IsIfNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsCaseNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, g, keep_lowered_nodes_fetchable));
} else if (n->IsWhileNode() && lower_control_flow(n)) {
TF_RETURN_IF_ERROR(
RewriteWhileNode(n, g, flib_def, keep_lowered_nodes_fetchable));
} else {
DCHECK(!lower_control_flow(n))
<< "Node " << FormatNodeForError(*n) << " of type "
<< n->type_string() << " has '"
<< LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr
<< "' attr set but it does not support lowering.\n";
}
}
PropagateDevices(
[num_node_ids_before_lowering](const Node& n) {
return DevicePropagationOpList().contains(n.type_string()) &&
n.id() >= num_node_ids_before_lowering;
},
IsPropagatableDevice, g);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 10,
LowerFunctionalOpsPass);
} | #include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
typedef FunctionDefHelper FDH;
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
static void AssertHasSubstr(StringPiece s, StringPiece expected) {
ASSERT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
FunctionDef WhileWithIfCond(int32_t N) {
const Tensor kN = test::AsScalar<int32>(N);
return FDH::Define(
"WhileWithIfCond",
{"counter: int32", "pred: bool", "x: int32"},
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", kN}, {"dtype", DT_INT32}}},
{{"z"}, "Less", {"counter", "N"}, {{"T", DT_INT32}}},
});
}
FunctionDef WhileWithIfBody() {
NameAttrList then_func;
then_func.set_name("XTimesTwo");
NameAttrList else_func;
else_func.set_name("XTimesFour");
const Tensor kOne = test::AsScalar<int32>(1);
std::vector<DataType> input_types = {DT_INT32};
std::vector<DataType> output_types = {DT_INT32};
return FDH::Define(
"WhileWithIfBody",
{"counter: int32", "pred: bool", "x: int32"},
{"updated_counter: int32", "pred: bool", "if: int32"},
{},
{
{{"if"},
"If",
{"pred", "x"},
{{"then_branch", then_func},
{"else_branch", else_func},
{"Tcond", DT_BOOL},
{"Tin", input_types},
{"Tout", output_types},
{kLowerUsingSwitchMergeAttr, true}}},
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_INT32}}},
{{"updated_counter"}, "Add", {"counter", "one"}, {{"T", DT_INT32}}},
});
}
TEST(LowerIfWhileTest, CondInWhile) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::XTimesFour();
*f_lib_proto.add_function() = WhileWithIfCond(3);
*f_lib_proto.add_function() = WhileWithIfBody();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto counter = ops::Placeholder(root.WithOpName("counter"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(counter.node()), NodeBuilder::NodeOut(pred.node()),
NodeBuilder::NodeOut(a.node())});
Node* while_node;
AttrValue cond_func;
cond_func.mutable_func()->set_name("WhileWithIfCond");
AttrValue body_func;
body_func.mutable_func()->set_name("WhileWithIfBody");
TF_ASSERT_OK(NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_BOOL, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto* op : graph->op_nodes()) {
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 8);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(counter.node()), Input::Initializer(0));
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node, 2)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 64);
}
}
FunctionDef IfWithWhileThen() {
NameAttrList cond_func;
cond_func.set_name("LessThanOrEqualToN");
NameAttrList body_func;
body_func.set_name("XTimesTwo");
std::vector<DataType> input_and_output_types = {DT_INT32};
std::vector<TensorShape> output_shapes = {TensorShape()};
return FDH::Define(
"IfWithWhileThen",
{"x: int32"},
{"while: int32"},
{},
{
{{"while"},
"While",
{"x"},
{{"cond", cond_func},
{"body", body_func},
{"T", input_and_output_types},
{"output_shapes", output_shapes},
{kLowerUsingSwitchMergeAttr, true}}},
});
}
TEST(LowerIfWhileTest, WhileInCond) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
*f_lib_proto.add_function() = IfWithWhileThen();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue then_func;
then_func.mutable_func()->set_name("IfWithWhileThen");
AttrValue else_func;
else_func.mutable_func()->set_name("XTimesTwo");
Node* if_node;
TF_ASSERT_OK(NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", then_func)
.Attr("else_branch", else_func)
.Attr("Tout", {DT_INT32})
.Attr(kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &if_node));
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "if") {
node_called_if_count++;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "if") {
node_called_if_count++;
}
ASSERT_NE(op->type_string(), "While");
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(if_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_functional_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_functional_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
17ca6bcf-5dc9-4192-89da-99d96d651d01 | cpp | tensorflow/tensorflow | lower_case_op | tensorflow/core/common_runtime/lower_case_op.cc | tensorflow/core/common_runtime/lower_case_op_test.cc | #include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
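// Lowers a single Case node: the branch index feeds a _SwitchN whose outputs
// act as per-branch pivots, every data input is routed through its own
// _SwitchN, each branch function becomes a call node gated by its pivot, and
// per-output Merge nodes recombine the results. A final "branch_executed"
// Merge carries the control signal of whichever branch actually ran.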
class CaseBuilder {
public:
CaseBuilder(Node* case_op, const std::vector<string>& branch_fn_names,
bool keep_node_fetchable, Graph* graph);
Status CreatePivotNodes();
Status AddInputs();
Status AddOutputs();
Status BuildLoweredCaseOutput();
private:
string NewName(const string& infix);
Status AddInput(Node* src, int src_output);
std::vector<NodeOut> outputs_;
Node* control_predecessor_;
Node* case_op_;
Node* lowered_case_output_;
OutputTensor branch_index_;
int num_branches_;
std::vector<Node*> pivots_;
std::vector<Node*> call_nodes_;
Node* branch_executed_node_;
Graph* graph_;
string name_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
std::vector<NodeBuilder> branch_call_builders_;
};
CaseBuilder::CaseBuilder(Node* case_op,
const std::vector<string>& branch_fn_names,
bool keep_node_fetchable, Graph* graph)
: case_op_(case_op),
num_branches_(branch_fn_names.size()),
graph_(graph),
name_(case_op->name()),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*case_op_) {
branch_call_builders_.reserve(num_branches_);
for (int b = 0; b < num_branches_; b++) {
branch_call_builders_.emplace_back(NewName(strings::StrCat("branch", b)),
branch_fn_names[b], graph->op_registry(),
&debug_info_);
branch_call_builders_[b].Device(case_op_->requested_device());
branch_call_builders_[b].Attr(kLowerAsMultiDeviceFunctionAttr, true);
}
TF_CHECK_OK(case_op_->input_tensor(0, &branch_index_));
}
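// Builds the _SwitchN on the branch index plus one Identity pivot per branch;
// a pivot executing marks its branch as the taken one.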
Status CaseBuilder::CreatePivotNodes() {
Node* branch_index;
TF_RETURN_IF_ERROR(NodeBuilder(NewName("branch_index"), "_SwitchN",
graph_->op_registry(), &debug_info_)
.Input(NodeOut(branch_index_))
.Input(NodeOut(branch_index_))
.Attr("num_outs", num_branches_)
.Device(case_op_->requested_device())
.Finalize(graph_, &branch_index));
control_predecessor_ = branch_index;
pivots_.resize(num_branches_, nullptr);
for (int b = 0; b < num_branches_; b++) {
TF_RETURN_IF_ERROR(NodeBuilder(NewName(strings::StrCat("pivot_", b)),
"Identity", graph_->op_registry(),
&debug_info_)
.Input(branch_index, b)
.Device(case_op_->requested_device())
.Finalize(graph_, &pivots_[b]));
}
return absl::OkStatus();
}
string CaseBuilder::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
Status CaseBuilder::AddInput(Node* src, int src_output) {
Node* input;
NodeDebugInfo debug_info(*src);
TF_RETURN_IF_ERROR(NodeBuilder(NewName(src->name()), "_SwitchN",
graph_->op_registry(), &debug_info)
.Input(src, src_output)
.Input(branch_index_)
.Device(src->requested_device())
.Attr("_class", {src->name()})
.Attr("num_outs", num_branches_)
.Finalize(graph_, &input));
for (int b = 0; b < num_branches_; b++) {
branch_call_builders_[b].Input(input, b);
}
return absl::OkStatus();
}
Status CaseBuilder::AddInputs() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(case_op_->input_edges(&edges));
for (int i = 1; i < edges.size(); ++i) {
const Edge* e = edges[i];
TF_RETURN_IF_ERROR(AddInput(e->src(), e->src_output()));
}
for (const Edge* e : case_op_->in_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(e->src(), control_predecessor_);
}
}
return absl::OkStatus();
}
Status CaseBuilder::AddOutputs() {
call_nodes_.resize(num_branches_, nullptr);
for (int b = 0; b < num_branches_; b++) {
TF_RETURN_IF_ERROR(
branch_call_builders_[b].Finalize(graph_, &call_nodes_[b]));
graph_->AddControlEdge(pivots_[b], call_nodes_[b]);
}
const int num_outputs = call_nodes_[0]->num_outputs();
std::vector<Node*> merges(num_outputs);
outputs_.resize(merges.size());
for (int i = 0; i < num_outputs; ++i) {
std::vector<NodeOut> merge_input;
merge_input.reserve(num_branches_);
for (int j = 0; j < num_branches_; j++) {
merge_input.emplace_back(call_nodes_[j], i);
}
TF_RETURN_IF_ERROR(NodeBuilder(NewName("merge"), "Merge",
graph_->op_registry(), &debug_info_)
.Input(merge_input)
.Device(case_op_->requested_device())
.Finalize(graph_, &merges[i]));
outputs_[i] = NodeOut(merges[i], 0);
}
std::vector<NodeOut> pivots(num_branches_);
for (int j = 0; j < num_branches_; j++) {
pivots[j] = NodeOut(pivots_[j]);
}
TF_RETURN_IF_ERROR(NodeBuilder(NewName("branch_executed"), "Merge",
graph_->op_registry(), &debug_info_)
.Input(pivots)
.ControlInputs(call_nodes_)
.Device(case_op_->requested_device())
.Finalize(graph_, &branch_executed_node_));
TF_RETURN_IF_ERROR(BuildLoweredCaseOutput());
for (const Edge* e : case_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(branch_executed_node_, e->dst());
} else {
graph_->AddEdge(merges[e->src_output()], 0, e->dst(), e->dst_input());
}
}
return absl::OkStatus();
}
Status CaseBuilder::BuildLoweredCaseOutput() {
NodeBuilder builder = keep_node_fetchable_ && !outputs_.empty()
? NodeBuilder(name_, "IdentityN").Input(outputs_)
: NodeBuilder(name_, "NoOp");
return builder.Device(case_op_->requested_device())
.ControlInput(branch_executed_node_)
.Finalize(graph_, &lowered_case_output_);
}
}
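// Entry point for lowering: reads the Case node's "branches" function list,
// builds pivots, inputs and outputs through CaseBuilder, and removes the
// original node. If keep_node_fetchable is set, an IdentityN with the Case
// node's name stands in for it so existing fetches keep working; otherwise a
// NoOp takes its place.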
Status RewriteCaseNode(Node* n, Graph* g, bool keep_node_fetchable) {
VLOG(2) << "Lower Case node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* branches_attr = n->attrs().Find("branches");
if (branches_attr == nullptr) {
return errors::InvalidArgument("branch functions missing");
}
int num_branches = branches_attr->list().func_size();
std::vector<string> branch_fn_names;
branch_fn_names.reserve(num_branches);
for (int b = 0; b < num_branches; b++) {
branch_fn_names.emplace_back(branches_attr->list().func(b).name());
}
CaseBuilder cb(n, branch_fn_names, keep_node_fetchable, g);
TF_RETURN_IF_ERROR(cb.CreatePivotNodes());
TF_RETURN_IF_ERROR(cb.AddInputs());
TF_RETURN_IF_ERROR(cb.AddOutputs());
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncListAttr(const absl::Span<const char* const> names) {
AttrValue attr;
for (const char* name : names) {
attr.mutable_list()->add_func()->set_name(name);
}
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerCaseOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
*(f_lib_proto.add_function()) = test::function::XTimes16();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto branch_index =
ops::Placeholder(root.WithOpName("branch_index"), DT_INT32);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(
NodeBuilder("case", "Case", &root.graph()->flib_def())
.Input(branch_index.node())
.Input(inputs)
.Attr("branches",
FuncListAttr({"XTimesTwo", "XTimesFour", "XTimes16"}))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_case_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
if (op->name() == "case") {
++node_called_case_count;
}
}
ASSERT_EQ(node_called_case_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
node_called_case_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
ASSERT_NE(op->type_string(), "Case");
if (op->name() == "case") {
++node_called_case_count;
}
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(node_called_case_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(-1));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(0));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(1));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(2));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(20));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
}
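// Branches produce no outputs and only update a resource variable; lowering
// must still sequence the side effects before the final variable read.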
TEST(LowerCaseOpTest, BranchFunctionsWithoutOutputs) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
const auto assign_add = [](const string& fn_name, int v) {
const Tensor tensor = test::AsScalar<int32>(v);
return FDH::Create(
fn_name, {"v: resource"}, {}, {},
{
{{"c"}, "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}},
{{"upd"},
"AssignAddVariableOp",
{"v", "c:output"},
{{"dtype", DT_INT32}}},
},
{},
{{"side_effects", "upd"}});
};
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = assign_add("AddOne", 1);
*(f_lib_proto.add_function()) = assign_add("AddTwo", 2);
*(f_lib_proto.add_function()) = assign_add("AddTen", 10);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto branch_index =
ops::Placeholder(root.WithOpName("branch_index"), DT_INT32);
auto initial_val = ops::Placeholder(root.WithOpName("initial_val"), DT_INT32);
auto var = ops::VarHandleOp(root.WithOpName("var"), DT_INT32, {});
auto init = ops::AssignVariableOp(root.WithOpName("init"), var, initial_val);
Node* case_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(var.node())});
TF_ASSERT_OK(
NodeBuilder("case", "Case", &root.graph()->flib_def())
.Input(branch_index.node())
.Input(inputs)
.ControlInput(init.operation.node())
.Attr("branches", FuncListAttr({"AddOne", "AddTwo", "AddTen"}))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", DataTypeSlice{})
.Finalize(root.graph(), &case_node));
auto read = ops::ReadVariableOp(
root.WithOpName("read").WithControlDependencies(Output(case_node)), var,
DT_INT32);
TF_ASSERT_OK(root.DoShapeInference(case_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
int node_called_case_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) ++switch_count;
if (op->IsMerge()) ++merge_count;
if (op->name() == "case") ++node_called_case_count;
ASSERT_NE(op->type_string(), "Case");
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(node_called_case_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(-5));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(0));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 11);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(1));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(2));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(31));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
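// Branch functions marked _noinline must survive lowering as function-call
// nodes (one call per branch) rather than being inlined into the graph.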
TEST(LowerCaseOpTest, DoNotInlineLoweredFunction) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef x_times_four = test::function::XTimesFour();
FunctionDef x_times_16 = test::function::XTimes16();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*x_times_four.mutable_attr())["_noinline"].set_b(true);
(*x_times_16.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = x_times_two;
*(f_lib_proto.add_function()) = x_times_four;
*(f_lib_proto.add_function()) = x_times_16;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto branch_index =
ops::Placeholder(root.WithOpName("branch_index"), DT_INT32);
Node* written_case;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(
NodeBuilder("case", "Case", &root.graph()->flib_def())
.Input(branch_index.node())
.Input(inputs)
.Attr("branches",
FuncListAttr({"XTimesTwo", "XTimesFour", "XTimes16"}))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_case));
TF_ASSERT_OK(root.DoShapeInference(written_case));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int x_times_four_count = 0;
int x_times_16_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == x_times_four.signature().name()) {
x_times_four_count++;
}
if (op->type_string() == x_times_16.signature().name()) {
x_times_16_count++;
}
ASSERT_NE(op->type_string(), "Case");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(x_times_four_count, 1);
ASSERT_EQ(x_times_16_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(-2));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_case)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(0));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_case)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(1));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_case)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(2));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_case)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(branch_index.node()), Input::Initializer(31));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_case)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 160);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_case_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_case_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
284c2bee-5161-48a9-ad09-ac68672f9621 | cpp | tensorflow/tensorflow | single_threaded_executor | tensorflow/core/common_runtime/single_threaded_executor.cc | tensorflow/core/common_runtime/single_threaded_executor_test.cc | #include "tensorflow/core/common_runtime/single_threaded_executor.h"
#include <utility>
#include "tensorflow/core/common_runtime/entry.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
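// Rejects nodes the single-threaded executor cannot run synchronously:
// reference-typed outputs and Switch are always rejected; other v1 control
// flow is rejected unless allow_control_flow_sync_execution is set.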
Status ValidateOpIsSafeForSyncExecution(
const Node& n, bool allow_control_flow_sync_execution) {
for (DataType dt : n.output_types()) {
if (IsRefType(dt)) {
return errors::Unimplemented(
"Single-threaded executor does not support reference-typed "
"edges. But saw type ",
DataTypeString(dt), " in outputs of node ", n.name());
}
}
if (n.IsSwitch()) {
return errors::FailedPrecondition(
"Single-threaded executor does not support switch op, but saw node ",
n.name(),
". Perhaps your graph contains old-style control flow primitives? "
"Try using tf.compat.v1.enable_control_flow_v2().");
}
if (n.IsControlFlow() && !allow_control_flow_sync_execution) {
return errors::FailedPrecondition(
"Single-threaded executor does not support low level control flow, "
" but saw control flow node ",
n.name(),
". Perhaps your graph contains old-style control flow primitives? "
"Try using tf.compat.v1.enable_control_flow_v2().");
}
return absl::OkStatus();
}
namespace {
typedef absl::InlinedVector<TensorValue, 4UL> TensorValueVec;
typedef absl::InlinedVector<AllocatorAttributes, 4UL> AllocatorAttributeVec;
static const string& kSingleThreadedExecutor =
*new string("SINGLE_THREADED_EXECUTOR");
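// Executes the kernels of a graph one at a time, in a fixed topological
// order, on the caller's thread. Tensors flow through a single flat input
// buffer whose slot indices are computed once in Initialize().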
class SingleThreadedExecutorImpl : public Executor {
public:
explicit SingleThreadedExecutorImpl(const LocalExecutorParams& params)
: params_(params) {}
~SingleThreadedExecutorImpl() override {
for (const KernelState& kernel_state : kernels_) {
params_.delete_kernel(kernel_state.kernel);
}
for (const ConstTensorKernelState& kernel_state : const_tensor_kernels_) {
params_.delete_kernel(kernel_state.kernel);
}
}
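  // Builds the static schedule: orders kernels in reverse post-order, splits
  // out kernels that produce a single constant tensor, and assigns each
  // remaining kernel a contiguous slice of the flat input buffer.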
Status Initialize(const Graph& graph) {
std::vector<Node*> ordered_nodes;
ordered_nodes.reserve(graph.num_nodes());
GetReversePostOrder(graph, &ordered_nodes);
int ordered_nodes_size = ordered_nodes.size();
if (ordered_nodes_size != graph.num_nodes()) {
return errors::InvalidArgument("Graph had ", graph.num_nodes(),
" but reverse post-order had ",
ordered_nodes.size());
}
    kernels_.reserve(ordered_nodes.size() - 2);  // Excludes _SOURCE and _SINK.
std::vector<Node*> nodes_with_kernels;
std::vector<Node*> nodes_with_const_tensor_kernels;
nodes_with_kernels.reserve(ordered_nodes.size() - 2);
std::map<size_t, Node*> arg_index_to_node_map;
absl::flat_hash_map<Node*, size_t> node_to_index_map;
for (Node* n : ordered_nodes) {
if (n->IsSource() || n->IsSink()) {
continue;
}
TF_RETURN_IF_ERROR(ValidateOpIsSafeForSyncExecution(
*n, params_.allow_control_flow_sync_execution));
if (n->IsArg()) {
int32_t arg_index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &arg_index));
if (arg_index < 0) {
return errors::InvalidArgument("Invalid argument index ", arg_index,
" in node ", n->name());
}
arg_index_to_node_map[arg_index] = n;
continue;
}
OpKernel* kernel;
TF_RETURN_IF_ERROR(params_.create_kernel(n->properties(), &kernel));
const Tensor* const_tensor;
if (n->num_outputs() == 1 && (const_tensor = kernel->const_tensor())) {
const size_t kernel_index = const_tensor_kernels_.size();
const_tensor_kernels_.push_back({});
nodes_with_const_tensor_kernels.push_back(n);
ConstTensorKernelState& kernel_state =
const_tensor_kernels_[kernel_index];
kernel_state.kernel = kernel;
kernel_state.const_tensor = *const_tensor;
} else {
const size_t kernel_index = kernels_.size();
kernels_.push_back({});
nodes_with_kernels.push_back(n);
KernelState& kernel_state = kernels_[kernel_index];
kernel_state.kernel = kernel;
kernel_state.num_inputs = n->num_inputs();
kernel_state.num_outputs = n->num_outputs();
node_to_index_map[n] = kernel_index;
if (kernel_index == 0) {
kernel_state.input_start_index = 0;
} else {
const KernelState& previous_kernel_state = kernels_[kernel_index - 1];
kernel_state.input_start_index =
previous_kernel_state.input_start_index +
previous_kernel_state.num_inputs;
}
}
}
if (!arg_index_to_node_map.empty()) {
const size_t num_args = arg_index_to_node_map.rbegin()->first + 1;
arg_output_locations_.resize(num_args);
for (const auto& arg_index_node_pair : arg_index_to_node_map) {
const size_t arg_index = arg_index_node_pair.first;
const Node* arg_node = arg_index_node_pair.second;
arg_output_locations_[arg_index].reserve(arg_node->out_edges().size());
for (const Edge* e : arg_node->out_edges()) {
if (e->src_output() == Graph::kControlSlot) {
continue;
} else if (e->src_output() != 0) {
return errors::Internal("Invalid output index ", e->src_output(),
" from argument node ", arg_index);
}
arg_output_locations_[arg_index].push_back(
kernels_[node_to_index_map[e->dst()]].input_start_index +
e->dst_input());
}
}
}
for (size_t i = 0; i < const_tensor_kernels_.size(); ++i) {
Node* n = nodes_with_const_tensor_kernels[i];
ConstTensorKernelState& kernel_state = const_tensor_kernels_[i];
for (const Edge* e : n->out_edges()) {
if (e->src_output() == Graph::kControlSlot) {
continue;
} else if (e->src_output() != 0) {
return errors::Internal("Invalid output index ", e->src_output(),
" from node ", n->DebugString());
}
kernel_state.output_locations.push_back(
kernels_[node_to_index_map[e->dst()]].input_start_index +
e->dst_input());
}
bool on_host =
kernel_state.kernel->output_memory_types()[0] == HOST_MEMORY;
kernel_state.output_alloc_attr.set_on_host(on_host);
}
for (size_t i = 0; i < kernels_.size(); ++i) {
Node* n = nodes_with_kernels[i];
KernelState& kernel_state = kernels_[i];
kernel_state.output_locations.resize(kernel_state.num_outputs);
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge()) {
kernel_state.output_locations[e->src_output()].push_back(
kernels_[node_to_index_map[e->dst()]].input_start_index +
e->dst_input());
}
}
kernel_state.output_alloc_attrs.resize(kernel_state.num_outputs);
AllocatorAttributes* attrs = kernel_state.output_alloc_attrs.data();
OpKernel* op_kernel = kernel_state.kernel;
for (int out = 0; out < n->num_outputs(); out++) {
DCHECK_LT(out, op_kernel->output_memory_types().size());
bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY;
if (on_host) {
AllocatorAttributes h;
h.set_on_host(on_host);
attrs[out].Merge(h);
}
}
}
if (!kernels_.empty()) {
const KernelState& last_kernel_state = kernels_.back();
total_num_inputs_ =
last_kernel_state.input_start_index + last_kernel_state.num_inputs;
input_alloc_attrs_.resize(total_num_inputs_);
for (size_t i = 0; i < kernels_.size(); ++i) {
for (size_t j = 0; j < kernels_[i].output_locations.size(); ++j) {
for (size_t output_location : kernels_[i].output_locations[j]) {
input_alloc_attrs_[output_location] =
kernels_[i].output_alloc_attrs[j];
}
}
}
} else {
total_num_inputs_ = 0;
}
return absl::OkStatus();
}
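  // Runs every kernel in schedule order on the calling thread. Arguments and
  // constant tensors are written into the flat input buffer first; each
  // kernel's outputs are then forwarded to their destination slots.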
Status Run(const Args& args) override {
std::vector<Entry> inputs(total_num_inputs_);
TensorValueVec node_inputs;
AllocatorAttributeVec input_alloc_attrs;
Device* device = params_.device;
std::unique_ptr<Device> user_device;
if (args.user_intra_op_threadpool != nullptr) {
      user_device = RenamedDevice::NewRenamedDevice(
          device->name(), device, /*owns_underlying=*/false,
          /*isolate_session_state=*/false, args.user_intra_op_threadpool);
device = user_device.get();
}
OpKernelContext::Params params;
params.step_id = args.step_id;
params.device = device;
params.log_memory = false;
params.rendezvous = args.rendezvous;
params.session_state = args.session_state;
params.session_metadata = params_.session_metadata;
params.tensor_store = args.tensor_store;
params.cancellation_manager = args.cancellation_manager;
params.session_config = args.session_config;
params.call_frame = args.call_frame;
params.function_library = params_.function_library;
params.resource_manager = device->resource_manager();
params.step_container = args.step_container;
params.collective_executor = args.collective_executor;
params.stack_trace = args.stack_trace;
params.slice_reader_cache = nullptr;
Args::Runner runner_copy = args.runner;
params.runner = &runner_copy;
params.run_all_kernels_inline = args.run_all_kernels_inline;
params.stats_collector = args.stats_collector;
params.executor_type = &kSingleThreadedExecutor;
params.frame_iter = FrameAndIter(0, 0);
params.is_input_dead = false;
device->TryGetDeviceContext(¶ms.op_device_context).IgnoreError();
auto context_cleanup = gtl::MakeCleanup([¶ms] {
if (params.op_device_context != nullptr) {
params.op_device_context->Unref();
}
});
params.forward_from_array = nullptr;
const size_t received_args =
args.call_frame ? args.call_frame->num_args() : 0;
if (TF_PREDICT_FALSE(arg_output_locations_.size() > received_args)) {
return errors::InvalidArgument("Expected ", arg_output_locations_.size(),
" arguments, but only received ",
received_args, ".");
}
for (size_t i = 0; i < arg_output_locations_.size(); ++i) {
const size_t num_destinations = arg_output_locations_[i].size();
if (num_destinations > 0) {
if (args.call_frame->CanConsumeArg(i)) {
Entry& first_input = inputs[arg_output_locations_[i][0]];
first_input.state = Entry::State::HAS_VALUE;
first_input.val.Init();
args.call_frame->ConsumeArg(i, first_input.val.get());
for (size_t j = 1; j < num_destinations; ++j) {
Entry& input = inputs[arg_output_locations_[i][j]];
input.state = Entry::State::HAS_VALUE;
input.val.Init(*first_input.val);
}
} else {
const Tensor* arg;
TF_RETURN_IF_ERROR(args.call_frame->GetArg(i, &arg));
for (size_t j = 0; j < num_destinations; ++j) {
Entry& input = inputs[arg_output_locations_[i][j]];
input.state = Entry::State::HAS_VALUE;
input.val.Init(*arg);
}
}
}
}
for (const ConstTensorKernelState& kernel_state : const_tensor_kernels_) {
for (size_t i = 0; i < kernel_state.output_locations.size(); ++i) {
Entry& input = inputs[kernel_state.output_locations[i]];
input.state = Entry::State::HAS_CONST_TENSOR;
input.const_tensor = &kernel_state.const_tensor;
}
}
for (size_t i = 0; i < kernels_.size(); ++i) {
const KernelState& kernel_state = kernels_[i];
const size_t input_start_index = kernel_state.input_start_index;
const size_t num_inputs = kernel_state.num_inputs;
const size_t num_outputs = kernel_state.num_outputs;
node_inputs.clear();
node_inputs.resize(num_inputs);
input_alloc_attrs.clear();
input_alloc_attrs.resize(num_inputs);
for (size_t j = 0; j < num_inputs; ++j) {
Entry& input = inputs[input_start_index + j];
switch (input.state) {
case Entry::State::HAS_CONST_TENSOR:
node_inputs[j].tensor = const_cast<Tensor*>(input.const_tensor);
break;
case Entry::State::HAS_VALUE:
node_inputs[j].tensor = input.val.get();
break;
default:
DCHECK(false) << "Input did not have a valid value.";
}
input_alloc_attrs[j] = input_alloc_attrs_[input_start_index + j];
}
params.inputs = node_inputs;
params.input_alloc_attrs = input_alloc_attrs;
params.op_kernel = kernel_state.kernel;
params.output_attr_array = kernel_state.output_alloc_attrs.data();
OpKernelContext ctx(¶ms, num_outputs);
device->Compute(kernel_state.kernel, &ctx);
TF_RETURN_IF_ERROR(ctx.status());
for (size_t j = 0; j < num_inputs; ++j) {
inputs[input_start_index + j].ClearVal();
}
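      // Forward each output to all of its destination slots: copy into every
      // slot but the last, then move into the last to save one copy.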
for (size_t j = 0; j < num_outputs; ++j) {
TensorValue val = ctx.release_output(j);
const size_t num_destinations = kernel_state.output_locations[j].size();
if (num_destinations > 0) {
for (size_t k = 0; k < num_destinations - 1; ++k) {
Entry& input = inputs[kernel_state.output_locations[j][k]];
input.state = Entry::State::HAS_VALUE;
if (val.tensor != nullptr) {
input.val.Init(*val.tensor);
          } else {
            // The kernel produced no tensor for this output; forward an
            // empty tensor of the declared output type.
            input.val.Init(Tensor(kernel_state.kernel->output_type(j)));
}
}
Entry& input =
inputs[kernel_state.output_locations[j][num_destinations - 1]];
input.state = Entry::State::HAS_VALUE;
if (val.tensor != nullptr) {
input.val.Init(std::move(*val.tensor));
} else {
input.val.Init(Tensor(kernel_state.kernel->output_type(j)));
}
}
delete val.tensor;
}
}
return absl::OkStatus();
}
private:
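  // The asynchronous API simply schedules the synchronous Run() on the
  // runner supplied in Args.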
void RunAsyncInternal(const Args& args, DoneCallback done) override {
args.runner([this, args, done]() { done(Run(args)); });
}
const LocalExecutorParams params_;
size_t total_num_inputs_;
struct KernelState {
OpKernel* kernel;
size_t input_start_index;
size_t num_inputs;
size_t num_outputs;
    // output_locations[j] holds the flat input-buffer indices that receive
    // output j of this kernel.
    std::vector<std::vector<size_t>> output_locations;
    // Allocator attributes for each output of this kernel.
    std::vector<AllocatorAttributes> output_alloc_attrs;
};
std::vector<KernelState> kernels_;
  // arg_output_locations_[i] holds the flat input-buffer indices fed by
  // argument i of the call frame.
  std::vector<std::vector<size_t>> arg_output_locations_;
struct ConstTensorKernelState {
OpKernel* kernel;
Tensor const_tensor;
std::vector<size_t> output_locations;
AllocatorAttributes output_alloc_attr;
};
std::vector<ConstTensorKernelState> const_tensor_kernels_;
  // Allocator attributes for every flat input slot, copied from the
  // producing kernel's output attributes during Initialize().
  std::vector<AllocatorAttributes> input_alloc_attrs_;
};
class SingleThreadedExecutorRegistrar {
public:
SingleThreadedExecutorRegistrar() {
ExecutorFactory::Register(kSingleThreadedExecutor, new Factory());
}
private:
class Factory : public ExecutorFactory {
Status NewExecutor(const LocalExecutorParams& params, const Graph& graph,
std::unique_ptr<Executor>* out_executor) override {
Executor* ret;
TF_RETURN_IF_ERROR(NewSingleThreadedExecutor(params, graph, &ret));
out_executor->reset(ret);
return absl::OkStatus();
}
};
};
static SingleThreadedExecutorRegistrar registrar;
}
Status NewSingleThreadedExecutor(const LocalExecutorParams& params,
const Graph& graph, Executor** executor) {
auto impl = std::make_unique<SingleThreadedExecutorImpl>(params);
TF_RETURN_IF_ERROR(impl->Initialize(graph));
*executor = impl.release();
return absl::OkStatus();
}
} | #include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace data {
namespace {
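// A stateful test op whose Compute body is injected per test at kernel
// creation time.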
class MockOp : public OpKernel {
public:
using OpKernel::OpKernel;
void SetCompute(std::function<void(OpKernelContext*)> compute) {
compute_ = std::move(compute);
}
void Compute(OpKernelContext* ctx) override {
OP_REQUIRES(ctx, compute_ != nullptr,
errors::FailedPrecondition("Compute() is not set"));
compute_(ctx);
}
private:
std::function<void(OpKernelContext* ctx)> compute_;
};
REGISTER_OP("Mock")
.Input("x: float")
.Output("y: float")
.Output("empty_output: string")
.SetIsStateful();
REGISTER_KERNEL_BUILDER(Name("Mock").Device(DEVICE_CPU), MockOp);
class ExecutorTest : public ::testing::Test {
protected:
ExecutorTest()
: device_(DeviceFactory::NewDevice("CPU", {},
"/job:localhost/replica:0/task:0")) {}
~ExecutorTest() override {
CHECK(rendez_->Unref());
}
void Create(std::unique_ptr<const Graph> graph,
std::function<void(OpKernelContext*)> mock_fn = nullptr) {
const int version = graph->versions().producer();
LocalExecutorParams params;
params.device = device_.get();
params.create_kernel =
[this, mock_fn = std::move(mock_fn), version](
const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
TF_RETURN_IF_ERROR(CreateNonCachedKernel(device_.get(), nullptr,
props, version, kernel));
if ((*kernel)->type_string_view() == "Mock") {
down_cast<MockOp*>(*kernel)->SetCompute(mock_fn);
}
return absl::OkStatus();
};
params.delete_kernel = [](OpKernel* kernel) {
DeleteNonCachedKernel(kernel);
};
TF_CHECK_OK(
NewExecutor("SINGLE_THREADED_EXECUTOR", params, *graph, &exec_));
runner_ = [](const std::function<void()>& fn) { fn(); };
rendez_ = NewLocalRendezvous();
}
Status Run(Rendezvous* rendez) {
Executor::Args args;
args.rendezvous = rendez;
args.runner = runner_;
return exec_->Run(args);
}
Status Run(CallFrameInterface* call_frame) {
Executor::Args args;
args.call_frame = call_frame;
args.runner = runner_;
return exec_->Run(args);
}
void TestContext(Executor::Args args,
std::function<void(OpKernelContext*)> test_fn) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
Node* arg = test::graph::Arg(g.get(), 0, DT_FLOAT);
Node* tmp;
TF_ASSERT_OK(NodeBuilder(g->NewName("n"), "Mock")
.Input(arg)
.Finalize(g.get(), &tmp));
auto ret = test::graph::Retval(g.get(), 0, tmp);
g->AddControlEdge(arg, ret);
FixupSourceAndSinkEdges(g.get());
bool mock_called = false;
Create(std::move(g), [&](OpKernelContext* ctx) {
mock_called = true;
ctx->set_output(0, ctx->input(0));
test_fn(ctx);
});
FunctionCallFrame call_frame({DT_FLOAT}, {DT_FLOAT});
TF_ASSERT_OK(call_frame.SetArgs({Tensor(DT_FLOAT, {0})}));
args.call_frame = &call_frame;
args.runner = runner_;
TF_ASSERT_OK(exec_->Run(args));
EXPECT_TRUE(mock_called);
}
std::unique_ptr<Device> device_;
std::unique_ptr<Executor> exec_ = nullptr;
Executor::Args::Runner runner_;
Rendezvous* rendez_ = nullptr;
};
Tensor V(const float val) {
Tensor tensor(DT_FLOAT, TensorShape({}));
tensor.scalar<float>()() = val;
return tensor;
}
float V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_FLOAT);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<float>()();
}
Rendezvous::ParsedKey Key(const string& sender, const uint64 incarnation,
const string& receiver, const string& name) {
Rendezvous::ParsedKey result;
TF_CHECK_OK(
Rendezvous::ParseKey(Rendezvous::CreateKey(sender, incarnation, receiver,
name, FrameAndIter(0, 0)),
&result));
return result;
}
TEST_F(ExecutorTest, UserIntraOpThreadPool) {
class DummyThreadPool : public thread::ThreadPoolInterface {
public:
void Schedule(std::function<void()> fn) override { fn(); }
int NumThreads() const override { return 1; }
int CurrentThreadId() const override { return -1; }
};
DummyThreadPool dummy_thread_pool;
Executor::Args args;
args.user_intra_op_threadpool = &dummy_thread_pool;
TestContext(args, [&](OpKernelContext* ctx) {
EXPECT_EQ(ctx->device()
->tensorflow_cpu_worker_threads()
->workers->AsEigenThreadPool(),
&dummy_thread_pool);
});
}
TEST_F(ExecutorTest, SimpleAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Arg(g.get(), 0, DT_FLOAT);
auto in1 = test::graph::Arg(g.get(), 1, DT_FLOAT);
auto tmp = test::graph::Add(g.get(), in0, in1);
auto ret = test::graph::Retval(g.get(), 0, tmp);
g->AddControlEdge(in1, ret);
FixupSourceAndSinkEdges(g.get());
Create(std::move(g));
FunctionCallFrame call_frame({DT_FLOAT, DT_FLOAT}, {DT_FLOAT});
TF_ASSERT_OK(call_frame.SetArgs({V(1.0), V(2.0)}));
TF_ASSERT_OK(Run(&call_frame));
std::vector<Tensor> retvals;
TF_ASSERT_OK(call_frame.ConsumeRetvals(&retvals, false));
EXPECT_EQ(3.0, V(retvals[0]));
const Tensor* arg_0;
TF_ASSERT_OK(call_frame.GetArg(0, &arg_0));
EXPECT_EQ(1.0, V(*arg_0));
const Tensor* arg_1;
TF_ASSERT_OK(call_frame.GetArg(1, &arg_1));
EXPECT_EQ(2.0, V(*arg_1));
}
TEST_F(ExecutorTest, EmptyOutput) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
Node* in = test::graph::Arg(g.get(), 0, DT_FLOAT);
Node* mock;
TF_ASSERT_OK(
NodeBuilder(g->NewName("n"), "Mock").Input(in).Finalize(g.get(), &mock));
test::graph::Retval(g.get(), 0, mock, 0);
test::graph::Retval(g.get(), 1, mock, 1);
FixupSourceAndSinkEdges(g.get());
Create(std::move(g),
[&](OpKernelContext* ctx) { ctx->set_output(0, ctx->input(0)); });
FunctionCallFrame call_frame({DT_FLOAT}, {DT_FLOAT, DT_STRING});
TF_ASSERT_OK(call_frame.SetArgs({V(1.0)}));
TF_ASSERT_OK(Run(&call_frame));
std::vector<Tensor> retvals;
TF_ASSERT_OK(call_frame.ConsumeRetvals(&retvals, false));
EXPECT_EQ(1.0, V(retvals[0]));
EXPECT_EQ(DT_STRING, retvals[1].dtype());
EXPECT_EQ(0, retvals[1].tensor_data().size());
}
TEST_F(ExecutorTest, SelfAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto v = test::graph::Arg(g.get(), 0, DT_FLOAT);
const int N = 10;
for (int i = 1; i <= N; ++i) {
v = test::graph::Add(g.get(), v, v);
}
test::graph::Retval(g.get(), 0, v);
FixupSourceAndSinkEdges(g.get());
Create(std::move(g));
FunctionCallFrame call_frame({DT_FLOAT}, {DT_FLOAT});
TF_ASSERT_OK(call_frame.SetArgs({V(1.0)}));
TF_ASSERT_OK(Run(&call_frame));
std::vector<Tensor> retvals;
TF_ASSERT_OK(call_frame.ConsumeRetvals(&retvals, false));
EXPECT_EQ(1024.0, V(retvals[0]));
}
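// Builds a graph that fans the single argument out to N Identity nodes and
// reduces them with a randomly shaped tree of Add nodes.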
void BuildTree(int N, Graph* g) {
CHECK_GT(N, 1);
auto in = test::graph::Arg(g, 0, DT_FLOAT);
std::vector<Node*> nodes;
int i = 0;
for (; i < N; ++i) {
nodes.push_back(test::graph::Identity(g, in, 0));
}
random::PhiloxRandom philox(0, 17);
random::SimplePhilox rnd(&philox);
while (nodes.size() > 1) {
int x = rnd.Uniform(nodes.size());
auto in0 = nodes[x];
nodes[x] = nodes.back();
nodes.resize(nodes.size() - 1);
x = rnd.Uniform(nodes.size());
auto in1 = nodes[x];
nodes[x] = test::graph::Add(g, in0, in1);
}
test::graph::Retval(g, 0, nodes.back());
FixupSourceAndSinkEdges(g);
}
TEST_F(ExecutorTest, RandomTree) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
BuildTree(4096, g.get());
Create(std::move(g));
FunctionCallFrame call_frame({DT_FLOAT}, {DT_FLOAT});
TF_ASSERT_OK(call_frame.SetArgs({V(1.0)}));
TF_ASSERT_OK(Run(&call_frame));
std::vector<Tensor> retvals;
TF_ASSERT_OK(call_frame.ConsumeRetvals(&retvals, false));
EXPECT_EQ(4096.0, V(retvals[0]));
}
TEST_F(ExecutorTest, OpError) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto zero = test::graph::Constant(g.get(), V(0.0));
auto inf = test::graph::Unary(g.get(), "Reciprocal", zero);
auto check = test::graph::CheckNumerics(g.get(), inf, "message");
auto two = test::graph::Constant(g.get(), V(2.0));
test::graph::Binary(g.get(), "Mul", check, two);
FixupSourceAndSinkEdges(g.get());
Create(std::move(g));
FunctionCallFrame call_frame({}, {});
EXPECT_TRUE(absl::IsInvalidArgument(Run(&call_frame)));
}
TEST_F(ExecutorTest, ControlDependenciesFromSpecialNodes) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Arg(g.get(), 0, DT_FLOAT);
auto one = test::graph::Constant(g.get(), V(2.0));
auto add = test::graph::Add(g.get(), in0, one);
auto ret = test::graph::Retval(g.get(), 0, add);
g->AddControlEdge(in0, add);
g->AddControlEdge(one, ret);
FixupSourceAndSinkEdges(g.get());
Create(std::move(g));
FunctionCallFrame call_frame({DT_FLOAT}, {DT_FLOAT});
TF_ASSERT_OK(call_frame.SetArgs({V(1.0)}));
TF_ASSERT_OK(Run(&call_frame));
std::vector<Tensor> retvals;
TF_ASSERT_OK(call_frame.ConsumeRetvals(&retvals, false));
EXPECT_EQ(3.0, V(retvals[0]));
}
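// Benchmarks a randomly generated layered DAG of NoOp nodes: `width` bounds
// the fan-out added per layer and `depth` is the number of layers.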
void BM_executor(::testing::benchmark::State& state) {
const int width = state.range(0);
const int depth = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
random::PhiloxRandom philox(1729, 17);
random::SimplePhilox rand(&philox);
uint64 cur = 0;
uint32 r = 1 + rand.Rand32() % width;
std::vector<Node*> ready_nodes;
for (int i = 0; i < r; ++i) {
ready_nodes.push_back(test::graph::NoOp(g, {}));
++cur;
}
std::random_device random_device;
std::mt19937 rng(random_device());
for (int i = 0; i < depth; ++i) {
std::shuffle(ready_nodes.begin(), ready_nodes.end(), rng);
r = 1 + rand.Rand32() % (ready_nodes.size());
std::vector<Node*> control_inputs;
for (int j = 0; j < r; ++j) {
control_inputs.push_back(ready_nodes.back());
ready_nodes.pop_back();
}
Node* n = test::graph::NoOp(g, control_inputs);
++cur;
r = 1 + rand.Rand32() % width;
for (int j = 0; j < r; ++j) {
ready_nodes.push_back(test::graph::NoOp(g, {n}));
++cur;
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR", false)
.Run(state);
state.SetLabel(strings::StrCat("Nodes = ", cur));
state.SetItemsProcessed(cur * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(16, 1024);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(32, 8192);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 16);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(8192, 32);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 1024);
void BM_const_identity(::testing::benchmark::State& state) {
const int width = state.range(0);
const int outputs_per_const = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
for (int i = 0; i < width; ++i) {
Tensor i_t(i);
Node* const_node = test::graph::Constant(g, i_t);
for (int j = 0; j < outputs_per_const; ++j) {
test::graph::Identity(g, const_node);
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR",
false)
.Run(state);
state.SetLabel(strings::StrCat("Nodes = ", (1 + outputs_per_const) * width));
state.SetItemsProcessed((1 + outputs_per_const) * width *
static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_const_identity)->UseRealTime()->ArgPair(1, 1);
BENCHMARK(BM_const_identity)->UseRealTime()->ArgPair(1, 100);
BENCHMARK(BM_const_identity)->UseRealTime()->ArgPair(100, 1);
BENCHMARK(BM_const_identity)->UseRealTime()->ArgPair(100, 100);
#if 0
#define ALICE "/job:j/replica:0/task:0/cpu:0"
#define BOB "/job:j/replica:0/task:0/gpu:0"
static void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
Node* x = test::graph::Recv(g, "x", "float", ALICE, 1, BOB);
Node* y = test::graph::Recv(g, "y", "float", ALICE, 1, BOB);
Node* sum = test::graph::Add(g, x, y);
Node* z = test::graph::Send(g, sum, "z", BOB, 1, ALICE);
FixupSourceAndSinkEdges(g);
Tensor val(DT_FLOAT, TensorShape({}));
val.scalar<float>()() = 3.14;
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR", false)
.RunWithArgs({{x, val}, {y, val}}, {z}, state);
state.SetItemsProcessed(state.iterations());
}
BENCHMARK(BM_FeedInputFetchOutput);
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/single_threaded_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/single_threaded_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c8a63f1d-c176-4b51-8f7a-1bb28e6cba6d | cpp | tensorflow/tensorflow | permuter | tensorflow/core/common_runtime/permuter.cc | tensorflow/core/common_runtime/permuter_test.cc | #include "tensorflow/core/common_runtime/permuter.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
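// Permuter implements a collective permutation: each rank sends its input
// tensor to permutation[rank] and receives its output from the rank whose
// permutation entry names it.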
Permuter::Permuter()
: col_ctx_(nullptr), col_params_(nullptr), done_(nullptr), counter_(0) {}
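// Both the send and the recv report through this callback; done_ fires with
// the merged status once the counter reaches 2.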
StatusCallback Permuter::CheckCounterAndCallDone() {
return [this](const Status& s) {
mu_.lock();
status_.Update(s);
int counter = ++counter_;
Status status = status_;
mu_.unlock();
if (counter == 2) done_(status);
};
}
Status Permuter::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
DCHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
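// Dispatches the send to this rank's target, then scans the permutation for
// the rank that targets this one and posts the matching recv.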
void Permuter::Run(StatusCallback done) {
if (col_params_->instance.permutation.size() !=
col_params_->instance.devices.size()) {
    done(errors::Internal("Permutation must be the same size as devices"));
    return;  // Avoid running (and double-invoking done) with bad params.
  }
done_ = std::move(done);
DispatchSend(col_params_->default_rank,
col_params_->instance.permutation[col_params_->default_rank],
col_ctx_->input, CheckCounterAndCallDone());
for (int i = 0; i < col_params_->instance.permutation.size(); ++i) {
if (col_params_->default_rank == col_params_->instance.permutation[i]) {
DispatchRecv(i, col_params_->instance.permutation[i], col_ctx_->output,
CheckCounterAndCallDone());
}
}
}
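// The send and recv sides derive the same buffer key from
// (exec_key, src_rank, target_rank), so PostToPeer and RecvFromPeer
// rendezvous on it.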
void Permuter::DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done) {
string send_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchSend " << send_buf_key << " from_device "
<< col_ctx_->device_name << " to_device "
<< col_params_->instance.devices[target_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->instance.devices[target_rank],
col_params_->group.members[target_rank].task, send_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
col_ctx_->op_ctx->cancellation_manager(), done);
}
void Permuter::DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done) {
string recv_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
VLOG(1) << "DispatchRecv " << recv_buf_key << " to_device "
<< col_ctx_->device_name << " from_device "
<< col_params_->instance.devices[src_rank]
<< " target_rank=" << target_rank << " src_rank=" << src_rank;
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->instance.devices[src_rank],
col_params_->group.members[src_rank].task,
col_params_->group.members[src_rank].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
0, col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(Permute, Permuter);
}
} | #include "tensorflow/core/common_runtime/permuter.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class PermuterTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices,
const std::vector<int>& permutation, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, permutation, dtype, shape, test_env_.get()));
}
}
}
typedef std::function<void(Tensor*)> InitFunc;
void Permute(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoPermute();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < instances_.size()) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int tensor_len, int fail_after) {
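    // Build a test permutation by swapping adjacent pairs; with an odd
    // element count, the lone last element is swapped with position 0.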
std::vector<int> permutation(num_workers * num_devices);
std::iota(permutation.begin(), permutation.end(), 0);
for (int i = 0; i < permutation.size(); i += 2) {
if (permutation.size() == i + 1) {
std::swap(permutation[i], permutation[0]);
continue;
}
std::next_permutation(permutation.begin() + i,
permutation.begin() + i + 2);
}
Init(num_workers, num_devices, permutation, dtype,
TensorShape({tensor_len}), device_type, fail_after);
gtl::InlinedVector<T, 4> expected(tensor_len * num_devices * num_workers,
0.0);
for (int di = 0; di < instances_.size(); ++di) {
instances_[di]->InitTensor(
[&permutation, &expected, di, tensor_len](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
t->flat<T>()(i) = value;
expected[permutation[di] * tensor_len + i] = value;
}
});
}
Permute(fail_after);
for (int di = 0; di < instances_.size(); ++di) {
if (!instances_[di]->status_.ok()) {
ASSERT_GT(fail_after, 0);
ASSERT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
continue;
}
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(
test::AsTensor<T>(
absl::MakeSpan(expected).subspan(di * tensor_len, tensor_len)),
instances_[di]->output_tensor_);
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, std::vector<int> permutation, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env),
input_tensor_(dtype, shape),
output_tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "Permute",
PERMUTE_COLLECTIVE, dtype, shape);
col_params_->instance.permutation = std::move(permutation);
for (const CollGroupMember& member : col_params_->group.members) {
col_params_->instance.devices.push_back(member.device.name());
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
}
void InitTensor(const InitFunc& f) { f(&input_tensor_); }
void DoPermute() {
status_ = RunCollective(test_env_, col_params_.get(), device_,
&input_tensor_, &output_tensor_);
}
CollectiveTestEnv* test_env_;
Tensor input_tensor_;
Tensor output_tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
};
#define DEF_TEST(B, T, W, D, L, A) \
TEST_F(PermuterTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_BOOL: { \
RunTest<bool>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 3, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 2, 3, 0)
DEF_TEST(FLOAT, CPU, 2, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 4095, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 1045991, 0)
DEF_TEST(BOOL, CPU, 1, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1, 0)
DEF_TEST(BOOL, CPU, 2, 4, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 4, 128, 0)
DEF_TEST(INT32, CPU, 2, 4, 128, 0)
DEF_TEST(INT64, CPU, 2, 4, 128, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 1)
DEF_TEST(FLOAT, CPU, 2, 4, 128, 5)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 7, 3, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 33, 0)
DEF_TEST(FLOAT, GPU, 1, 3, 64, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1045991, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1, 0)
DEF_TEST(BOOL, GPU, 1, 4, 1001, 0)
DEF_TEST(DOUBLE, GPU, 1, 8, 1001, 0)
DEF_TEST(INT64, GPU, 1, 8, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 128, 6)
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/permuter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/permuter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
344e36fc-a0af-4770-9bfd-4f12ae7e6008 | cpp | tensorflow/tensorflow | optimized_function_graph_info | tensorflow/core/common_runtime/optimized_function_graph_info.cc | tensorflow/core/common_runtime/optimized_function_graph_info_test.cc | #include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include <memory>
#include <utility>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/op.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
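// Packs the optimized graph, its function library, return types, and
// optimization metadata into an OptimizedFunctionGraph proto.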
OptimizedFunctionGraph OptimizedFunctionGraphInfo::ToProto(
const OptimizedFunctionGraphInfo& info) {
OptimizedFunctionGraph proto;
proto.set_name(info.name);
GraphDef* function_graph_def = proto.mutable_function_graph();
info.function_graph->ToGraphDef(function_graph_def);
*function_graph_def->mutable_library() = info.lib_def.ToProto();
*proto.mutable_ret_types() = {info.ret_types.begin(), info.ret_types.end()};
proto.set_num_return_nodes(info.num_return_nodes);
*proto.mutable_node_name_to_control_ret() = {
info.node_name_to_control_ret.begin(),
info.node_name_to_control_ret.end()};
proto.set_optimization_time_usecs(info.optimization_duration_usecs);
proto.set_source(info.optimization_source);
return proto;
}
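// Rebuilds the Graph and FunctionLibraryDefinition from the proto. The
// library is moved out of the embedded GraphDef into a standalone
// FunctionLibraryDefinition, so the graph's own flib_def is cleared after
// construction.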
absl::StatusOr<OptimizedFunctionGraphInfo>
OptimizedFunctionGraphInfo::FromProto(OptimizedFunctionGraph&& proto) {
FunctionLibraryDefinition lib_def(OpRegistry::Global());
FunctionDefLibrary proto_library;
std::swap(proto_library, *proto.mutable_function_graph()->mutable_library());
TF_RETURN_IF_ERROR(lib_def.AddLibrary(std::move(proto_library)));
auto graph = std::make_unique<Graph>(OpRegistry::Global());
graph->mutable_flib_def()->set_default_registry(&lib_def);
GraphConstructorOptions options;
options.allow_internal_ops = true;
options.expect_device_spec = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
options, std::move(*proto.mutable_function_graph()), graph.get()));
graph->mutable_flib_def()->set_default_registry(nullptr);
graph->mutable_flib_def()->Clear();
const int num_ret_types = proto.ret_types_size();
DataTypeVector data_type_vector(num_ret_types);
for (int i = 0; i < num_ret_types; ++i) {
data_type_vector[i] = static_cast<DataType>(proto.ret_types().at(i));
}
return OptimizedFunctionGraphInfo(
proto.name(), std::move(graph), std::move(lib_def),
{proto.node_name_to_control_ret().begin(),
proto.node_name_to_control_ret().end()},
std::move(data_type_vector), proto.num_return_nodes(),
proto.optimization_time_usecs(), proto.source());
}
} | #include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/substitute.h"
#include "third_party/protobuf/text_format.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::EqualsProto;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::proto::Partially;
using ::tsl::testing::StatusIs;
REGISTER_OP("OneOutput").Output("y: float");
REGISTER_OP("OneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float");
constexpr absl::string_view kFunctionName = "test_func";
constexpr absl::string_view kLibraryPb =
R"pb(library {
function {
signature {
name: "NonZero"
input_arg { name: "x" type_attr: "T" }
output_arg { name: "y" type_attr: "T" }
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_STRING
}
}
}
}
node_def {
name: "y"
op: "Identity"
input: "x"
attr {
key: "T"
value { placeholder: "T" }
}
}
ret { key: "y" value: "y:output:0" }
}
})pb";
absl::StatusOr<OptimizedFunctionGraphInfo>
CreateSimpleOptimizedFunctionGraphInfo() {
NodeDef node_def;
TF_RETURN_IF_ERROR(NodeDefBuilder("A", "OneOutput").Finalize(&node_def));
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Status status;
graph->AddNode(node_def, &status);
TF_RETURN_IF_ERROR(status);
FunctionLibraryDefinition lib_def(OpRegistry::Global(), FunctionDefLibrary());
TF_RETURN_IF_ERROR(lib_def.AddFunctionDef(test::function::NonZero()));
return OptimizedFunctionGraphInfo(
std::string(kFunctionName), std::move(graph), std::move(lib_def),
{{"A", "B"}}, {DT_FLOAT, DT_DOUBLE}, 1, 5, OptimizedFunctionGraph::JIT);
}
TEST(OptimizedFunctionGraphUtilsTest, ToProtoProducesCorrectResult) {
TF_ASSERT_OK_AND_ASSIGN(OptimizedFunctionGraphInfo test_info,
CreateSimpleOptimizedFunctionGraphInfo());
const OptimizedFunctionGraph test_result =
OptimizedFunctionGraphInfo::ToProto(test_info);
EXPECT_THAT(test_result,
Partially(EqualsProto(absl::Substitute(
R"pb(
name: "test_func"
function_graph { node { name: "A" op: "OneOutput" } $0 }
node_name_to_control_ret { key: "A" value: "B" }
ret_types: DT_FLOAT
ret_types: DT_DOUBLE
num_return_nodes: 1
source: JIT
optimization_time_usecs: 5
)pb",
kLibraryPb))));
}
TEST(OptimizedFunctionGraphUtilsTest,
FromProtoProducesReturnsErrorIfGraphInvalid) {
OptimizedFunctionGraph proto;
proto2::TextFormat::ParseFromString(
R"pb(
name: "test_func",
function_graph { node { name: 'B' op: 'OneOutput' } $0 }
)pb",
&proto);
EXPECT_THAT(OptimizedFunctionGraphInfo::FromProto(std::move(proto)),
StatusIs(tsl::error::INVALID_ARGUMENT,
"Node 'B' is missing a device specification"));
}
TEST(OptimizedFunctionGraphUtilsTest, FromProtoProducesCorrectResult) {
OptimizedFunctionGraph proto;
proto2::TextFormat::ParseFromString(
absl::Substitute(
R"pb(
name: "test_func",
function_graph {
node { name: 'B' op: 'OneInputTwoOutputs' device: ':CPU' } $0
}
node_name_to_control_ret { key: "B" value: "A" }
ret_types: DT_FLOAT
ret_types: DT_DOUBLE
ret_types: DT_BOOL
num_return_nodes: 2
optimization_time_usecs: 15
source: 1
)pb",
kLibraryPb),
&proto);
const absl::StatusOr<OptimizedFunctionGraphInfo> test_result =
OptimizedFunctionGraphInfo::FromProto(std::move(proto));
TF_EXPECT_OK(test_result.status());
GraphDef test_result_graph_def;
test_result->function_graph->ToGraphDef(&test_result_graph_def);
EXPECT_THAT(test_result_graph_def,
Partially(EqualsProto(R"pb(node {
name: 'B'
op: 'OneInputTwoOutputs'
device: ':CPU'
})pb")));
EXPECT_EQ(test_result->function_graph->flib_def().Find("NonZero"), nullptr);
EXPECT_NE(test_result->lib_def.Find("NonZero"), nullptr);
EXPECT_THAT(test_result->ret_types,
ElementsAre(DT_FLOAT, DT_DOUBLE, DT_BOOL));
EXPECT_THAT(test_result->node_name_to_control_ret,
UnorderedElementsAre(Pair("B", "A")));
EXPECT_EQ(test_result->num_return_nodes, 2);
EXPECT_EQ(test_result->optimization_duration_usecs, 15);
EXPECT_EQ(test_result->optimization_source, OptimizedFunctionGraph::AOT);
}
TEST(OptimizedFunctionGraphUtilsTest,
FromProtoProducesCorrectResultWithFunctionCall) {
OptimizedFunctionGraph proto;
proto2::TextFormat::ParseFromString(
absl::Substitute(
R"pb(
name: "test_func",
function_graph {
node {
name: 'B'
op: 'NonZero'
device: ':CPU'
attr {
key: "T"
value { type: DT_FLOAT }
}
} $0
}
node_name_to_control_ret { key: "B" value: "A" }
ret_types: DT_FLOAT
ret_types: DT_DOUBLE
ret_types: DT_BOOL
num_return_nodes: 2
optimization_time_usecs: 15
source: 1
)pb",
kLibraryPb),
&proto);
const absl::StatusOr<OptimizedFunctionGraphInfo> test_result =
OptimizedFunctionGraphInfo::FromProto(std::move(proto));
TF_EXPECT_OK(test_result.status());
GraphDef test_result_graph_def;
test_result->function_graph->ToGraphDef(&test_result_graph_def);
EXPECT_THAT(test_result_graph_def,
Partially(EqualsProto(R"pb(node {
name: 'B'
op: 'NonZero'
device: ':CPU'
attr {
key: "T"
value { type: DT_FLOAT }
}
})pb")));
EXPECT_EQ(test_result->function_graph->flib_def().Find("NonZero"), nullptr);
EXPECT_NE(test_result->lib_def.Find("NonZero"), nullptr);
EXPECT_THAT(test_result->ret_types,
ElementsAre(DT_FLOAT, DT_DOUBLE, DT_BOOL));
EXPECT_THAT(test_result->node_name_to_control_ret,
UnorderedElementsAre(Pair("B", "A")));
EXPECT_EQ(test_result->num_return_nodes, 2);
EXPECT_EQ(test_result->optimization_duration_usecs, 15);
EXPECT_EQ(test_result->optimization_source, OptimizedFunctionGraph::AOT);
}
TEST(OptimizedFunctionGraphUtilsTest, MoveTest) {
TF_ASSERT_OK_AND_ASSIGN(OptimizedFunctionGraphInfo test_info,
CreateSimpleOptimizedFunctionGraphInfo());
OptimizedFunctionGraphInfo moved_result = std::move(test_info);
GraphDef moved_result_graph_def;
moved_result.function_graph->ToGraphDef(&moved_result_graph_def);
EXPECT_EQ(moved_result.name, kFunctionName);
EXPECT_THAT(
moved_result_graph_def,
Partially(EqualsProto(R"pb(node { name: 'A' op: 'OneOutput' })pb")));
EXPECT_NE(moved_result.lib_def.Find("NonZero"), nullptr);
EXPECT_THAT(moved_result.ret_types, ElementsAre(DT_FLOAT, DT_DOUBLE));
EXPECT_THAT(moved_result.node_name_to_control_ret,
UnorderedElementsAre(Pair("A", "B")));
EXPECT_EQ(moved_result.num_return_nodes, 1);
EXPECT_EQ(moved_result.optimization_duration_usecs, 5);
EXPECT_EQ(moved_result.optimization_source, OptimizedFunctionGraph::JIT);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimized_function_graph_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimized_function_graph_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87fe2bee-23db-45e0-a461-fab9ad7c0991 | cpp | tensorflow/tensorflow | null_request_cost_accessor | tensorflow/core/common_runtime/null_request_cost_accessor.cc | tensorflow/core/common_runtime/null_request_cost_accessor_test.cc | #include "tensorflow/core/common_runtime/null_request_cost_accessor.h"
namespace tensorflow {
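// Always reports no cost: callers receive nullptr and skip cost recording.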
RequestCost* NullRequestCostAccessor::GetRequestCost() const { return nullptr; }
REGISTER_REQUEST_COST_ACCESSOR("null", NullRequestCostAccessor);
} | #include "tensorflow/core/common_runtime/null_request_cost_accessor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(NullRequestCostAccessorTest, Basic) {
NullRequestCostAccessor accessor;
EXPECT_EQ(accessor.GetRequestCost(), nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/null_request_cost_accessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/null_request_cost_accessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dde88d8c-a50e-4c90-9f5c-0a007cee16b9 | cpp | tensorflow/tensorflow | colocate_predecessor_trees_pass | tensorflow/core/common_runtime/colocate_predecessor_trees_pass.cc | tensorflow/core/common_runtime/colocate_predecessor_trees_pass_test.cc | #include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kClassAttr = "_class";
constexpr absl::string_view kFill = "Fill";
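// A Fill node qualifies for tree colocation only if it carries no device
// placement or colocation constraint of its own and has a CPU kernel.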
bool IsValidFillOp(const Node& node) {
if (node.type_string() != kFill) {
return false;
}
if (node.IsArg()) {
return false;
}
if (node.has_assigned_device_name()) {
return false;
}
if (!node.requested_device().empty()) {
return false;
}
if (HasNodeAttr(node.def(), kClassAttr)) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), node.def())) {
return false;
}
return true;
}
bool IsValidIdentityNode(const Node& node) {
if (!node.IsIdentity()) {
return false;
}
if (node.requested_device().empty()) {
return false;
}
auto device_name = node.requested_device();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
if (parsed_device_name.type != DEVICE_CPU) {
return false;
}
if (node.IsArg()) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), node.def())) {
return false;
}
return true;
}
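// Returns a "loc:@<identity-name>" colocation string when every consumer of
// the Fill node is a qualified Identity op requesting the same CPU device;
// returns std::nullopt otherwise.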
std::optional<std::string> GetColocateStringName(const Node& fill_node) {
std::string device = "";
std::string colocation_prefix = "loc:@";
std::string colocation_name = "";
for (auto output_node : fill_node.out_nodes()) {
if (!IsValidIdentityNode(*output_node)) return std::nullopt;
if (device.empty()) {
device = output_node->requested_device();
colocation_name = absl::StrCat(colocation_prefix, output_node->name());
} else if (device != output_node->requested_device()) {
return std::nullopt;
}
}
if (colocation_name.empty()) return std::nullopt;
return colocation_name;
}
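// Every input of the Fill node must be an unplaced Const op with a CPU kernel
// and no pre-existing colocation attribute.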
bool AreAllInNodesQualifiedConst(const Node& node) {
for (auto in_node : node.in_nodes()) {
if (!in_node->IsConstant()) {
return false;
}
if (in_node->IsArg()) {
return false;
}
if (in_node->has_assigned_device_name()) {
return false;
}
if (!in_node->requested_device().empty()) {
return false;
}
if (HasNodeAttr(in_node->def(), kClassAttr)) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), in_node->def())) {
return false;
}
}
return true;
}
bool ShouldRunPass(const GraphOptimizationPassOptions& options) {
if (!flags::Global().enable_tf2min_ici_weight.value()) {
VLOG(1) << "ColocatePredecessorTreesPass is disabled.";
return false;
}
VLOG(1) << "ColocatePredecessorTreesPass is enabled.";
if (options.graph == nullptr) {
LOG(INFO) << "No graph in colocate_predecessor_trees_pass.\n";
return false;
}
return true;
}
void LogGraphProperties(bool is_graph_changed, bool has_valid_fill_op,
bool has_colocation_name, bool has_qualified_const,
Graph* graph,
const GraphOptimizationPassOptions& options) {
if (is_graph_changed) {
VLOG(1) << "Graph is changed by ColocatePredecessorTreesPass.";
VLOG(1) << DumpGraphToFile("graph_changed_after_colocate_predecessor_trees",
*graph, options.flib_def);
} else {
VLOG(1) << "Graph is not changed by ColocatePredecessorTreesPass.";
VLOG(1) << "has_valid_fill_op: " << has_valid_fill_op;
VLOG(1) << "has_colocation_name: " << has_colocation_name;
VLOG(1) << "has_qualified_const: " << has_qualified_const;
VLOG(1) << DumpGraphToFile(
"graph_not_changed_after_colocate_predecessor_trees", *graph,
options.flib_def);
}
}
}
Status ColocatePredecessorTreesPass::Run(
const GraphOptimizationPassOptions& options) {
if (!ShouldRunPass(options)) {
return absl::OkStatus();
}
Graph* graph = options.graph->get();
VLOG(1) << DumpGraphToFile("graph_before_colocate_predecessor_trees", *graph,
options.flib_def);
bool is_graph_changed = false;
bool has_valid_fill_op = false;
bool has_colocation_name = false;
bool has_qualified_const = false;
for (Node* node : graph->nodes()) {
if (!IsValidFillOp(*node)) {
continue;
}
has_valid_fill_op = true;
auto colocation_name = GetColocateStringName(*node);
if (!colocation_name.has_value()) continue;
has_colocation_name = true;
if (!AreAllInNodesQualifiedConst(*node)) continue;
has_qualified_const = true;
is_graph_changed = true;
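    // Stamp the colocation class on the Fill node, its Const inputs, and its
    // Identity outputs so the placer keeps the whole tree on one device.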
node->AddAttr(std::string(kClassAttr), {*colocation_name});
for (auto in_node : node->in_nodes()) {
in_node->AddAttr(std::string(kClassAttr), {*colocation_name});
}
for (auto out_node : node->out_nodes()) {
out_node->AddAttr(std::string(kClassAttr), {*colocation_name});
}
}
LogGraphProperties(is_graph_changed, has_valid_fill_op, has_colocation_name,
has_qualified_const, graph, options);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 50,
ColocatePredecessorTreesPass);
} | #include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/test.h"
namespace tensorflow {
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:1";
const char kClassAttr[] = "_class";
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
TEST(ColocatePredecessorTreesPassTest, ICIFlagFalse) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, SimpleExample) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
TEST(ColocatePredecessorTreesPassTest, PropagateTwoTrees) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
Node* const_2 = ops::SourceOp("Const", builder.opts()
.WithName("const_2")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_3 = ops::SourceOp("Const", builder.opts()
.WithName("const_3")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill_1 = ops::BinaryOp("Fill", const_2, const_3,
builder.opts().WithName("fill_1"));
ops::UnaryOp("Identity", fill_1, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_2")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_3")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
std::string expected_colocation_info_1 = "loc:@identity_1";
TF_EXPECT_OK(
GetNode(*graph, "const_2")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "const_3")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "fill_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
}
TEST(ColocatePredecessorTreesPassTest, RootHasMultipleOutputs) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
Node* identity =
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_0"));
ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_1"));
ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_2"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_0")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_2")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
TEST(ColocatePredecessorTreesPassTest, ConstHasDeviceAttr) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "const_0")->set_requested_device(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, ConstHasColocationInfo) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 =
ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0))
.WithAttr("_class", {"loc:@fill"}));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
Node* identity =
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(const_0->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(const_1->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(fill->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(identity->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, InputArg) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* arg_0 = ops::SourceOp("_Arg", builder.opts()
.WithName("arg_0")
.WithAttr("T", DT_INT32)
.WithAttr("index", 0));
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", arg_0, const_0, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "arg_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/colocate_predecessor_trees_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/colocate_predecessor_trees_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f0971d1-663f-46fc-8015-b54e915c2489 | cpp | tensorflow/tensorflow | dynamic_device_mgr | tensorflow/core/common_runtime/dynamic_device_mgr.cc | tensorflow/core/common_runtime/dynamic_device_mgr_test.cc | #include <atomic>
#include <iterator>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
DynamicDeviceMgr::DynamicDeviceMgr() : cpu_device_(nullptr) {}
DynamicDeviceMgr::DynamicDeviceMgr(
std::vector<std::unique_ptr<Device>>&& devices)
: cpu_device_(nullptr) {
Status status = AddDevices(std::move(devices));
CHECK(status.ok());
mutex_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
Device* d = it.first;
if (d->device_type() == DEVICE_CPU && d->parsed_name().id == 0) {
cpu_device_ = d;
break;
}
}
}
DynamicDeviceMgr::DynamicDeviceMgr(std::unique_ptr<Device>&& device)
: DynamicDeviceMgr([&device] {
std::vector<std::unique_ptr<Device>> vector;
vector.push_back(std::move(device));
return vector;
}()) {}
DynamicDeviceMgr::~DynamicDeviceMgr() {
mutex_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
it.first->ClearResourceMgr();
}
}
void DynamicDeviceMgr::ListDeviceAttributes(
std::vector<DeviceAttributes>* devices) const {
tf_shared_lock l(devices_mu_);
devices->reserve(dynamic_devices_.size());
for (const auto& it : dynamic_devices_) {
devices->emplace_back(it.first->attributes());
}
}
std::vector<Device*> DynamicDeviceMgr::ListDevices() const {
tf_shared_lock l(devices_mu_);
std::vector<Device*> devices;
devices.reserve(dynamic_devices_.size());
for (const auto& it : dynamic_devices_) {
devices.emplace_back(it.first);
}
return devices;
}
string DynamicDeviceMgr::DebugString() const {
string out;
tf_shared_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
strings::StrAppend(&out, it.first->name(), "\n");
}
return out;
}
string DynamicDeviceMgr::DeviceMappingString() const {
string out;
tf_shared_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
auto d = it.first;
if (!d->attributes().physical_device_desc().empty()) {
strings::StrAppend(&out, d->name(), " -> ",
d->attributes().physical_device_desc(), "\n");
}
}
return out;
}
Status DynamicDeviceMgr::LookupDevice(StringPiece name, Device** device) const {
tf_shared_lock l(devices_mu_);
auto iter = device_map_.find(string(name));
if (iter == device_map_.end()) {
std::vector<StringPiece> device_names;
device_names.reserve(device_map_.size());
for (auto&& itr : device_map_) {
device_names.push_back(itr.first);
}
VLOG(1) << "Unknown device: " << name
<< " all devices: " << absl::StrJoin(device_names, ", ");
return errors::InvalidArgument(name, " unknown device.");
}
*device = iter->second;
return absl::OkStatus();
}
bool DynamicDeviceMgr::ContainsDevice(int64_t device_incarnation) const {
tf_shared_lock l(devices_mu_);
return device_incarnation_set_.contains(device_incarnation);
}
void DynamicDeviceMgr::ClearContainers(
absl::Span<const string> containers) const {
Status s;
tf_shared_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
auto d = it.first;
if (containers.empty()) {
s.Update(d->resource_manager()->Cleanup(
d->resource_manager()->default_container()));
} else {
for (const string& c : containers) {
s.Update(d->resource_manager()->Cleanup(c));
}
}
if (!s.ok()) {
LOG(WARNING) << s;
}
}
}
int DynamicDeviceMgr::NumDeviceType(const string& type) const {
tf_shared_lock l(devices_mu_);
auto iter = device_type_counts_.find(type);
if (iter != device_type_counts_.end()) return iter->second;
return 0;
}
int DynamicDeviceMgr::NumDevices() const {
tf_shared_lock l(devices_mu_);
return dynamic_devices_.size();
}
Status DynamicDeviceMgr::AddDevices(
std::vector<std::unique_ptr<Device>> devices) {
mutex_lock l(devices_mu_);
for (auto& d : devices) {
if (device_map_.find(d->name()) != device_map_.end()) {
return errors::InvalidArgument(
"Trying to add device ", d->name(),
" to manager but its name conflicts with an existing device.");
}
for (const string& name :
DeviceNameUtils::GetNamesForDeviceMappings(d->parsed_name())) {
device_map_[name] = d.get();
}
for (const string& name :
DeviceNameUtils::GetLocalNamesForDeviceMappings(d->parsed_name())) {
device_map_[name] = d.get();
}
device_type_counts_[d->device_type()]++;
device_incarnation_set_.insert(d->attributes().incarnation());
dynamic_devices_.emplace(d.get(), std::move(d));
}
return absl::OkStatus();
}
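// Two-pass removal: first validate every device so the operation is
// all-or-nothing, then unmap names and park the owning pointers in
// stale_devices_ instead of deleting them immediately, since raw Device
// pointers may still be referenced by in-flight work.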
Status DynamicDeviceMgr::RemoveDevices(const std::vector<Device*>& devices) {
mutex_lock l(devices_mu_);
for (const auto& d : devices) {
if (d == cpu_device_) {
TF_RETURN_IF_ERROR(
errors::InvalidArgument("Can not remove HostCPU device ", d->name()));
}
const auto it = dynamic_devices_.find(d);
if (it == dynamic_devices_.end()) {
return errors::InvalidArgument("Unknown device ", d->name());
}
}
for (const auto& d : devices) {
for (const string& name :
DeviceNameUtils::GetNamesForDeviceMappings(d->parsed_name())) {
device_map_.erase(name);
}
for (const string& name :
DeviceNameUtils::GetLocalNamesForDeviceMappings(d->parsed_name())) {
device_map_.erase(name);
}
device_type_counts_[d->device_type()]--;
device_incarnation_set_.erase(d->attributes().incarnation());
    auto it = dynamic_devices_.find(d);
    // Already validated in the first loop under the same lock, so the lookup
    // cannot fail here.
    CHECK(it != dynamic_devices_.end());
stale_devices_.add(std::move(it->second));
dynamic_devices_.erase(it);
}
return absl::OkStatus();
}
Status DynamicDeviceMgr::RemoveDevicesByName(
const std::vector<string>& device_names) {
std::vector<Device*> devices_to_remove;
for (const string& name : device_names) {
Device* device;
TF_RETURN_IF_ERROR(LookupDevice(name, &device));
devices_to_remove.emplace_back(device);
}
return RemoveDevices(devices_to_remove);
}
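// Returns the CPU:0 device if one has been added. The relaxed atomic load
// keeps the common case lock-free; the slow path scans under the mutex and
// caches the result.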
Device* DynamicDeviceMgr::HostCPU() const {
Device* device = cpu_device_.load(std::memory_order_relaxed);
if (device != nullptr) return device;
mutex_lock l(devices_mu_);
for (const auto& it : dynamic_devices_) {
Device* d = it.first;
if (d->device_type() == DEVICE_CPU && d->parsed_name().id == 0) {
cpu_device_ = d;
break;
}
}
return cpu_device_.load(std::memory_order_relaxed);
}
} | #include <memory>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
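// Creates a fake device. When a Notification is supplied, it is signalled
// from the device destructor, letting tests observe when stale devices are
// actually freed.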
static Device* CreateDevice(const char* type, const char* name,
Notification* n = nullptr) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
class FakeDeviceWithDestructorNotification : public FakeDevice {
public:
FakeDeviceWithDestructorNotification(const DeviceAttributes& attr,
Notification* n)
: FakeDevice(attr), n_(n) {}
~FakeDeviceWithDestructorNotification() override { n_->Notify(); }
private:
Notification* n_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
do {
attr.set_incarnation(random::New64());
} while (attr.incarnation() == 0);
if (n) {
return new FakeDeviceWithDestructorNotification(attr, n);
}
return new FakeDevice(attr);
}
TEST(DynamicDeviceMgrTest, AddDeviceToMgr) {
std::unique_ptr<Device> d0(CreateDevice("CPU", "/device:CPU:1"));
std::unique_ptr<Device> d1(CreateDevice("CPU", "/device:CPU:0"));
auto dm = std::make_unique<DynamicDeviceMgr>();
EXPECT_EQ(dm->ListDevices().size(), 0);
std::vector<std::unique_ptr<Device>> added_devices;
added_devices.emplace_back(std::move(d0));
added_devices.emplace_back(std::move(d1));
TF_CHECK_OK(dm->AddDevices(std::move(added_devices)));
EXPECT_EQ(dm->ListDevices().size(), 2);
EXPECT_EQ(dm->ListDevices()[0]->name(), "/device:CPU:0");
EXPECT_EQ(dm->ListDevices()[1]->name(), "/device:CPU:1");
}
TEST(DynamicDeviceMgrTest, RemoveDeviceFromMgr) {
std::unique_ptr<Device> d0(CreateDevice("CPU", "/device:CPU:0"));
std::unique_ptr<Device> d1(CreateDevice("CPU", "/device:CPU:1"));
Device* d1_ptr = d1.get();
const int64_t d1_incarnation = d1->attributes().incarnation();
auto dm = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
devices.emplace_back(std::move(d1));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 2);
std::vector<Device*> removed_devices{d1_ptr};
TF_CHECK_OK(dm->RemoveDevices(removed_devices));
EXPECT_EQ(dm->ListDevices().size(), 1);
EXPECT_FALSE(dm->ContainsDevice(d1_incarnation));
EXPECT_EQ(d1_ptr->name(), "/device:CPU:1");
EXPECT_EQ(d1_ptr->device_type(), "CPU");
}
TEST(DynamicDeviceMgrTest, RemoveDeviceFromMgrBuffer) {
Notification n;
std::unique_ptr<Device> d0(CreateDevice("CPU", "/device:CPU:0", &n));
Device* d0_ptr = d0.get();
std::vector<std::unique_ptr<Device>> added_devices;
added_devices.emplace_back(std::move(d0));
auto dm = std::make_unique<DynamicDeviceMgr>();
TF_CHECK_OK(dm->AddDevices(std::move(added_devices)));
std::vector<Device*> removed_devices{d0_ptr};
TF_CHECK_OK(dm->RemoveDevices(removed_devices));
for (int i = 0; i < kStaleDeviceBufferSize; i++) {
added_devices.clear();
removed_devices.clear();
std::unique_ptr<Device> d(CreateDevice("CPU", "/device:CPU:0"));
Device* d_ptr = d.get();
added_devices.emplace_back(std::move(d));
TF_CHECK_OK(dm->AddDevices(std::move(added_devices)));
removed_devices.emplace_back(d_ptr);
TF_CHECK_OK(dm->RemoveDevices(removed_devices));
}
n.WaitForNotification();
}
TEST(DynamicDeviceMgrTest, RemoveDeviceByNameFromMgr) {
std::unique_ptr<Device> d0(CreateDevice("CPU", "/device:CPU:0"));
std::unique_ptr<Device> d1(CreateDevice("CPU", "/device:CPU:1"));
string d1_name = "/device:CPU:1";
auto dm = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
devices.emplace_back(std::move(d1));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 2);
std::vector<string> removed_devices{d1_name};
TF_CHECK_OK(dm->RemoveDevicesByName(removed_devices));
EXPECT_EQ(dm->ListDevices().size(), 1);
}
TEST(DynamicDeviceMgrTest, AddRepeatedDeviceToMgr) {
std::unique_ptr<Device> d0(CreateDevice("CPU", "/device:CPU:0"));
std::unique_ptr<Device> d1(CreateDevice("CPU", "/device:CPU:0"));
auto dm = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 1);
std::vector<std::unique_ptr<Device>> added_devices;
added_devices.emplace_back(std::move(d1));
Status s = dm->AddDevices(std::move(added_devices));
EXPECT_TRUE(
absl::StrContains(s.message(), "name conflicts with an existing device"));
}
TEST(DynamicDeviceMgrTest, RemoveNonExistingDeviceFromMgr) {
std::unique_ptr<Device> d0(CreateDevice("GPU", "/device:GPU:0"));
std::unique_ptr<Device> d1(CreateDevice("CPU", "/device:CPU:1"));
Device* d0_ptr = d0.get();
Device* d1_ptr = d1.get();
auto dm = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 1);
std::vector<Device*> removed_devices{d0_ptr, d1_ptr};
Status s = dm->RemoveDevices(removed_devices);
EXPECT_TRUE(absl::StrContains(s.message(), "Unknown device"));
EXPECT_EQ(dm->ListDevices().size(), 1);
}
TEST(DynamicDeviceMgrTest, RemoveNonExistingDeviceByNameFromMgr) {
std::unique_ptr<Device> d0(CreateDevice("GPU", "/device:GPU:0"));
string d0_name = "/device:GPU:0";
string d1_name = "/device:CPU:0";
auto dm = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 1);
std::vector<string> removed_devices{d0_name, d1_name};
Status s = dm->RemoveDevicesByName(removed_devices);
EXPECT_TRUE(absl::StrContains(s.message(), "unknown device"));
EXPECT_EQ(dm->ListDevices().size(), 1);
}
TEST(DynamicDeviceMgrTest, HostCPU) {
auto dm = std::make_unique<DynamicDeviceMgr>();
std::unique_ptr<Device> gpu(CreateDevice("GPU", "/device:GPU:0"));
Device* gpu_ptr = gpu.get();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(gpu));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 1);
EXPECT_EQ(dm->HostCPU(), nullptr);
std::unique_ptr<Device> cpu0(CreateDevice("CPU", "/device:CPU:0"));
Device* cpu0_ptr = cpu0.get();
devices.clear();
devices.emplace_back(std::move(cpu0));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 2);
EXPECT_EQ(dm->HostCPU(), cpu0_ptr);
std::unique_ptr<Device> cpu1(CreateDevice("CPU", "/device:CPU:1"));
Device* cpu1_ptr = cpu1.get();
devices.clear();
devices.emplace_back(std::move(cpu1));
TF_CHECK_OK(dm->AddDevices(std::move(devices)));
EXPECT_EQ(dm->ListDevices().size(), 3);
EXPECT_EQ(dm->HostCPU(), cpu0_ptr);
std::vector<Device*> removed{gpu_ptr, cpu0_ptr};
EXPECT_TRUE(absl::StrContains(dm->RemoveDevices(removed).message(),
"Can not remove HostCPU device"));
EXPECT_EQ(dm->ListDevices().size(), 3);
EXPECT_EQ(dm->HostCPU(), cpu0_ptr);
removed = std::vector<Device*>{cpu1_ptr};
TF_CHECK_OK(dm->RemoveDevices(removed));
EXPECT_EQ(dm->ListDevices().size(), 2);
EXPECT_EQ(dm->HostCPU(), cpu0_ptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/dynamic_device_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/dynamic_device_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2acfe0d1-f7e4-4526-b22b-1e7ab437fdaa | cpp | tensorflow/tensorflow | no_op_cost_measurement | tensorflow/core/common_runtime/no_op_cost_measurement.cc | tensorflow/core/common_runtime/no_op_cost_measurement_test.cc | #include "tensorflow/core/common_runtime/no_op_cost_measurement.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
namespace tensorflow {
absl::Duration NoOpCostMeasurement::GetTotalCost() { return absl::Duration(); }
absl::string_view NoOpCostMeasurement::GetCostType() const {
return kNoOpCostName;
}
REGISTER_COST_MEASUREMENT(kNoOpCostName, NoOpCostMeasurement);
} | #include "tensorflow/core/common_runtime/no_op_cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(NoOpCostMeasurementTest, Basic) {
CostMeasurement::Context context;
NoOpCostMeasurement measurement(context);
EXPECT_EQ(measurement.GetTotalCost(), absl::ZeroDuration());
EXPECT_EQ(measurement.GetCostType(), "no_op");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/no_op_cost_measurement.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/no_op_cost_measurement_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3f596921-1e1e-44b7-9ecd-c858da785106 | cpp | tensorflow/tensorflow | pool_allocator | tensorflow/core/common_runtime/pool_allocator.cc | tensorflow/core/common_runtime/gpu/pool_allocator_test.cc | #include "tensorflow/core/common_runtime/pool_allocator.h"
#include <errno.h>
#ifndef _MSC_VER
#include <strings.h>
#include <sys/mman.h>
#endif
#include <map>
#include <utility>
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/numa.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
PoolAllocator::PoolAllocator(size_t pool_size_limit, bool auto_resize,
SubAllocator* allocator,
RoundUpInterface* size_rounder, string name)
: name_(std::move(name)),
has_size_limit_(pool_size_limit > 0),
auto_resize_(auto_resize),
pool_size_limit_(pool_size_limit),
allocator_(allocator),
size_rounder_(size_rounder) {
if (auto_resize) {
CHECK_LT(size_t{0}, pool_size_limit)
<< "size limit must be > 0 if auto_resize is true.";
}
}
PoolAllocator::~PoolAllocator() { Clear(); }
namespace {
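// Every pool allocation is preceded by a ChunkPrefix recording the rounded
// request size and the pointer returned by the underlying allocator, so
// DeallocateRaw can recover both from the user-visible pointer.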
struct ChunkPrefix {
size_t num_bytes;
void* chunk_ptr;
};
static const int kPoolAlignment = sizeof(ChunkPrefix);
void* PrepareChunk(void* chunk, size_t alignment, size_t num_bytes) {
ChunkPrefix* cp = reinterpret_cast<ChunkPrefix*>(chunk);
cp->num_bytes = num_bytes;
cp->chunk_ptr = chunk;
void* user_ptr = reinterpret_cast<void*>(cp + 1);
if (alignment > kPoolAlignment) {
size_t aligned_ptr = reinterpret_cast<size_t>(user_ptr) + alignment;
user_ptr = reinterpret_cast<void*>(aligned_ptr & ~(alignment - 1));
(reinterpret_cast<ChunkPrefix*>(user_ptr) - 1)->chunk_ptr = chunk;
}
CHECK_GE(user_ptr, reinterpret_cast<ChunkPrefix*>(chunk) + 1);
return user_ptr;
}
ChunkPrefix* FindPrefix(void* user_ptr) {
ChunkPrefix* cp = reinterpret_cast<ChunkPrefix*>(user_ptr) - 1;
return reinterpret_cast<ChunkPrefix*>(cp->chunk_ptr);
}
}
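// Rounds the request up (prefix plus alignment slack included) and serves it
// from the pool when a chunk of that exact rounded size is cached; otherwise
// falls through to the underlying allocator.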
void* PoolAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
if (num_bytes == 0) return nullptr;
if (alignment > kPoolAlignment) {
num_bytes += alignment;
}
num_bytes += sizeof(ChunkPrefix);
num_bytes = size_rounder_->RoundUp(num_bytes);
PtrRecord* pr = nullptr;
if (has_size_limit_) {
{
mutex_lock lock(mutex_);
auto iter = pool_.find(num_bytes);
if (iter == pool_.end()) {
allocated_count_++;
} else {
get_from_pool_count_++;
pr = iter->second;
RemoveFromList(pr);
pool_.erase(iter);
}
}
}
if (pr != nullptr) {
void* r = pr->ptr;
delete pr;
return PrepareChunk(r, alignment, num_bytes);
} else {
size_t bytes_received;
void* ptr = allocator_->Alloc(kPoolAlignment, num_bytes, &bytes_received);
return PrepareChunk(ptr, alignment, bytes_received);
}
}
void PoolAllocator::DeallocateRaw(void* ptr) {
if (ptr == nullptr) return;
ChunkPrefix* cp = FindPrefix(ptr);
CHECK_LE((void*)cp, (void*)ptr);
if (!has_size_limit_ && !auto_resize_) {
allocator_->Free(cp, cp->num_bytes);
} else {
mutex_lock lock(mutex_);
++put_count_;
while (pool_.size() >= pool_size_limit_) {
EvictOne();
}
PtrRecord* pr = new PtrRecord;
pr->num_bytes = cp->num_bytes;
pr->ptr = cp;
AddToList(pr);
pool_.insert(std::make_pair(cp->num_bytes, pr));
}
}
void PoolAllocator::Clear() {
if (has_size_limit_) {
mutex_lock lock(mutex_);
for (auto iter : pool_) {
PtrRecord* pr = iter.second;
allocator_->Free(pr->ptr, pr->num_bytes);
delete pr;
}
pool_.clear();
get_from_pool_count_ = 0;
put_count_ = 0;
allocated_count_ = 0;
evicted_count_ = 0;
lru_head_ = nullptr;
lru_tail_ = nullptr;
}
}
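// Unlinks pr from the doubly linked LRU list. When pr is the head, lru_head_
// is first cleared and then restored to pr->next in the second branch (or
// left null if pr was the only element).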
void PoolAllocator::RemoveFromList(PtrRecord* pr) {
if (pr->prev == nullptr) {
DCHECK_EQ(lru_head_, pr);
lru_head_ = nullptr;
} else {
pr->prev->next = pr->next;
}
if (pr->next == nullptr) {
DCHECK_EQ(lru_tail_, pr);
lru_tail_ = pr->prev;
} else {
pr->next->prev = pr->prev;
if (lru_head_ == nullptr) {
lru_head_ = pr->next;
}
}
}
void PoolAllocator::AddToList(PtrRecord* pr) {
pr->prev = nullptr;
if (lru_head_ == nullptr) {
CHECK(lru_tail_ == nullptr);
lru_tail_ = pr;
pr->next = nullptr;
} else {
pr->next = lru_head_;
pr->next->prev = pr;
}
lru_head_ = pr;
}
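// Evicts the least recently used chunk back to the underlying allocator.
// Every kCheckInterval evictions the eviction and miss rates are examined
// and, when auto_resize_ is set, the pool size limit is grown.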
void PoolAllocator::EvictOne() {
DCHECK(lru_tail_ != nullptr);
PtrRecord* prec = lru_tail_;
RemoveFromList(prec);
auto iter = pool_.find(prec->num_bytes);
while (iter->second != prec) {
++iter;
DCHECK(iter != pool_.end());
}
pool_.erase(iter);
allocator_->Free(prec->ptr, prec->num_bytes);
delete prec;
++evicted_count_;
static const double kTolerable = 2e-3;
static const int kCheckInterval = 1000;
static const double kIncreaseFactor = 1.1;
static const int kMinPoolSize = 100;
if (0 == evicted_count_ % kCheckInterval) {
const double eviction_rate =
evicted_count_ / static_cast<double>(put_count_);
const int64_t alloc_request_count = allocated_count_ + get_from_pool_count_;
const double alloc_rate =
(alloc_request_count == 0)
? 0.0
: allocated_count_ / static_cast<double>(alloc_request_count);
const bool kShouldLog = false;
if (kShouldLog) {
LOG(INFO) << "PoolAllocator: After " << alloc_request_count
<< " get requests, put_count=" << put_count_
<< " evicted_count=" << evicted_count_
<< " eviction_rate=" << eviction_rate
<< " and unsatisfied allocation rate=" << alloc_rate;
}
if (auto_resize_ && (eviction_rate > kTolerable) &&
(alloc_rate > kTolerable)) {
size_t new_size_limit = (pool_size_limit_ < kMinPoolSize)
? kMinPoolSize
: (kIncreaseFactor * pool_size_limit_);
if (kShouldLog) {
LOG(INFO) << "Raising pool_size_limit_ from " << pool_size_limit_
<< " to " << new_size_limit;
}
pool_size_limit_ = new_size_limit;
put_count_ = 0;
allocated_count_ = 0;
evicted_count_ = 0;
get_from_pool_count_ = 0;
}
}
}
void* BasicCPUAllocator::Alloc(size_t alignment, size_t num_bytes,
size_t* bytes_received) {
tsl::profiler::TraceMe traceme("BasicCPUAllocator::Alloc");
void* ptr = nullptr;
*bytes_received = num_bytes;
if (num_bytes > 0) {
if (numa_node_ == port::kNUMANoAffinity) {
ptr = port::AlignedMalloc(num_bytes, static_cast<int>(alignment));
} else {
ptr =
port::NUMAMalloc(numa_node_, num_bytes, static_cast<int>(alignment));
}
VisitAlloc(ptr, numa_node_, num_bytes);
}
return ptr;
}
void BasicCPUAllocator::Free(void* ptr, size_t num_bytes) {
tsl::profiler::TraceMe traceme("BasicCPUAllocator::Free");
if (num_bytes > 0) {
VisitFree(ptr, numa_node_, num_bytes);
if (numa_node_ == port::kNUMANoAffinity) {
port::AlignedFree(ptr);
} else {
port::NUMAFree(ptr, num_bytes);
}
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/pool_allocator.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/common_runtime/device/device_host_allocator.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(PoolAllocatorTest, ZeroSizeBuffers) {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
  PoolAllocator pool(2 /*pool_size_limit*/, false /*auto_resize*/,
                     new DeviceHostAllocator(
                         platform->ExecutorForDevice(0).value(),
                         0 /*numa_node*/, {}, {}),
                     new NoopRounder, "pool");
  EXPECT_EQ(nullptr, pool.AllocateRaw(4 /*alignment*/, 0 /*num_bytes*/));
pool.DeallocateRaw(nullptr);
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(0, pool.put_count());
EXPECT_EQ(0, pool.allocated_count());
EXPECT_EQ(0, pool.evicted_count());
}
TEST(PoolAllocatorTest, ZeroSizePool) {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
  PoolAllocator pool(0 /*pool_size_limit*/, false /*auto_resize*/,
                     new DeviceHostAllocator(
                         platform->ExecutorForDevice(0).value(),
                         0 /*numa_node*/, {}, {}),
                     new NoopRounder, "pool");
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(0, pool.put_count());
EXPECT_EQ(0, pool.allocated_count());
EXPECT_EQ(0, pool.evicted_count());
for (int i = 0; i < 3; ++i) {
void* p0 = pool.AllocateRaw(4, 0);
void* p4 = pool.AllocateRaw(4, 4);
void* p12 = pool.AllocateRaw(4, 12);
EXPECT_EQ(nullptr, p0);
EXPECT_NE(nullptr, p4);
EXPECT_NE(nullptr, p12);
pool.DeallocateRaw(p0);
pool.DeallocateRaw(p4);
pool.DeallocateRaw(p12);
}
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(0, pool.put_count());
EXPECT_EQ(0, pool.allocated_count());
EXPECT_EQ(0, pool.evicted_count());
}
TEST(PoolAllocatorTest, Alignment) {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
  PoolAllocator pool(0 /*pool_size_limit*/, false /*auto_resize*/,
                     new DeviceHostAllocator(
                         platform->ExecutorForDevice(0).value(),
                         0 /*numa_node*/, {}, {}),
                     new NoopRounder, "pool");
for (int i = 0; i < 16; ++i) {
size_t alignment = 1 << i;
void* p = pool.AllocateRaw(alignment, 111);
EXPECT_TRUE(p != nullptr);
EXPECT_EQ(0, reinterpret_cast<int64_t>(p) & (alignment - 1))
<< "ptr: " << p << " alignment " << alignment;
}
}
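// With auto_resize enabled, sustained evictions should raise the pool's size
// limit; EvictOne jumps straight to the kMinPoolSize floor of 100.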
TEST(PoolAllocatorTest, AutoResize) {
  PoolAllocator pool(2 /*pool_size_limit*/, true /*auto_resize*/,
                     new BasicCPUAllocator(0 /*numa_node*/, {}, {}),
                     new NoopRounder, "pool");
for (int i = 0; i < 10; ++i) {
void* p = pool.AllocateRaw(4, 64 << i);
pool.DeallocateRaw(p);
}
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(10, pool.allocated_count());
EXPECT_EQ(10, pool.put_count());
EXPECT_EQ(8, pool.evicted_count());
EXPECT_EQ(2, pool.size_limit());
for (int j = 0; j < 120; ++j) {
for (int i = 0; i < 10; ++i) {
void* p = pool.AllocateRaw(4, 64 << i);
pool.DeallocateRaw(p);
}
}
EXPECT_EQ(100, pool.size_limit());
}
TEST(PoolAllocatorTest, CudaHostAllocator) {
int alloc_count = 0;
int64_t alloc_size = 0;
SubAllocator::Visitor alloc_visitor =
[&alloc_count, &alloc_size](void* ptr, int numa_node, int64_t size) {
++alloc_count;
alloc_size += size;
};
int free_count = 0;
int64_t free_size = 0;
SubAllocator::Visitor free_visitor =
[&free_count, &free_size](void* ptr, int numa_node, int64_t size) {
++free_count;
free_size += size;
};
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
  DeviceHostAllocator* sub_allocator = new DeviceHostAllocator(
      platform->ExecutorForDevice(0).value(), 0 /*numa_node*/,
      {alloc_visitor}, {free_visitor});
  PoolAllocator pool(2 /*pool_size_limit*/, false /*auto_resize*/,
                     sub_allocator, new NoopRounder, "pool");
EXPECT_EQ(0, alloc_count);
EXPECT_EQ(0, alloc_size);
EXPECT_EQ(0, free_count);
EXPECT_EQ(0, free_size);
void* p1_16 = pool.AllocateRaw(4, 16);
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(1, pool.allocated_count());
EXPECT_NE(nullptr, p1_16);
EXPECT_EQ(1, alloc_count);
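  // kChunkPrefixSize matches sizeof(ChunkPrefix):
  // sizeof(size_t) + sizeof(void*) == 16 on LP64 targets.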
static const int kChunkPrefixSize = 16;
EXPECT_EQ(16 + (alloc_count * kChunkPrefixSize), alloc_size);
pool.DeallocateRaw(p1_16);
EXPECT_EQ(1, pool.put_count());
void* p2_16 = pool.AllocateRaw(4, 16);
EXPECT_EQ(1, pool.get_from_pool_count());
EXPECT_EQ(1, pool.allocated_count());
EXPECT_EQ(p1_16, p2_16);
pool.DeallocateRaw(p2_16);
EXPECT_EQ(2, pool.put_count());
EXPECT_EQ(1, alloc_count);
EXPECT_EQ(16 + (alloc_count * kChunkPrefixSize), alloc_size);
EXPECT_EQ(0, free_count);
void* p3_4 = pool.AllocateRaw(4, 4);
EXPECT_EQ(2, pool.allocated_count());
EXPECT_NE(p1_16, p3_4);
EXPECT_NE(nullptr, p3_4);
pool.DeallocateRaw(p3_4);
EXPECT_EQ(3, pool.put_count());
void* p4_2 = pool.AllocateRaw(4, 2);
EXPECT_NE(nullptr, p4_2);
EXPECT_EQ(0, pool.evicted_count());
EXPECT_EQ(3, alloc_count);
EXPECT_EQ(16 + 4 + 2 + (alloc_count * kChunkPrefixSize), alloc_size);
EXPECT_EQ(0, free_count);
pool.DeallocateRaw(p4_2);
EXPECT_EQ(4, pool.put_count());
EXPECT_EQ(1, pool.evicted_count());
EXPECT_EQ(3, alloc_count);
EXPECT_EQ(16 + 4 + 2 + (alloc_count * kChunkPrefixSize), alloc_size);
EXPECT_EQ(1, free_count);
EXPECT_EQ(16 + (free_count * kChunkPrefixSize), free_size);
void* p5_4 = pool.AllocateRaw(4, 4);
EXPECT_NE(nullptr, p5_4);
pool.DeallocateRaw(p5_4);
void* p6_2 = pool.AllocateRaw(4, 2);
EXPECT_NE(nullptr, p6_2);
pool.DeallocateRaw(p6_2);
EXPECT_EQ(3, pool.get_from_pool_count());
EXPECT_EQ(6, pool.put_count());
EXPECT_EQ(3, pool.allocated_count());
EXPECT_EQ(1, pool.evicted_count());
EXPECT_EQ(3, alloc_count);
EXPECT_EQ(16 + 4 + 2 + (alloc_count * kChunkPrefixSize), alloc_size);
EXPECT_EQ(1, free_count);
EXPECT_EQ(16 + (free_count * kChunkPrefixSize), free_size);
pool.Clear();
EXPECT_EQ(0, pool.get_from_pool_count());
EXPECT_EQ(0, pool.put_count());
EXPECT_EQ(0, pool.allocated_count());
EXPECT_EQ(0, pool.evicted_count());
EXPECT_EQ(3, alloc_count);
EXPECT_EQ(16 + 4 + 2 + (alloc_count * kChunkPrefixSize), alloc_size);
EXPECT_EQ(3, free_count);
EXPECT_EQ(16 + 4 + 2 + (free_count * kChunkPrefixSize), free_size);
}
TEST(PoolAllocatorTest, Pow2Rounder) {
Pow2Rounder rounder;
EXPECT_EQ(1, rounder.RoundUp(1));
EXPECT_EQ(2, rounder.RoundUp(2));
EXPECT_EQ(16, rounder.RoundUp(9));
EXPECT_EQ(16, rounder.RoundUp(16));
EXPECT_EQ(65536, rounder.RoundUp(41234));
EXPECT_EQ(65536, rounder.RoundUp(65535));
EXPECT_EQ(65536, rounder.RoundUp(65536));
}
TEST(PoolAllocatorTest, Name) {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
  PoolAllocator pool(2 /*pool_size_limit*/, false /*auto_resize*/,
                     new DeviceHostAllocator(
                         platform->ExecutorForDevice(0).value(),
                         0 /*numa_node*/, {}, {}),
                     new NoopRounder, "pool");
EXPECT_EQ("pool", pool.Name());
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/pool_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/pool_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac538130-9704-4263-b4e3-40f5340a8cbc | cpp | tensorflow/tensorflow | device_set | tensorflow/core/common_runtime/device_set.cc | tensorflow/core/common_runtime/device_set_test.cc | #include "tensorflow/core/common_runtime/device_set.h"
#include <set>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
DeviceSet::DeviceSet() = default;
DeviceSet::~DeviceSet() = default;
void DeviceSet::AddDevice(Device* device) {
mutex_lock l(devices_mu_);
devices_.push_back(device);
prioritized_devices_.clear();
prioritized_device_types_.clear();
for (const string& name :
DeviceNameUtils::GetNamesForDeviceMappings(device->parsed_name())) {
device_by_name_.insert({name, device});
}
matching_device_cache_.clear();
}
void DeviceSet::FindMatchingDevices(const DeviceNameUtils::ParsedName& spec,
std::vector<Device*>* devices) const {
  {
    mutex_lock l(devices_mu_);
    auto match = matching_device_cache_.find(spec);
    if (match != matching_device_cache_.end()) {
      *devices = match->second;
      return;  // Cache hit; falling through would discard the cached result.
    }
  }
devices->clear();
for (Device* d : devices_) {
if (DeviceNameUtils::IsCompleteSpecification(spec, d->parsed_name())) {
devices->push_back(d);
}
}
mutex_lock l(devices_mu_);
matching_device_cache_.insert({spec, *devices});
}
Device* DeviceSet::FindDeviceByName(const string& name) const {
return gtl::FindPtrOrNull(device_by_name_, name);
}
int DeviceSet::DeviceTypeOrder(const DeviceType& d) {
return DeviceFactory::DevicePriority(d.type_string());
}
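// Higher registration priority sorts first; ties break on the lexicographic
// type name so the ordering is deterministic.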
static bool DeviceTypeComparator(const DeviceType& a, const DeviceType& b) {
auto a_priority = DeviceSet::DeviceTypeOrder(a);
auto b_priority = DeviceSet::DeviceTypeOrder(b);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
return StringPiece(a.type()) < StringPiece(b.type());
}
std::vector<DeviceType> DeviceSet::PrioritizedDeviceTypeList() const {
std::vector<DeviceType> result;
std::set<string> seen;
for (Device* d : devices_) {
const auto& t = d->device_type();
if (seen.insert(t).second) {
result.emplace_back(t);
}
}
std::sort(result.begin(), result.end(), DeviceTypeComparator);
return result;
}
void DeviceSet::SortPrioritizedDeviceTypeVector(
PrioritizedDeviceTypeVector* vector) {
if (vector == nullptr) return;
auto device_sort = [](const PrioritizedDeviceTypeVector::value_type& a,
const PrioritizedDeviceTypeVector::value_type& b) {
if (a.second != b.second) {
return a.second > b.second;
}
return DeviceTypeComparator(a.first, b.first);
};
std::sort(vector->begin(), vector->end(), device_sort);
}
void DeviceSet::SortPrioritizedDeviceVector(PrioritizedDeviceVector* vector) {
auto device_sort = [](const std::pair<Device*, int32>& a,
const std::pair<Device*, int32>& b) {
if (a.second != b.second) {
return a.second > b.second;
}
const string& a_type_name = a.first->device_type();
const string& b_type_name = b.first->device_type();
if (a_type_name != b_type_name) {
auto a_priority = DeviceFactory::DevicePriority(a_type_name);
auto b_priority = DeviceFactory::DevicePriority(b_type_name);
if (a_priority != b_priority) {
return a_priority > b_priority;
}
}
if (a.first->IsLocal() != b.first->IsLocal()) {
return a.first->IsLocal();
}
return StringPiece(a.first->name()) < StringPiece(b.first->name());
};
std::sort(vector->begin(), vector->end(), device_sort);
}
namespace {
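// Lazily rebuilds the prioritized device and device-type vectors whenever the
// device count has changed since they were last computed.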
void UpdatePrioritizedVectors(
const std::vector<Device*>& devices,
PrioritizedDeviceVector* prioritized_devices,
PrioritizedDeviceTypeVector* prioritized_device_types) {
if (prioritized_devices->size() != devices.size()) {
for (Device* d : devices) {
prioritized_devices->emplace_back(
d, DeviceSet::DeviceTypeOrder(DeviceType(d->device_type())));
}
DeviceSet::SortPrioritizedDeviceVector(prioritized_devices);
}
if (prioritized_device_types != nullptr &&
prioritized_device_types->size() != devices.size()) {
std::set<DeviceType> seen;
for (const std::pair<Device*, int32>& p : *prioritized_devices) {
DeviceType t(p.first->device_type());
if (seen.insert(t).second) {
prioritized_device_types->emplace_back(t, p.second);
}
}
}
}
}
const PrioritizedDeviceVector& DeviceSet::prioritized_devices() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
nullptr);
return prioritized_devices_;
}
const PrioritizedDeviceTypeVector& DeviceSet::prioritized_device_types() const {
mutex_lock l(devices_mu_);
UpdatePrioritizedVectors(devices_, &prioritized_devices_,
&prioritized_device_types_);
return prioritized_device_types_;
}
} | #include "tensorflow/core/common_runtime/device_set.h"
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static Device* Dev(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
class DeviceSetTest : public ::testing::Test {
public:
Device* AddDevice(const char* type, const char* name) {
Device* d = Dev(type, name);
owned_.emplace_back(d);
devices_.AddDevice(d);
return d;
}
const DeviceSet& device_set() const { return devices_; }
std::vector<DeviceType> types() const {
return devices_.PrioritizedDeviceTypeList();
}
private:
DeviceSet devices_;
std::vector<std::unique_ptr<Device>> owned_;
};
class DummyFactory : public DeviceFactory {
public:
Status ListPhysicalDevices(std::vector<string>* devices) override {
return absl::OkStatus();
}
Status CreateDevices(const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) override {
return absl::OkStatus();
}
};
REGISTER_LOCAL_DEVICE_FACTORY("d1", DummyFactory);
REGISTER_LOCAL_DEVICE_FACTORY("d2", DummyFactory, 51);
REGISTER_LOCAL_DEVICE_FACTORY("d3", DummyFactory, 49);
TEST_F(DeviceSetTest, PrioritizedDeviceTypeList) {
EXPECT_EQ(50, DeviceSet::DeviceTypeOrder(DeviceType("d1")));
EXPECT_EQ(51, DeviceSet::DeviceTypeOrder(DeviceType("d2")));
EXPECT_EQ(49, DeviceSet::DeviceTypeOrder(DeviceType("d3")));
EXPECT_EQ(std::vector<DeviceType>{}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
EXPECT_EQ(std::vector<DeviceType>{DeviceType("d1")}, types());
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ((std::vector<DeviceType>{DeviceType("d2"), DeviceType("d1")}),
types());
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ((std::vector<DeviceType>{
DeviceType("d2"),
DeviceType("d1"),
DeviceType("d3"),
}),
types());
}
TEST_F(DeviceSetTest, prioritized_devices) {
Device* d1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51),
std::make_pair(d1, 50)}));
Device* d3 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_devices(),
(PrioritizedDeviceVector{std::make_pair(d2, 51), std::make_pair(d1, 50),
std::make_pair(d3, 49)}));
}
TEST_F(DeviceSetTest, prioritized_device_types) {
AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50)}));
AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
EXPECT_EQ(
device_set().prioritized_device_types(),
(PrioritizedDeviceTypeVector{std::make_pair(DeviceType("d2"), 51),
std::make_pair(DeviceType("d1"), 50),
std::make_pair(DeviceType("d3"), 49)}));
}
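// Sorting an explicitly constructed vector must order entries by descending
// priority, with a deterministic tie-break among entries of equal priority.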
TEST_F(DeviceSetTest, SortPrioritizedDeviceVector) {
Device* d1_0 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:0");
Device* d2_0 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:0");
Device* d3_0 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:0");
Device* d1_1 = AddDevice("d1", "/job:a/replica:0/task:0/device:d1:1");
Device* d2_1 = AddDevice("d2", "/job:a/replica:0/task:0/device:d2:1");
Device* d3_1 = AddDevice("d3", "/job:a/replica:0/task:0/device:d3:1");
PrioritizedDeviceVector sorted{
std::make_pair(d3_1, 30), std::make_pair(d1_0, 10),
std::make_pair(d2_0, 20), std::make_pair(d3_0, 30),
std::make_pair(d1_1, 20), std::make_pair(d2_1, 10)};
device_set().SortPrioritizedDeviceVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceVector{
std::make_pair(d3_0, 30), std::make_pair(d3_1, 30),
std::make_pair(d2_0, 20), std::make_pair(d1_1, 20),
std::make_pair(d2_1, 10), std::make_pair(d1_0, 10)}));
}
TEST_F(DeviceSetTest, SortPrioritizedDeviceTypeVector) {
PrioritizedDeviceTypeVector sorted{std::make_pair(DeviceType("d3"), 20),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d2"), 30)};
device_set().SortPrioritizedDeviceTypeVector(&sorted);
EXPECT_EQ(sorted, (PrioritizedDeviceTypeVector{
std::make_pair(DeviceType("d2"), 30),
std::make_pair(DeviceType("d1"), 20),
std::make_pair(DeviceType("d3"), 20)}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_set.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_set_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19fab0f1-fdbb-4a45-9421-72aa1b6cb629 | cpp | tensorflow/tensorflow | optimization_registry | tensorflow/core/common_runtime/optimization_registry.cc | tensorflow/core/common_runtime/optimization_registry_test.cc | #include "tensorflow/core/common_runtime/optimization_registry.h"
#include <string>
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
OptimizationPassRegistry* OptimizationPassRegistry::Global() {
static OptimizationPassRegistry* global_optimization_registry =
new OptimizationPassRegistry;
return global_optimization_registry;
}
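// Registers `pass` under the given grouping and phase. Multiple passes may
// share a phase; within one phase they run in registration order. Callers
// typically go through the REGISTER_OPTIMIZATION macro, e.g.
//   REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 1, MyPass);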
void OptimizationPassRegistry::Register(
Grouping grouping, int phase, std::unique_ptr<GraphOptimizationPass> pass) {
groups_[grouping][phase].push_back(std::move(pass));
}
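// Runs every pass registered for `grouping`, phase by phase in ascending
// phase order, recording per-pass timing metrics and dumping the graph
// before and after the group when the relevant VLOG levels are enabled.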
Status OptimizationPassRegistry::RunGrouping(
Grouping grouping, const GraphOptimizationPassOptions& options) {
const char* grouping_name = GetGroupingName(grouping);
auto dump_graph = [&](std::string func_name, const std::string& group,
const std::string& tag, bool bypass_filter) {
if (func_name.empty()) func_name = "unknown_graph";
if (options.graph) {
DEBUG_DATA_DUMPER()->DumpGraph(func_name, group, tag,
options.graph->get(), options.flib_def,
bypass_filter);
}
if (options.partition_graphs) {
for (auto& part : *options.partition_graphs) {
DEBUG_DATA_DUMPER()->DumpGraph(func_name + "_partition_" + part.first,
group, tag, part.second.get(),
options.flib_def, bypass_filter);
}
}
};
dump_graph(options.debug_filename_prefix, kDebugGroupMain,
strings::StrCat("before_opt_group_", grouping_name),
VLOG_IS_ON(3));
auto group = groups_.find(grouping);
if (group != groups_.end()) {
static const char* kGraphOptimizationCategory = "GraphOptimizationPass";
tensorflow::metrics::ScopedCounter<2> group_timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kGraphOptimizationCategory, "*"});
for (auto& phase : group->second) {
VLOG(1) << "Running optimization phase " << phase.first;
for (auto& pass : phase.second) {
VLOG(1) << "Running optimization pass: " << pass->name();
if (options.graph) {
VLOG(1) << "Graph #nodes " << (*options.graph)->num_nodes()
<< " #edges " << (*options.graph)->num_edges();
}
tensorflow::metrics::ScopedCounter<2> pass_timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kGraphOptimizationCategory, pass->name()});
Status s = pass->Run(options);
if (!s.ok()) return s;
pass_timings.ReportAndStop();
dump_graph(options.debug_filename_prefix, kDebugGroupGraphOptPass,
strings::StrCat("after_opt_group_", grouping_name, "_phase_",
phase.first, "_", pass->name()),
VLOG_IS_ON(5));
}
}
group_timings.ReportAndStop();
}
VLOG(1) << "Finished optimization of a group " << grouping;
if (options.graph && group != groups_.end()) {
VLOG(1) << "Graph #nodes " << (*options.graph)->num_nodes() << " #edges "
<< (*options.graph)->num_edges();
}
dump_graph(options.debug_filename_prefix, kDebugGroupMain,
strings::StrCat("after_opt_group_", grouping_name),
VLOG_IS_ON(3) || (VLOG_IS_ON(2) &&
grouping == Grouping::POST_REWRITE_FOR_EXEC));
return absl::OkStatus();
}
void OptimizationPassRegistry::LogGrouping(Grouping grouping, int vlog_level) {
auto group = groups_.find(grouping);
if (group != groups_.end()) {
for (auto& phase : group->second) {
for (auto& pass : phase.second) {
VLOG(vlog_level) << "Registered optimization pass grouping " << grouping
<< " phase " << phase.first << ": " << pass->name();
}
}
}
}
void OptimizationPassRegistry::LogAllGroupings(int vlog_level) {
for (auto group = groups_.begin(); group != groups_.end(); ++group) {
LogGrouping(group->first, vlog_level);
}
}
} | #include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestOptimization : public GraphOptimizationPass {
public:
static int count_;
Status Run(const GraphOptimizationPassOptions& options) override {
++count_;
return absl::OkStatus();
}
};
int TestOptimization::count_ = 0;
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 1,
TestOptimization);
TEST(OptimizationRegistry, OptimizationPass) {
EXPECT_EQ(0, TestOptimization::count_);
GraphOptimizationPassOptions options;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
options.graph = &graph;
std::unique_ptr<FunctionLibraryDefinition> flib_def(
new FunctionLibraryDefinition(OpRegistry::Global(),
FunctionDefLibrary()));
options.flib_def = flib_def.get();
EXPECT_EQ(absl::OkStatus(),
OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::PRE_PLACEMENT, options));
EXPECT_EQ(1, TestOptimization::count_);
}
class UpdateFuncLibPass : public GraphOptimizationPass {
public:
Status Run(const GraphOptimizationPassOptions& options) override {
return options.flib_def->AddFunctionDef(test::function::WXPlusB());
}
};
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 1,
UpdateFuncLibPass);
class OptimizationPassTest : public ::testing::Test {
public:
OptimizationPassTest() {
FunctionDefLibrary func_def_lib;
*func_def_lib.add_function() = test::function::XTimesTwo();
flib_def_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), func_def_lib));
}
void RunPass() {
GraphOptimizationPassOptions options;
options.flib_def = flib_def_.get();
EXPECT_EQ(absl::OkStatus(),
OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, options));
}
const FunctionDef* GetFunctionDef(const string& func) const {
return flib_def_->Find(func);
}
private:
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
};
TEST_F(OptimizationPassTest, UpdateFuncLibPass) {
RunPass();
auto f1 = GetFunctionDef("XTimesTwo");
ASSERT_NE(f1, nullptr);
EXPECT_EQ(test::function::XTimesTwo().DebugString(), f1->DebugString());
auto f2 = GetFunctionDef("WXPlusB");
ASSERT_NE(f2, nullptr);
EXPECT_EQ(test::function::WXPlusB().DebugString(), f2->DebugString());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimization_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimization_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50095472-91e4-4cbb-baa3-ba1f4cebcf97 | cpp | tensorflow/tensorflow | collective_param_resolver_local | tensorflow/core/common_runtime/collective_param_resolver_local.cc | tensorflow/core/common_runtime/collective_param_resolver_local_test.cc | #include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <stddef.h>
#include <algorithm>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
CollectiveParamResolverLocal::CollectiveParamResolverLocal(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverInterface* dev_resolver,
NcclCommunicatorInterface* nccl_communicator, const string& task_name)
: nccl_(config.experimental().collective_nccl()),
dev_mgr_(dev_mgr),
dev_resolver_(dev_resolver),
nccl_communicator_(nccl_communicator),
task_name_(task_name),
gpu_ring_order_(
config.gpu_options().experimental().collective_ring_order()) {}
void CollectiveParamResolverLocal::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
namespace {
const char* GetCollectiveName(const CollectiveParams* cp, bool nccl) {
switch (cp->instance.type) {
case BROADCAST_COLLECTIVE:
return nccl ? "NcclBroadcast" : "HierarchicalTreeBroadcast";
case REDUCTION_COLLECTIVE:
return nccl ? "NcclReduce" : "RingReduce";
case GATHER_COLLECTIVE:
return nccl ? "NcclGather" : "RingGather";
case PERMUTE_COLLECTIVE:
return "Permute";
case ALL_TO_ALL_COLLECTIVE:
return nccl ? "NcclAllToAll" : "AllToAll";
case REDUCE_SCATTER_COLLECTIVE:
return nccl ? "NcclReduceScatter" : "undef";
default:
return "undef";
}
}
string TaskNameFromDeviceName(const string& device_name) {
DeviceNameUtils::ParsedName parsed_device;
CHECK(DeviceNameUtils::ParseFullName(device_name, &parsed_device));
string task_name;
CHECK(DeviceNameUtils::GetTaskName(parsed_device, &task_name));
return task_name;
}
struct RankFormatter {
  void operator()(std::string* out, const CollGroupMember& m) const {
out->append(std::to_string(m.rank));
}
};
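// Ranks must either be supplied by every member or by none, and any
// user-specified ranks must be unique within the group.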
Status CheckUserSpecifiedRanks(const std::vector<CollGroupMember>& members) {
absl::flat_hash_set<int> user_ranks = {};
bool at_least_one_member_with_no_rank = false;
bool at_least_one_member_with_user_rank = false;
for (const auto& m : members) {
if (m.rank == -1) {
at_least_one_member_with_no_rank = true;
} else {
at_least_one_member_with_user_rank = true;
user_ranks.insert(m.rank);
}
}
auto received_ranks = absl::StrJoin(members, ",", RankFormatter());
if (at_least_one_member_with_no_rank && at_least_one_member_with_user_rank) {
return errors::InvalidArgument(
"Only part of the group members have user given rank specified.",
"Received ranks: ", received_ranks);
}
if (at_least_one_member_with_user_rank &&
user_ranks.size() < members.size()) {
return errors::InvalidArgument(
"Duplicate ranks specified for group members. Received ranks: ",
received_ranks);
}
return absl::OkStatus();
}
}
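// Adds `device` to the group identified by group_params->group_key, creating
// the GroupRec on first use. Callers whose group is still incomplete are
// parked on pending_done/pending_params and resumed when the final member
// joins, the group is cancelled, or the resolver is aborted.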
void CollectiveParamResolverLocal::CompleteGroupLocal(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, StatusCallback done) {
VLOG(1) << "CompleteGroup device=" << device.name() << ": "
<< group_params->ToString();
std::vector<StatusCallback> to_be_called;
GroupRec* gr = nullptr;
Status status;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_params->group_key);
if (it == group_table_.end()) {
gr = new GroupRec;
mutex_lock grl(gr->mu);
gr->group.group_key = group_params->group_key;
gr->group.group_size = group_params->group_size;
gr->group.device_type = group_params->device_type;
if (nccl_communicator_ != nullptr) {
gr->group.runtime_details.communicator_key =
nccl_communicator_->GenerateCommunicatorKey();
}
group_table_[gr->group.group_key].reset(gr);
VLOG(2) << "New group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size
<< " runtime_details=" << gr->group.runtime_details.ToString();
} else {
gr = it->second.get();
}
}
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
done(status);
return;
}
if (cancel_mgr != nullptr) {
CancellationToken token = cancel_mgr->get_cancellation_token();
bool is_cancelled = !cancel_mgr->RegisterCallback(
token, std::bind(&CollectiveParamResolverLocal::CancelGroup, this,
group_params->group_key));
if (is_cancelled) {
done(errors::Cancelled("CompleteGroup is cancelled before it starts"));
return;
}
done = [cancel_mgr, token,
original_done = std::move(done)](const Status& status) {
cancel_mgr->TryDeregisterCallback(token);
original_done(status);
};
}
{
mutex_lock gr_lock(gr->mu);
VLOG(2) << "gr device_type=" << gr->group.device_type
<< " cp device_type=" << group_params->device_type
<< " current device=" << device.name();
if (gr->status.ok()) {
if (group_params->device_type != gr->group.device_type) {
gr->status = errors::Internal(
"Device ", device.name(),
" is joining a group with incompatible device type",
gr->group.device_type.type_string(),
" (group_key=", gr->group.group_key, ")");
} else if (group_params->group_size != gr->group.group_size) {
gr->status = errors::Internal(
"Device ", device.name(), " is joining a group with size",
group_params->group_size, ", but that group has size ",
gr->group.group_size, " (group_key=", gr->group.group_key, ")");
}
}
bool new_device = false;
if (gr->status.ok()) {
auto it = gr->incarnations_by_device_name.find(device.name());
if (it == gr->incarnations_by_device_name.end()) {
if (gr->group.members.size() == gr->group.group_size) {
gr->status =
errors::Internal("Device ", device.name(),
" is joining a group that is already full",
" (group_key=", gr->group.group_key, ")");
} else {
gr->incarnations_by_device_name[device.name()] = device.incarnation();
CollGroupMember member;
member.device = device;
if (group_params->user_specified_rank == -1 ||
(group_params->user_specified_rank >= 0 &&
group_params->user_specified_rank < gr->group.group_size)) {
member.rank = group_params->user_specified_rank;
} else {
gr->status = errors::InvalidArgument(
"User Provided rank is invalid. It should be between [0, "
"group_size)");
}
gr->group.members.push_back(std::move(member));
new_device = true;
if (VLOG_IS_ON(1)) {
string dev_buf;
for (const auto& m : gr->group.members) {
strings::StrAppend(&dev_buf, ",", m.device.name());
}
VLOG(1) << "CompleteGroupLocal group_key=" << gr->group.group_key
<< " group_size=" << gr->group.group_size << " (current"
<< " devices)=(" << dev_buf << ") (number of"
<< " devices pending)="
<< (gr->group.group_size - gr->group.members.size());
}
}
} else {
if (it->second != device.incarnation()) {
gr->status = errors::FailedPrecondition(
"Device ", device.name(),
" current incarnation doesn't match with one in the group. This "
"usually means this worker has restarted but the collective "
"leader hasn't, or this worker connects to a wrong cluster.");
}
}
}
if (gr->status.ok()) {
VLOG(2) << "group_size " << gr->group.group_size << " set size "
<< gr->group.members.size() << " gr " << gr;
if (gr->group.members.size() < gr->group.group_size) {
gr->pending_done.push_back(std::move(done));
gr->pending_params.push_back(group_params);
return;
}
CHECK_EQ(gr->group.members.size(), gr->group.group_size);
auto st = CheckUserSpecifiedRanks(gr->group.members);
if (!st.ok()) {
gr->status = st;
}
if (new_device) {
FinishGroup(gr);
}
*group_params = gr->group;
for (auto* params : gr->pending_params) {
*params = gr->group;
}
}
to_be_called.swap(gr->pending_done);
gr->pending_params.clear();
status = gr->status;
}
done(status);
for (int i = 0; i < to_be_called.size(); ++i) {
to_be_called[i](status);
}
}
namespace {
struct DevRec {
string task;
string device;
int original_rank;
int local_rank;
int global_rank;
const DeviceLocality* locality;
};
typedef std::unordered_map<string, DevRec> TaskDeviceMap;
typedef std::unordered_map<string, TaskDeviceMap> GlobalDeviceMap;
GlobalDeviceMap BuildDevRecs(const CollGroupParams& gp) {
GlobalDeviceMap gdm;
  // All members must have joined before rankings are established.
  CHECK_EQ(gp.members.size(), gp.group_size);
for (int i = 0; i < gp.members.size(); ++i) {
TaskDeviceMap& tdm = gdm[gp.members[i].task];
DevRec* dr = &tdm[gp.members[i].device.name()];
dr->task = gp.members[i].task;
dr->device = gp.members[i].device.name();
dr->original_rank = i;
dr->local_rank = 0;
dr->global_rank = 0;
dr->locality = &gp.members[i].device.locality();
}
return gdm;
}
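// Parses a user-supplied GPU ring order such as "3,2,1,0", where the k-th
// entry names the GPU id that should receive local rank k. Returns false
// (so the caller falls back to link-strength ordering) if the list length
// does not match the device count, an entry fails to parse, or an entry
// names a GPU that is not in the map.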
bool ParseRingOrder(const string& gpu_ring_order_str, TaskDeviceMap* tdm) {
std::vector<string> split_gpu_ring_order_str =
str_util::Split(gpu_ring_order_str, ',');
if (split_gpu_ring_order_str.size() != tdm->size()) return false;
gtl::FlatMap<int32, int32> gpu_ranks;
for (int32_t rank = 0;
rank < static_cast<int32>(split_gpu_ring_order_str.size()); ++rank) {
int32_t tmp;
if (strings::safe_strto32(split_gpu_ring_order_str[rank], &tmp)) {
gpu_ranks[tmp] = rank;
} else {
return false;
}
}
for (auto& tdm_it : *tdm) {
DeviceNameUtils::ParsedName parsed_name;
DevRec* dr = &tdm_it.second;
if (!DeviceNameUtils::ParseFullName(dr->device, &parsed_name)) {
return false;
}
auto rank_it = gpu_ranks.find(parsed_name.id);
if (rank_it == gpu_ranks.end()) return false;
dr->local_rank = rank_it->second;
}
VLOG(2) << "Assigned local ranks based on ring order " << gpu_ring_order_str;
return true;
}
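// Assigns local ranks within a single task: honor the explicit ring order if
// it parses, otherwise start from the member with the lowest original rank
// and greedily follow the strongest not-yet-visited interconnect link.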
void OrderTaskDeviceMap(const string& gpu_ring_order, TaskDeviceMap* tdm) {
CHECK_GT(tdm->size(), 0);
if (ParseRingOrder(gpu_ring_order, tdm)) return;
int least_rank = -1;
string next_device;
std::set<string> selected;
for (const auto& it : *tdm) {
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
DeviceNameUtils::ParsedName parsed_name;
CHECK(DeviceNameUtils::ParseFullName(next_device, &parsed_name));
int next_rank = 0;
while (true) {
selected.insert(next_device);
auto next_dev_it = tdm->find(next_device);
CHECK(next_dev_it != tdm->end());
DevRec* dr = &next_dev_it->second;
dr->local_rank = next_rank;
++next_rank;
if (selected.size() == tdm->size()) {
break;
}
const InterconnectLink* best_link = nullptr;
if (parsed_name.type == "GPU") {
for (const InterconnectLink& il : dr->locality->links().link()) {
parsed_name.id = il.device_id();
string endpoint_device =
DeviceNameUtils::ParsedNameToString(parsed_name);
if (selected.find(endpoint_device) != selected.end()) {
continue;
}
if (tdm->find(endpoint_device) == tdm->end()) {
continue;
}
if (best_link == nullptr || il.strength() > best_link->strength()) {
best_link = &il;
}
}
}
if (best_link != nullptr) {
parsed_name.id = best_link->device_id();
next_device = DeviceNameUtils::ParsedNameToString(parsed_name);
} else {
least_rank = -1;
for (const auto& it : *tdm) {
if (selected.find(it.second.device) != selected.end()) {
continue;
}
if (least_rank < 0 || it.second.original_rank < least_rank) {
least_rank = it.second.original_rank;
next_device = it.second.device;
}
}
CHECK_GE(least_rank, 0);
}
}
}
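// Computes a global rank for every member by ordering devices within each
// task and then laying the tasks out consecutively.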
GlobalDeviceMap EstablishGlobalRank(const CollGroupParams& gp,
const string& gpu_ring_order) {
VLOG(1) << "EstablishGlobalRank";
GlobalDeviceMap gdm = BuildDevRecs(gp);
for (auto& iter : gdm) {
TaskDeviceMap& tdm = iter.second;
OrderTaskDeviceMap(gpu_ring_order, &tdm);
}
std::set<string> tasks;
for (const CollGroupMember& member : gp.members) {
tasks.insert(member.task);
}
int next_rank = 0;
for (const string& task : tasks) {
TaskDeviceMap* tdm = &gdm[task];
for (auto& it : *tdm) {
it.second.global_rank = it.second.local_rank + next_rank;
}
next_rank += tdm->size();
}
return gdm;
}
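// Records the device count per task and whether every task contributes the
// same number of devices.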
void SetDevPerTask(CollGroupParams* gp) {
gp->num_devices_per_task.clear();
for (const CollGroupMember& member : gp->members) {
gp->num_devices_per_task[member.task]++;
}
gp->same_num_devices_per_task = false;
int dev_per_task = -1;
for (const auto& task_dev : gp->num_devices_per_task) {
if (dev_per_task == -1) {
dev_per_task = task_dev.second;
} else if (dev_per_task != task_dev.second) {
return;
}
}
gp->same_num_devices_per_task = true;
}
}
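// Runs once the final member has joined: derives each member's task name and
// locality flag, establishes the default ranking, and freezes the per-task
// device counts.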
void CollectiveParamResolverLocal::FinishGroup(GroupRec* gr) {
for (CollGroupMember& member : gr->group.members) {
member.task = TaskNameFromDeviceName(member.device.name());
member.is_local = member.task == task_name_;
}
CompleteDefaultRanking(&gr->group);
SetDevPerTask(&gr->group);
gr->group.num_tasks =
static_cast<int32>(gr->group.num_devices_per_task.size());
}
void CollectiveParamResolverLocal::CancelGroup(int32 group_key) {
std::vector<StatusCallback> pending_done;
GroupRec* gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return;
}
gr = it->second.get();
}
{
mutex_lock l(gr->mu);
if (gr->group.members.size() == gr->group.group_size) {
return;
}
gr->status = errors::Cancelled("group is cancelled");
pending_done.swap(gr->pending_done);
gr->pending_params.clear();
}
for (const StatusCallback& done : pending_done) {
done(errors::Cancelled("group is cancelled"));
}
}
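// Sets cp->default_rank to this device's position in the ordered member list
// and gives positional ranks to members that did not specify one.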
void CollectiveParamResolverLocal::SetDefaultRank(const string& device,
CollectiveParams* cp) {
CHECK_EQ(cp->group.group_size, cp->group.members.size()) << cp->ToString();
for (int i = 0; i < cp->group.group_size; ++i) {
if (cp->group.members[i].device.name() == device) {
cp->default_rank = i;
}
if (cp->group.members[i].rank == -1) {
cp->group.members[i].rank = i;
}
}
}
void CollectiveParamResolverLocal::InitInstanceSharedParams(
const CollectiveParams* cp, InstanceRec* ir) {
ir->shared->instance = cp->instance;
ir->shared->default_rank = -1;
}
void CollectiveParamResolverLocal::CompleteDefaultRanking(CollGroupParams* gp) {
std::sort(gp->members.begin(), gp->members.end(),
[](const CollGroupMember& lhs, const CollGroupMember& rhs) {
return DeviceNameUtils::CompareFullNames(lhs.device.name(),
rhs.device.name());
});
GlobalDeviceMap gdm = EstablishGlobalRank(*gp, gpu_ring_order_);
std::vector<CollGroupMember> new_members(gp->group_size);
for (const auto& git : gdm) {
const TaskDeviceMap& tdm = git.second;
for (const auto& tit : tdm) {
const DevRec& dr = tit.second;
new_members[dr.global_rank] = std::move(gp->members[dr.original_rank]);
}
}
if (VLOG_IS_ON(2)) {
string buf;
for (const auto& m : new_members)
strings::StrAppend(&buf, "\n", m.device.name());
VLOG(2) << "Optimized device order for group " << gp->group_key << ": "
<< buf;
}
gp->members = std::move(new_members);
}
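// Looks up (or creates) the InstanceRec keyed by (step_id, instance_key)
// within the group, propagating any resolver-wide abort status into the
// returned record.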
CollectiveParamResolverLocal::InstanceRec*
CollectiveParamResolverLocal::GetOrCreateInstanceRec(CollectiveParams* cp,
bool* created) {
*created = false;
InstanceRec* irec = nullptr;
{
mutex_lock l(instance_mu_);
std::tuple<int64_t, int32_t> key = {cp->instance.step_id,
cp->instance.instance_key};
auto group_it = instance_table_.find(cp->group.group_key);
if (group_it != instance_table_.end()) {
auto instance_it = group_it->second.find(key);
if (instance_it != group_it->second.end()) {
irec = instance_it->second.get();
}
}
if (irec == nullptr) {
irec = new InstanceRec;
*created = true;
{
mutex_lock il(irec->mu);
irec->known.resize(cp->group.group_size, false);
}
InitInstanceSharedParams(cp, irec);
instance_table_[cp->group.group_key][key].reset(irec);
}
}
Status status;
{
mutex_lock l(status_mu_);
status = status_;
}
if (!status.ok()) {
mutex_lock l(irec->mu);
irec->status = status;
}
return irec;
}
Status CollectiveParamResolverLocal::LookupGroup(int32_t group_key,
CollGroupParams* group) {
mutex_lock l(group_mu_);
auto group_rec = group_table_.find(group_key);
if (group_rec == group_table_.end()) {
return errors::InvalidArgument("Group ", group_key,
" is not "
"initialized. Please call group "
"initialization op first before invoking "
"collective op.");
}
mutex_lock lock(group_rec->second->mu);
if (!group_rec->second->status.ok()) {
return errors::FailedPrecondition(
"Failed to run collective due to "
"unsuccessful group initialization. "
"Group initialization failed with error ",
group_rec->second->status.ToString());
}
*group = group_rec->second->group;
return absl::OkStatus();
}
void CollectiveParamResolverLocal::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams local " << device.name() << " for " << cp << ": "
<< cp->ToString();
if (cp->run_group_initialization) {
CompleteGroupLocal(device, &cp->group, cancel_mgr,
[this, device, cp, done](const Status& s) {
if (s.ok()) {
CompleteInstanceLocal(device.name(), cp, done);
} else {
done(s);
}
});
} else {
const auto s = LookupGroup(cp->group.group_key, &cp->group);
if (s.ok()) {
CompleteInstanceLocal(device.name(), cp, done);
} else {
done(s);
}
}
}
void CollectiveParamResolverLocal::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
done(
errors::Internal("CompleteInstance is not implemented by "
"CollectiveParamResolverLocal which is "
"intended only for non-distributed deployment."));
}
void CollectiveParamResolverLocal::AssignCollectiveType(CollectiveParams* cp) {
CollectiveImplementationInterface* col_impl;
bool use_nccl =
(nccl_ || cp->instance.impl_details.communication_hint == "nccl") &&
cp->group.device_type == DEVICE_GPU &&
CollectiveRegistry::LookupParamResolverInstance("NcclReduce", &col_impl)
.ok();
cp->instance.impl_details.collective_name = GetCollectiveName(cp, use_nccl);
VLOG(1) << "AssignCollectiveType "
<< cp->instance.impl_details.collective_name;
}
void CollectiveParamResolverLocal::CompleteInstanceLocal(
const string& device, CollectiveParams* cp, const StatusCallback& done) {
VLOG(1) << "CompleteInstanceLocal " << device
<< " instance_key: " << cp->instance.instance_key << " group_key "
<< cp->group.group_key;
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
if (!created_irec) {
if (ir->shared->instance.type != cp->instance.type ||
ir->shared->instance.data_type != cp->instance.data_type) {
done(errors::Internal("Collective instance ", cp->instance.instance_key,
" expected type ", ir->shared->instance.type,
" and data_type ", ir->shared->instance.data_type,
" but got type ", cp->instance.type,
" and data_type ", cp->instance.data_type));
return;
}
}
CompleteInstanceFromInitializedIRec(device, cp, ir, done);
}
void CollectiveParamResolverLocal::CompleteInstanceFromInitializedIRec(
const string& device, CollectiveParams* cp, InstanceRec* ir,
const StatusCallback& done) {
auto expected_shape = cp->instance.shape;
Status status;
{
mutex_lock l(ir->mu);
status = ir->status;
if (status.ok()) {
cp->instance = ir->shared->instance;
}
}
if (!status.ok()) {
done(status);
return;
}
if (expected_shape != cp->instance.shape) {
done(errors::InvalidArgument(
"Shape mismatch in the collective instance ", cp->instance.instance_key,
". Op at device ", device, " expected shape ",
expected_shape.DebugString(), " but another member in the group ",
"expected shape ", cp->instance.shape.DebugString(), ". This is likely",
" due to different input shapes at different members of the collective",
" op."));
return;
}
AssignCollectiveType(cp);
SetDefaultRank(device, cp);
CollectiveImplementationInterface* col_impl;
status = CollectiveRegistry::LookupParamResolverInstance(
cp->instance.impl_details.collective_name, &col_impl);
if (!status.ok()) {
done(status);
return;
}
if (cp->instance.type == BROADCAST_COLLECTIVE) {
WaitForGroup(ir, cp, [col_impl, ir, device, cp, done](InstanceRec* irec) {
Status s;
if (ir != irec) {
s = errors::Internal("Expected ir ", ir, " and irec ", irec,
" to be equal");
} else {
mutex_lock l(irec->mu);
s = irec->status;
cp->source_rank = irec->source_rank;
}
if (s.ok()) {
s = col_impl->InitializeCollectiveParams(cp);
}
done(s);
});
} else {
done(col_impl->InitializeCollectiveParams(cp));
}
}
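// Rendezvous for broadcast: each participant marks itself known and exactly
// one must claim is_source. Early arrivals are queued on known_waiters and
// resumed by the last participant, at which point everyone observes the
// resolved source_rank (or an error if no sender showed up).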
void CollectiveParamResolverLocal::WaitForGroup(InstanceRec* ir,
CollectiveParams* cp,
const IRConsumer& f) {
std::vector<IRConsumer> ready_waiters;
do {
mutex_lock l(ir->mu);
if (!ir->status.ok()) {
break;
}
CHECK_EQ(cp->group.group_size, ir->known.size());
CHECK_GE(cp->default_rank, 0);
if (!ir->known[cp->default_rank]) {
ir->known[cp->default_rank] = true;
++ir->known_count;
if (cp->is_source) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal("Instance ", cp->instance.instance_key,
" already has source ", ir->source_rank,
", received second claim from ",
cp->default_rank);
} else {
ir->source_rank = cp->default_rank;
}
}
}
if (ir->known_count < cp->group.group_size) {
ir->known_waiters.push_back(f);
return;
}
CHECK_EQ(ir->known_count, cp->group.group_size);
if (ir->source_rank < 0) {
ir->status =
errors::Internal("Instance ", cp->instance.instance_key,
" found no source for broadcast. This "
"could mean that there were group_size=",
ir->known_count, " BcastRecvs but no BcastSend.");
}
if (!ir->known_waiters.empty()) {
ready_waiters = std::move(ir->known_waiters);
}
} while (false);
f(ir);
  for (auto& waiter : ready_waiters) {
    waiter(ir);
  }
}
void CollectiveParamResolverLocal::StartAbort(const Status& s) {
{
mutex_lock l(status_mu_);
if (!status_.ok()) {
VLOG(2) << "CollectiveParamResolverLocal already aborted. Ignoring "
"subsequent abortion with status: "
<< s;
return;
}
status_ = s;
}
StartAbortLocal(s);
}
void CollectiveParamResolverLocal::StartAbortLocal(const Status& s) {
std::vector<StatusCallback> pending_done;
{
mutex_lock l(group_mu_);
for (const auto& item : group_table_) {
GroupRec* gr = item.second.get();
{
mutex_lock gl(gr->mu);
gr->status = s;
for (auto& done : gr->pending_done) {
pending_done.push_back(std::move(done));
}
gr->pending_done.clear();
gr->pending_params.clear();
}
}
}
for (const StatusCallback& done : pending_done) {
done(s);
}
std::vector<InstanceRec*> instances;
{
mutex_lock l(instance_mu_);
for (const auto& group_entry : instance_table_) {
for (const auto& item : group_entry.second) {
instances.push_back(item.second.get());
}
}
}
for (InstanceRec* ir : instances) {
std::vector<IRConsumer> known_waiters;
{
mutex_lock il(ir->mu);
ir->status = s;
known_waiters.swap(ir->known_waiters);
}
for (const IRConsumer& done : known_waiters) {
done(ir);
}
}
}
} | #include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include <atomic>
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
#define NUM_DEVS 3
class CollectiveParamResolverLocalTest : public ::testing::Test {
protected:
CollectiveParamResolverLocalTest() {
ConfigProto cp;
SessionOptions options;
task_name_ = "/job:localhost/replica:0/task:0";
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name_, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_.reset(new DeviceResolverLocal(device_mgr_.get()));
ResetParamResolver(ConfigProto());
}
void ResetParamResolver(const ConfigProto& config) {
prl_.reset(new CollectiveParamResolverLocal(
        config, device_mgr_.get(), drl_.get(), /*nccl_communicator=*/nullptr,
task_name_));
}
void RunCompleteDefaultRanking(
CollGroupParams group, const std::vector<int32>& gpu_ring_order,
const std::vector<string>& expected_device_order) {
ConfigProto config;
if (!gpu_ring_order.empty()) {
config.mutable_gpu_options()
->mutable_experimental()
->set_collective_ring_order(absl::StrJoin(gpu_ring_order, ","));
}
ResetParamResolver(config);
prl_->CompleteDefaultRanking(&group);
std::vector<string> actual_device_order;
for (const CollGroupMember& member : group.members) {
actual_device_order.push_back(member.device.name());
}
EXPECT_EQ(actual_device_order, expected_device_order);
}
DeviceAttributes GetDeviceAttributes(const string& device_name) {
Device* device = nullptr;
TF_CHECK_OK(device_mgr_->LookupDevice(device_name, &device));
return device->attributes();
}
string task_name_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
std::unique_ptr<CollectiveParamResolverLocal> prl_;
};
TEST_F(CollectiveParamResolverLocalTest, CompleteDefaultRanking) {
constexpr int kNumGpus = 8;
CollGroupParams group;
group.device_type = DeviceType("GPU");
group.num_tasks = 1;
group.group_size = kNumGpus;
std::unordered_set<int> clique1 = {0, 1, 6, 7};
for (int gpu_idx = 0; gpu_idx < kNumGpus; ++gpu_idx) {
CollGroupMember member;
member.task = "/job:localhost/replica:0/task:0";
member.device.set_name(strings::StrCat(
"/job:localhost/replica:0/task:0/device:GPU:", gpu_idx));
for (int link_idx = 0; link_idx < kNumGpus; ++link_idx) {
if (gpu_idx == link_idx) continue;
bool gpu_in_clique1 = clique1.find(gpu_idx) != clique1.end();
bool link_in_clique1 = clique1.find(link_idx) != clique1.end();
if ((gpu_in_clique1 && link_in_clique1) ||
(!gpu_in_clique1 && !link_in_clique1)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(2);
} else if ((gpu_idx == 3 && link_idx == 7) ||
(gpu_idx == 7 && link_idx == 3)) {
LocalLinks* links = member.device.mutable_locality()->mutable_links();
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(link_idx);
ilink->set_strength(1);
}
}
group.members.push_back(member);
}
RunCompleteDefaultRanking(group, {1, 3, 5, 7, 6, 4, 2, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {7, 6, 5, 4, 3, 2, 1, 0},
{
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:5",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:0",
});
RunCompleteDefaultRanking(group, {},
{
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1",
"/job:localhost/replica:0/task:0/device:GPU:6",
"/job:localhost/replica:0/task:0/device:GPU:7",
"/job:localhost/replica:0/task:0/device:GPU:3",
"/job:localhost/replica:0/task:0/device:GPU:2",
"/job:localhost/replica:0/task:0/device:GPU:4",
"/job:localhost/replica:0/task:0/device:GPU:5",
});
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsReduction1Task) {
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = 7;
cp->instance.type = REDUCTION_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = false;
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
                                /*cancel_mgr=*/nullptr,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->instance.impl_details.subdiv_source_rank.size(), 0);
EXPECT_FALSE(cps[i]->is_source);
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
void InitializeCollectiveParamsForBroadcast(int instance_key, int device_idx,
bool is_source,
CollectiveParams* cp) {
cp->group.group_key = 1;
cp->group.group_size = 3;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->instance.data_type = DataType(DT_FLOAT);
cp->instance.shape = TensorShape({5});
cp->instance.impl_details.subdiv_offsets.push_back(0);
cp->is_source = is_source;
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcast1Task) {
constexpr int kInstanceKey = 5;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, i == 1, cp);
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
                                /*cancel_mgr=*/nullptr,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
TF_ASSERT_OK(statuses[i]);
ASSERT_EQ(cps[i]->group.members.size(), 3);
for (int j = 0; j < NUM_DEVS; ++j) {
EXPECT_EQ(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", j),
cps[i]->group.members[j].device.name());
EXPECT_TRUE(cps[i]->group.members[j].is_local);
}
EXPECT_EQ(cps[i]->is_source, (i == 1));
EXPECT_EQ(cps[i]->default_rank, i);
EXPECT_TRUE(cps[i]->group.same_num_devices_per_task);
cps[i]->Unref();
}
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsBroadcastForgotSender) {
constexpr int kInstanceKey = 8;
CollectiveParams* cps[NUM_DEVS];
Status statuses[NUM_DEVS];
Notification note[NUM_DEVS];
for (int i = 0; i < NUM_DEVS; ++i) {
cps[i] = new CollectiveParams();
CollectiveParams* cp = cps[i];
InitializeCollectiveParamsForBroadcast(kInstanceKey, i, false, cp);
Env::Default()->SchedClosure([this, i, cp, ¬e, &statuses]() {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
                                /*cancel_mgr=*/nullptr,
[&statuses, ¬e, i](const Status& s) {
statuses[i] = s;
note[i].Notify();
});
});
}
for (int i = 0; i < NUM_DEVS; ++i) {
note[i].WaitForNotification();
}
for (int i = 0; i < NUM_DEVS; ++i) {
EXPECT_EQ(statuses[i].code(), error::INTERNAL);
EXPECT_EQ(statuses[i].message(),
strings::StrCat(
"Instance ", kInstanceKey,
" found no source for broadcast. This could mean that there"
" were group_size=",
NUM_DEVS, " BcastRecvs but no BcastSend."));
cps[i]->Unref();
}
}
CollectiveParams* MakeCollectiveParams(int group_key, int instance_key,
bool is_source) {
auto* cp = new CollectiveParams();
cp->group.group_key = group_key;
cp->group.group_size = NUM_DEVS;
cp->group.device_type = DeviceType("CPU");
cp->group.num_tasks = 1;
cp->instance.instance_key = instance_key;
cp->instance.type = BROADCAST_COLLECTIVE;
cp->is_source = is_source;
return cp;
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingGroup) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS - 1);
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, i, &cancel_mgr, &cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
      cp[i] = MakeCollectiveParams(/*group_key=*/100, /*instance_key=*/100,
                                   /*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, AbortPendingInstance) {
CancellationManager cancel_mgr;
std::vector<CollectiveParams*> cp(NUM_DEVS);
int group_key = 100;
int instance_key = 100;
{
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
                                     /*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
BlockingCounter start(NUM_DEVS - 1);
BlockingCounter done(NUM_DEVS - 1);
for (int i = 0; i < NUM_DEVS - 1; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i, &cancel_mgr,
&cp, &start, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key + 1,
                                     /*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i], &cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(),
absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.DecrementCount();
cp->Unref();
});
start.DecrementCount();
});
}
start.Wait();
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
}
TEST_F(CollectiveParamResolverLocalTest, CompleteParamsAfterAbortion) {
CancellationManager cancel_mgr;
int group_key = 100;
int instance_key = 100;
{
std::vector<CollectiveParams*> cp(NUM_DEVS);
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
Env::Default()->SchedClosure([this, group_key, instance_key, i,
&cancel_mgr, &cp, &done] {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
cp[i] = MakeCollectiveParams(group_key, instance_key,
                                     /*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp[i],
&cancel_mgr,
[&done, cp = cp[i]](const Status& s) {
EXPECT_EQ(s.code(), error::OK);
done.DecrementCount();
cp->Unref();
});
});
}
done.Wait();
}
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
auto complete_params = [this, &cancel_mgr](int group_key, int instance_key) {
string device = "/job:localhost/replica:0/task:0/device:CPU:0";
Notification done;
auto* cp = MakeCollectiveParams(group_key, instance_key,
                                    /*is_source=*/true);
core::ScopedUnref unref(cp);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp, &cancel_mgr,
[&done](const Status& s) {
EXPECT_EQ(s.code(), absl::StatusCode::kAborted);
EXPECT_EQ(s.message(), "__aborted__");
done.Notify();
});
done.WaitForNotification();
};
complete_params(group_key, instance_key);
complete_params(group_key, instance_key + 1);
complete_params(group_key + 1, instance_key + 1);
}
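// Races StartAbort() against many in-flight CompleteParamsAsync() calls: each
// call must either succeed or fail with the abort status, and across the 100
// iterations a healthy number of calls (> 50) must have succeeded.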
TEST_F(CollectiveParamResolverLocalTest, AbortNormalCompleteParamsAsync) {
CancellationManager cancel_mgr;
std::atomic<int64_t> num_ok{0};
for (int cnt = 0; cnt < 100; ++cnt) {
BlockingCounter done(NUM_DEVS);
for (int i = 0; i < NUM_DEVS; ++i) {
string device =
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", i);
Env::Default()->SchedClosure(
[this, i, device, &num_ok, &cancel_mgr, &done] {
int key = 100;
while (true) {
Status status;
Notification n;
auto* cp =
                MakeCollectiveParams(/*group_key=*/key, /*instance_key=*/key,
                                     /*is_source=*/i == 0);
prl_->CompleteParamsAsync(GetDeviceAttributes(device), cp,
&cancel_mgr,
[&status, &n](const Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
cp->Unref();
if (!status.ok()) {
EXPECT_EQ(status.code(), absl::StatusCode::kAborted);
EXPECT_EQ(status.message(), "__aborted__");
done.DecrementCount();
return;
}
++num_ok;
++key;
}
});
}
    // The sleep is in microseconds, so name the delay accordingly.
    int64_t delay_micros = random::New64() % 50000;
    Env::Default()->SleepForMicroseconds(delay_micros);
prl_->StartAbort(Status(absl::StatusCode::kAborted, "__aborted__"));
done.Wait();
ResetParamResolver(ConfigProto());
}
EXPECT_GT(num_ok.load(), 50);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_param_resolver_local.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_param_resolver_local_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5bb9930c-5eb7-4105-82de-1bfb82725b8c | cpp | tensorflow/tensorflow | function_optimization_registry | tensorflow/core/common_runtime/function_optimization_registry.cc | tensorflow/core/common_runtime/function_optimization_registry_test.cc | #include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <string>
#include "tensorflow/core/framework/metrics.h"
namespace tensorflow {
void FunctionOptimizationPassRegistry::Init(
std::unique_ptr<FunctionOptimizationPass> pass) {
DCHECK(!pass_) << "Only one pass should be set.";
pass_ = std::move(pass);
}
Status FunctionOptimizationPassRegistry::Run(
const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptimizationPass::FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) {
if (!pass_) return absl::OkStatus();
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"GraphOptimizationPass", "FunctionOptimizationPassRegistry"});
return pass_->Run(function_name, device_set, config_proto, function_options,
graph, flib_def, control_ret_node_names,
control_rets_updated);
}
FunctionOptimizationPassRegistry& FunctionOptimizationPassRegistry::Global() {
static FunctionOptimizationPassRegistry* kGlobalRegistry =
new FunctionOptimizationPassRegistry;
return *kGlobalRegistry;
}
} | #include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <memory>
#include <string>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
class PassingFunctionPass : public FunctionOptimizationPass {
public:
static bool ran_;
Status Run(const std::string& function_name, const DeviceSet& device_set,
const ConfigProto& config_proto,
const FunctionOptions& function_options,
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
std::vector<std::string>* control_ret_node_names,
bool* control_rets_updated) override {
ran_ = true;
return absl::OkStatus();
}
};
bool PassingFunctionPass::ran_ = false;
TEST(FunctionOptimizationPassRegistry, PassNoError) {
EXPECT_FALSE(PassingFunctionPass::ran_);
FunctionOptimizationPassRegistry::Global().Init(
std::make_unique<PassingFunctionPass>());
DeviceSet device_set;
ConfigProto config_proto;
FunctionOptimizationPass::FunctionOptions function_options;
Status status = FunctionOptimizationPassRegistry::Global().Run(
"test_func", device_set, config_proto, function_options,
      /*graph=*/nullptr,
      /*flib_def=*/nullptr,
      /*control_ret_node_names=*/nullptr, /*control_rets_updated=*/nullptr);
EXPECT_EQ(status, absl::OkStatus());
EXPECT_TRUE(PassingFunctionPass::ran_);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/function_optimization_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/function_optimization_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a67ebfd9-4492-43c4-94cb-01f3b7688941 | cpp | tensorflow/tensorflow | cost_util | tensorflow/core/common_runtime/cost_util.cc | tensorflow/core/common_runtime/cost_util_test.cc | #include "tensorflow/core/common_runtime/cost_util.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include "tensorflow/core/platform/str_util.h"
namespace tensorflow {
namespace {
std::vector<std::string> GetCostMeasurementTypes() {
const char* types = std::getenv("TF_COST_MEASUREMENT_TYPE");
if (types == nullptr) return {};
return str_util::Split(types, " ,");
}
const char* GetRequestCostAccessorType() {
static const char* accessor = std::getenv("TF_REQUEST_COST_ACCESSOR_TYPE");
return accessor;
}
}
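// The measurement type list comes from the TF_COST_MEASUREMENT_TYPE
// environment variable, split on commas and/or spaces; e.g. the value
// "test_gcu, test_tpu" used by the unit test yields one measurement per
// registered name, and unknown names are silently skipped.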
std::vector<std::unique_ptr<CostMeasurement>> CreateCostMeasurements(
const CostMeasurement::Context& context) {
static const std::vector<std::string>& types =
*new std::vector<std::string>(GetCostMeasurementTypes());
std::vector<std::unique_ptr<CostMeasurement>> measurements;
for (const auto& type : types) {
std::unique_ptr<CostMeasurement> measurement =
CostMeasurementRegistry::CreateByNameOrNull(type, context);
if (measurement != nullptr) {
measurements.push_back(std::move(measurement));
}
}
return measurements;
}
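// TF_REQUEST_COST_ACCESSOR_TYPE names at most one accessor type; an unset
// variable or an unregistered name yields nullptr.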
std::unique_ptr<RequestCostAccessor> CreateRequestCostAccessor() {
const char* request_cost_accessor_type = GetRequestCostAccessorType();
return request_cost_accessor_type
? RequestCostAccessorRegistry::CreateByNameOrNull(
request_cost_accessor_type)
: nullptr;
}
} | #include "tensorflow/core/common_runtime/cost_util.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class TestGcuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::ZeroDuration(); }
absl::string_view GetCostType() const override { return "test_gcu"; }
};
REGISTER_COST_MEASUREMENT("test_gcu", TestGcuCostMeasurement);
class TestTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::ZeroDuration(); }
absl::string_view GetCostType() const override { return "test_tpu"; }
};
REGISTER_COST_MEASUREMENT("test_tpu", TestTpuCostMeasurement);
class TestRequestCostAccessor : public RequestCostAccessor {
public:
RequestCost* GetRequestCost() const override { return nullptr; }
};
REGISTER_REQUEST_COST_ACCESSOR("test", TestRequestCostAccessor);
TEST(CreateCostMeasurementsTest, Basic) {
setenv("TF_COST_MEASUREMENT_TYPE", "test_gcu, test_tpu, test_invalid",
         /*overwrite=*/1);
const CostMeasurement::Context context;
std::vector<std::unique_ptr<CostMeasurement>> measurements =
CreateCostMeasurements(context);
EXPECT_EQ(measurements.size(), 2);
EXPECT_EQ(measurements[0]->GetTotalCost(), absl::ZeroDuration());
EXPECT_EQ(measurements[0]->GetCostType(), "test_gcu");
EXPECT_EQ(measurements[1]->GetTotalCost(), absl::ZeroDuration());
EXPECT_EQ(measurements[1]->GetCostType(), "test_tpu");
}
TEST(CreateRequestCostAccessorTest, Basic) {
setenv("TF_REQUEST_COST_ACCESSOR_TYPE", "test", 1);
std::unique_ptr<RequestCostAccessor> test_req_cost_accessor =
CreateRequestCostAccessor();
ASSERT_NE(test_req_cost_accessor, nullptr);
EXPECT_EQ(test_req_cost_accessor->GetRequestCost(), nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c33ef068-c164-4cee-846d-e00f6dbb5c81 | cpp | tensorflow/tensorflow | all_to_all | tensorflow/core/common_runtime/all_to_all.cc | tensorflow/core/common_runtime/all_to_all_test.cc | #include "tensorflow/core/common_runtime/all_to_all.h"
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
AllToAll::AllToAll()
: col_ctx_(nullptr), col_params_(nullptr), done_(nullptr), counter_(0) {}
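// Every rank performs one send and one recv per group member (including
// itself), so the collective completes after 2 * group_size callbacks. The
// final callback copies the staging buffer into the real output when the op
// ran with aliased input/output buffers.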
StatusCallback AllToAll::CheckCounterAndCallDone() {
return [this](const Status& s) {
Status final_status;
{
mutex_lock l(mu_);
status_.Update(s);
++counter_;
if (counter_ < 2 * col_params_->group.group_size) {
return;
}
CHECK_LE(counter_, 2 * col_params_->group.group_size);
final_status = status_;
}
if (!final_status.ok()) {
done_(final_status);
return;
}
if (col_ctx_->output->SharesBufferWith(output_buffer_)) {
done_(final_status);
} else {
CollectiveRemoteAccessLocal::MemCpyAsync(
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), &output_buffer_,
col_ctx_->output, 0, done_);
}
};
}
Status AllToAll::InitializeCollectiveContext(
std::shared_ptr<CollectiveContext> col_ctx) {
if (col_ctx->input->dim_size(0) != col_ctx->col_params->group.group_size) {
return errors::InvalidArgument("input to all-to-all first dimension size (",
col_ctx->input->dim_size(0),
") must be the same as the group size (",
col_ctx->col_params->group.group_size, ")");
}
DCHECK(col_ctx->dev_mgr);
col_ctx_ = col_ctx;
col_params_ = col_ctx->col_params.get();
return collective_util::InitializeDeviceAndLocality(
col_ctx->dev_mgr, col_ctx->device_name, &col_ctx->device,
&col_ctx->device_locality);
}
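// Executes the all-to-all: the input is sliced into group_size chunks along
// dimension 0, and one send plus one receive is dispatched per peer. When the
// input and output tensors share a buffer, a separate staging buffer is
// allocated first so incoming chunks cannot clobber data still being sent.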
void AllToAll::Run(StatusCallback done) {
done_ = std::move(done);
input_chunks_.reserve(col_params_->group.group_size);
output_chunks_.reserve(col_params_->group.group_size);
if (col_ctx_->input->SharesBufferWith(*col_ctx_->output)) {
output_buffer_ = Tensor(
col_ctx_->device->GetAllocator(col_ctx_->op_ctx->output_alloc_attr(0)),
col_ctx_->output->dtype(), col_ctx_->output->shape());
} else {
output_buffer_ = *col_ctx_->output;
}
for (int i = 0; i < col_params_->group.group_size; ++i) {
input_chunks_.push_back(col_ctx_->input->SubSlice(i));
int output_index = col_params_->group.members[i].rank;
output_chunks_.push_back(output_buffer_.SubSlice(output_index));
}
for (int i = 0; i < col_params_->group.group_size; ++i) {
auto default_rank = col_params_->default_rank;
DispatchSend(default_rank, i, &input_chunks_[i], CheckCounterAndCallDone());
DispatchRecv(i, default_rank, &output_chunks_[i],
CheckCounterAndCallDone());
}
}
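// Send and receive sides derive the same rendezvous key by concatenating the
// collective's exec_key with the (src_rank, target_rank) pair, giving each
// directed rank pair a unique buffer key within one collective instance.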
void AllToAll::DispatchSend(int src_rank, int target_rank, const Tensor* tensor,
const StatusCallback& done) {
string send_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
col_ctx_->col_exec->remote_access()->PostToPeer(
col_params_->group.members[target_rank].device.name(),
col_params_->group.members[target_rank].task, send_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
col_ctx_->op_ctx->cancellation_manager(), done);
}
void AllToAll::DispatchRecv(int src_rank, int target_rank, Tensor* tensor,
const StatusCallback& done) {
string recv_buf_key =
strings::StrCat(col_ctx_->exec_key, src_rank, target_rank);
col_ctx_->col_exec->remote_access()->RecvFromPeer(
col_params_->group.members[src_rank].device.name(),
col_params_->group.members[src_rank].task,
col_params_->group.members[src_rank].is_local, recv_buf_key,
col_ctx_->device, col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->output_alloc_attr(0), tensor, col_ctx_->device_locality,
0, col_ctx_->op_ctx->cancellation_manager(), done);
}
namespace {
REGISTER_COLLECTIVE(AllToAll, AllToAll);
}
} | #include "tensorflow/core/common_runtime/all_to_all.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class AllToAllTest : public ::testing::Test {
protected:
std::unique_ptr<CollectiveTestEnv> test_env_;
};
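// With three ranks each holding a length-3 vector, a successful all-to-all is
// a transpose: rank r ends up with element r from every peer (e.g. rank 0
// collects {1., 4., 7.} below).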
TEST_F(AllToAllTest, Success) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices_per_worker=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
TF_CHECK_OK(RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]));
counter.DecrementCount();
});
}
counter.Wait();
test::ExpectTensorEqual<double>(tensors[0],
test::AsTensor<double>({1., 4., 7.}));
test::ExpectTensorEqual<double>(tensors[1],
test::AsTensor<double>({2., 5., 8.}));
test::ExpectTensorEqual<double>(tensors[2],
test::AsTensor<double>({3., 6., 9.}));
}
TEST_F(AllToAllTest, SuccessDifferentRank) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices_per_worker=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
std::vector<std::vector<int32>> device_ranks = {{2, 1, 0}};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, &device_ranks, i, &counter]() {
auto col_params = CreateCollectiveParams(
*test_env_, i, "AllToAll", ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape(), device_ranks);
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
TF_CHECK_OK(RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]));
counter.DecrementCount();
});
}
counter.Wait();
test::ExpectTensorEqual<double>(tensors[0],
test::AsTensor<double>({7., 4., 1.}));
test::ExpectTensorEqual<double>(tensors[1],
test::AsTensor<double>({8., 5., 2.}));
test::ExpectTensorEqual<double>(tensors[2],
test::AsTensor<double>({9., 6., 3.}));
}
TEST_F(AllToAllTest, Failure) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices_per_worker=*/3, DEVICE_CPU);
test_env_->remote_access->set_fail_after(1);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2., 3.}),
test::AsTensor<double>({4., 5., 6.}),
test::AsTensor<double>({7., 8., 9.}),
};
int num_failures = 0;
mutex mu;
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &mu, &num_failures, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
Status status = RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]);
if (!status.ok()) {
mutex_lock l(mu);
++num_failures;
}
counter.DecrementCount();
});
}
counter.Wait();
EXPECT_GT(num_failures, 0);
}
TEST_F(AllToAllTest, WrongFirstDimensionSize) {
  test_env_ = CreateCollectiveTestEnv(/*num_workers=*/1,
                                      /*num_devices_per_worker=*/3, DEVICE_CPU);
std::vector<Tensor> tensors = {
test::AsTensor<double>({1., 2.}),
test::AsTensor<double>({4., 5.}),
test::AsTensor<double>({7., 8.}),
};
BlockingCounter counter(3);
for (int i = 0; i < 3; ++i) {
SchedClosure([this, &tensors, i, &counter]() {
auto col_params = CreateCollectiveParams(*test_env_, i, "AllToAll",
ALL_TO_ALL_COLLECTIVE, DT_DOUBLE,
tensors[i].shape());
Device* device = nullptr;
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(
col_params->group.members[i].device.name(), &device));
Status status = RunCollective(test_env_.get(), col_params.get(), device,
&tensors[i], &tensors[i]);
counter.DecrementCount();
EXPECT_TRUE(errors::IsInvalidArgument(status));
});
}
counter.Wait();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/all_to_all.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/all_to_all_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcd78a0f-f497-45e9-bed7-3f0cc8ce3d72 | cpp | tensorflow/tensorflow | input_colocation_exemption_registry | tensorflow/core/common_runtime/input_colocation_exemption_registry.cc | tensorflow/core/common_runtime/input_colocation_exemption_registry_test.cc | #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include <set>
#include <string>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
InputColocationExemptionRegistry* InputColocationExemptionRegistry::Global() {
static InputColocationExemptionRegistry* registry =
new InputColocationExemptionRegistry;
return registry;
}
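// The registry is a process-wide singleton; the instance is intentionally
// leaked so it remains valid for registrations from static initializers.
// Minimal usage sketch (mirroring the unit test; "MyOp" is illustrative):
//   REGISTER_INPUT_COLOCATION_EXEMPTION("MyOp");
//   auto exempt_ops = InputColocationExemptionRegistry::Global()->Get();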
void InputColocationExemptionRegistry::Register(const string& op) {
auto it = ops_.find(op);
if (it != ops_.end()) {
LOG(WARNING) << "Input colocation exemption for op: " << op
<< " already registered";
} else {
ops_.insert(op);
}
}
} | #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_INPUT_COLOCATION_EXEMPTION("op 1");
REGISTER_INPUT_COLOCATION_EXEMPTION("op 2");
}
TEST(InputColocationExemptionRegistryTest, TestBasic) {
auto exempt_ops = InputColocationExemptionRegistry::Global()->Get();
EXPECT_EQ(exempt_ops.size(), 2);
EXPECT_NE(exempt_ops.find("op 1"), exempt_ops.end());
EXPECT_NE(exempt_ops.find("op 2"), exempt_ops.end());
EXPECT_EQ(exempt_ops.find("op 3"), exempt_ops.end());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/input_colocation_exemption_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/input_colocation_exemption_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f6a6d64-faaf-48bf-abf2-9998a1cf5baa | cpp | tensorflow/tensorflow | function_utils | tensorflow/core/grappler/optimizers/data/function_utils.cc | tensorflow/core/grappler/optimizers/data/function_utils_test.cc | #include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
namespace function_utils {
FunctionDefTensorDesc::FunctionDefTensorDesc(const string& node_name,
const string& output, int position)
: node_name(node_name), node_output(output), position(position) {
full_str = strings::StrCat(node_name, ":", node_output, ":", position);
}
FunctionDefTensorDesc::FunctionDefTensorDesc(const string& input) {
full_str = input;
StringPiece capture;
StringPiece remaining;
if (strings::Scanner(input)
.One(strings::Scanner::LETTER_DIGIT_DOT_UNDERSCORE)
.Any(strings::Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)
.GetResult(&remaining, &capture)) {
node_name = string(capture.data(), capture.size());
}
if (strings::Scanner(remaining)
.OneLiteral(":")
.RestartCapture()
.One(strings::Scanner::LETTER)
.Any(strings::Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &capture)) {
node_output = string(capture.data(), capture.size());
}
if (strings::Scanner(remaining)
.OneLiteral(":")
.RestartCapture()
.Many(strings::Scanner::DIGIT)
.GetResult(nullptr, &capture)) {
CHECK(strings::safe_strto32(capture, &position));
}
}
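// The parsing constructor above accepts strings of the form
// "node_name:output_name:position" (e.g. "Cast:y:0"); trailing components may
// be absent, in which case node_output stays empty and position stays -1.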
void ReplaceReferences(const string& from, const string& to,
FunctionDef* func) {
for (NodeDef& n : *func->mutable_node_def()) {
std::replace(n.mutable_input()->begin(), n.mutable_input()->end(), from,
to);
}
for (auto& p : *func->mutable_ret()) {
if (p.second == from) {
p.second = to;
}
}
}
void AddFunctionOutputWithUniqueName(StringPiece prefix,
StringPiece output_tensor_name,
FunctionDef* fdef, DataType dtype) {
string name = string(prefix);
int id = fdef->signature().output_arg_size();
while (ContainsFunctionOutputWithName(name, *fdef)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
auto* output = fdef->mutable_signature()->mutable_output_arg()->Add();
output->set_name(name);
output->set_type(dtype);
(*fdef->mutable_ret())[name] = string(output_tensor_name);
}
OpDef_ArgDef* AddFunctionInput(const string& name, FunctionDef* fdef,
DataType dtype) {
auto* input_arg = fdef->mutable_signature()->mutable_input_arg()->Add();
input_arg->set_type(dtype);
input_arg->set_name(name);
return input_arg;
}
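// Adds a node to the function body. When `name` is empty, a unique name is
// derived from the op via SetUniqueFunctionNodeName, so several nodes of the
// same op can be added without naming collisions.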
NodeDef* AddNode(StringPiece name, StringPiece op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
FunctionDef* fd) {
NodeDef* node = fd->add_node_def();
if (!name.empty()) {
node->set_name(string(name));
} else {
SetUniqueFunctionNodeName(op, fd, node);
}
node->set_op(string(op));
for (const string& input : inputs) {
node->add_input(input);
}
for (const auto& attr : attributes) {
(*node->mutable_attr())[attr.first] = attr.second;
}
return node;
}
bool ContainsFunctionNodeWithName(StringPiece name,
const FunctionDef& function) {
return FindFunctionNodeWithName(name, function) != -1;
}
bool ContainsFunctionNodeWithOp(StringPiece op, const FunctionDef& function) {
return FindFunctionNodeWithOp(op, function) != -1;
}
bool ContainsFunctionOutputWithName(StringPiece name,
const FunctionDef& function) {
return FindFunctionOutputWithName(name, function) != -1;
}
int FindFunctionInputWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const OpDef_ArgDef& arg) { return arg.name() == name; },
function.signature().input_arg());
}
int FindFunctionOutputWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const OpDef_ArgDef& arg) { return arg.name() == name; },
function.signature().output_arg());
}
int FindFunctionNodeWithName(StringPiece name, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&name](const NodeDef& node) { return node.name() == name; },
function.node_def());
}
int FindFunctionNodeWithOp(StringPiece op, const FunctionDef& function) {
return graph_utils::GetFirstElementIndexWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; },
function.node_def());
}
void SetUniqueFunctionNodeName(StringPiece prefix, FunctionDef* function,
NodeDef* node) {
string name = string(prefix);
int id = function->node_def_size();
while (ContainsFunctionNodeWithName(name, *function)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
node->set_name(std::move(name));
}
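// The statefulness checks below are conservative: a function is stateful only
// if its signature is marked stateful and some node in its body is stateful.
// "If" and "While" nodes recurse into their branch/cond/body functions, and
// skip_assert lets callers treat Assert ops as effectively stateless.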
bool IsFunctionStateful(const FunctionLibraryDefinition& library,
const FunctionDef& function_def, bool skip_assert) {
if (!function_def.signature().is_stateful()) return false;
for (const NodeDef& node_def : function_def.node_def()) {
if (IsNodeStateful(library, node_def, skip_assert)) return true;
}
return false;
}
bool IsNodeStateful(const FunctionLibraryDefinition& library,
const NodeDef& node, bool skip_assert) {
const OpDef* op_def;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!s.ok()) return true;
if (!op_def->is_stateful()) return false;
if (skip_assert && op_def->name() == "Assert") {
return false;
}
if (op_def->name() == "If") {
const FunctionDef* then_func =
library.Find(node.attr().at("then_branch").func().name());
const FunctionDef* else_func =
library.Find(node.attr().at("else_branch").func().name());
if ((then_func != nullptr &&
!IsFunctionStateful(library, *then_func, skip_assert)) &&
(else_func != nullptr &&
!IsFunctionStateful(library, *else_func, skip_assert))) {
return false;
}
}
if (op_def->name() == "While") {
const FunctionDef* cond_func =
library.Find(node.attr().at("cond").func().name());
const FunctionDef* body_func =
library.Find(node.attr().at("body").func().name());
if ((cond_func != nullptr &&
!IsFunctionStateful(library, *cond_func, skip_assert)) &&
(body_func != nullptr &&
!IsFunctionStateful(library, *body_func, skip_assert))) {
return false;
}
}
return true;
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace function_utils {
namespace {
TEST(FunctionDefTensorDesc, Parsing) {
FunctionDefTensorDesc f("Cast:y:0");
EXPECT_EQ(f.full_str, "Cast:y:0");
EXPECT_EQ(f.node_name, "Cast");
EXPECT_EQ(f.node_output, "y");
EXPECT_EQ(f.position, 0);
FunctionDefTensorDesc f2("Arg0");
EXPECT_EQ(f2.full_str, "Arg0");
EXPECT_EQ(f2.node_name, "Arg0");
EXPECT_EQ(f2.node_output, "");
EXPECT_EQ(f2.position, -1);
}
TEST(ReplaceReferencesTest, ReplaceReferencesTest) {
FunctionDef outer = FunctionDefHelper::Create(
"outer", {"arg0: int32"}, {"out: int32", "out2: int64"}, {}, {},
{{"out", "MapDefun:output:0"}, {"out2", "Cast:y:0"}});
NodeDef* derive_node =
AddNode("X", "Some_Op", {"MapDefun:output:0"}, {}, &outer);
ReplaceReferences("MapDefun:output:0", "arg0", &outer);
EXPECT_EQ(outer.ret().at("out"), "arg0");
EXPECT_EQ(derive_node->input(0), "arg0");
}
TEST(FunctionUtilsTest, AddFunctionOutputWithUniqueName) {
FunctionDef function = test::function::XTimesTwo();
AddFunctionOutputWithUniqueName("y", "two", &function, DT_INT64);
EXPECT_TRUE(ContainsFunctionOutputWithName("y/_1", function));
EXPECT_EQ(function.ret().at("y/_1"), "two");
}
TEST(FunctionUtilsTest, AddFunctionInput) {
FunctionDef fdef;
auto arg0 = AddFunctionInput("arg0", &fdef, DT_INT32);
auto arg1 = AddFunctionInput("arg1", &fdef, DT_BOOL);
EXPECT_EQ(fdef.signature().input_arg().data()[0], arg0);
EXPECT_EQ(arg0->name(), "arg0");
EXPECT_EQ(arg0->type(), DT_INT32);
EXPECT_EQ(fdef.signature().input_arg().data()[1], arg1);
EXPECT_EQ(arg1->name(), "arg1");
EXPECT_EQ(arg1->type(), DT_BOOL);
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithName(
"weird_name_that_should_not_be_there", function));
EXPECT_TRUE(ContainsFunctionNodeWithName("two", function));
}
TEST(FunctionUtilsTest, ContainsFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_FALSE(ContainsFunctionNodeWithOp("weird_op_that_should_not_be_there",
function));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Mul", function));
}
TEST(FunctionUtilsTest, ContainsFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_TRUE(ContainsFunctionOutputWithName("y", function));
EXPECT_FALSE(ContainsFunctionOutputWithName("Add:z:0", function));
}
TEST(FunctionUtilsTest, FindFunctionNodeWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithName("weird_name_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithName("two", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionNodeWithOp) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(
FindFunctionNodeWithOp("weird_op_that_should_not_be_there", function),
-1);
EXPECT_NE(FindFunctionNodeWithOp("Mul", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionInputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionInputWithName("x", function), 0);
EXPECT_EQ(FindFunctionInputWithName("not_a_name", function), -1);
}
TEST(FunctionUtilsTest, FindFunctionOutputWithName) {
FunctionDef function = test::function::XTimesTwo();
EXPECT_EQ(FindFunctionOutputWithName("y", function), 0);
EXPECT_EQ(FindFunctionOutputWithName("Add:z:0", function), -1);
}
TEST(FunctionUtilsTest, SetUniqueFunctionNodeName) {
FunctionDef function = test::function::XTimesTwo();
NodeDef node;
SetUniqueFunctionNodeName("abc", &function, &node);
for (const NodeDef& function_node : function.node_def()) {
EXPECT_NE(node.name(), function_node.name());
}
auto* new_node = function.add_node_def();
*new_node = node;
NodeDef other;
SetUniqueFunctionNodeName("abc", &function, &other);
EXPECT_NE(other.name(), new_node->name());
}
TEST(FunctionUtilsTest, AddNodeToFunctionDef) {
FunctionDef func;
const char* op_name = "xxx";
AddNode(op_name, op_name, {}, {}, &func);
const NodeDef& node1 = func.node_def(FindFunctionNodeWithName("xxx", func));
EXPECT_EQ(node1.op(), op_name);
EXPECT_EQ(node1.input_size(), 0);
EXPECT_EQ(node1.attr_size(), 0);
const std::vector<string> inputs({"input1", "input2"});
AddNode("", op_name, inputs, {}, &func);
const NodeDef& node2 =
func.node_def(FindFunctionNodeWithName("xxx/_2", func));
EXPECT_EQ(node2.op(), op_name);
EXPECT_EQ(node2.attr_size(), 0);
EXPECT_EQ(node2.input_size(), inputs.size());
for (size_t i = 0; i < inputs.size(); ++i) {
EXPECT_EQ(node2.input(i), inputs[i]);
}
AttrValue a1, a2;
a1.set_type(DT_INT32);
a2.set_type(DT_INT64);
const std::vector<std::pair<string, AttrValue>> attrs(
{{"attr1", a1}, {"attr2", a2}});
AddNode("", op_name, {}, attrs, &func);
const NodeDef& node3 =
func.node_def(FindFunctionNodeWithName("xxx/_3", func));
EXPECT_EQ(node3.op(), op_name);
EXPECT_EQ(node3.input_size(), 0);
EXPECT_EQ(node3.attr_size(), attrs.size());
for (size_t i = 0; i < attrs.size(); ++i) {
EXPECT_EQ(attrs[i].second.type(), node3.attr().at(attrs[i].first).type());
}
}
constexpr char kCondGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-20" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_19" } }
}
}
library {
function {
signature {
name: "cond_true_3"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
}
node_def { name: "NoOp" op: "NoOp" }
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^NoOp"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "cond_false_4"
input_arg { name: "identity_const" type: DT_BOOL }
output_arg { name: "identity_1" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Assert/Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert/condition"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: false
}
}
}
}
node_def {
name: "Assert/Assert/data_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: "Wrong branch!!!"
}
}
}
}
node_def {
name: "Assert/Assert"
op: "Assert"
input: "Assert/Assert/condition:output:0"
input: "Assert/Assert/data_0:output:0"
attr {
key: "T"
value { list { type: DT_STRING } }
}
attr {
key: "summarize"
value { i: 3 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "identity_const"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "Identity:output:0"
input: "^Assert/Assert"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity_1" value: "Identity_1:output:0" }
}
function {
signature {
name: "__inference_test_function_19"
output_arg { name: "identity" type: DT_BOOL }
is_stateful: true
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_BOOL }
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {}
bool_val: true
}
}
}
}
node_def {
name: "cond"
op: "If"
input: "Const:output:0"
input: "Const:output:0"
attr {
key: "Tcond"
value { type: DT_BOOL }
}
attr {
key: "Tin"
value { list { type: DT_BOOL } }
}
attr {
key: "Tout"
value { list { type: DT_BOOL } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "else_branch"
value { func { name: "cond_false_4" } }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "then_branch"
value { func { name: "cond_true_3" } }
}
}
node_def {
name: "cond/Identity"
op: "Identity"
input: "cond:output:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "cond/Identity:output:0"
input: "^cond"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
constexpr char kWhileGraphProto[] = R"proto(
node {
name: "StatefulPartitionedCall"
op: "StatefulPartitionedCall"
attr {
key: "Tin"
value { list {} }
}
attr {
key: "Tout"
value { list { type: DT_INT32 } }
}
attr {
key: "_gradient_op_type"
value { s: "PartitionedCall-35" }
}
attr {
key: "config"
value { s: "" }
}
attr {
key: "config_proto"
value { s: "" }
}
attr {
key: "executor_type"
value { s: "" }
}
attr {
key: "f"
value { func { name: "__inference_test_function_34" } }
}
}
library {
function {
signature {
name: "while_body_5"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_INT32 }
output_arg { name: "identity_1" type: DT_INT32 }
output_arg { name: "identity_2" type: DT_INT32 }
}
node_def {
name: "add/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add"
op: "Add"
input: "const"
input: "add/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "add_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "add_1"
op: "Add"
input: "while_loop_counter"
input: "add_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "add_1:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_1"
op: "Identity"
input: "add:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity_2"
op: "Identity"
input: "maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
ret { key: "identity_1" value: "Identity_1:output:0" }
ret { key: "identity_2" value: "Identity_2:output:0" }
}
function {
signature {
name: "__inference_test_function_34"
output_arg { name: "identity" type: DT_INT32 }
is_stateful: true
}
node_def {
name: "maximum_iterations"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
node_def {
name: "Const"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while/loop_counter"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 0
}
}
}
}
node_def {
name: "while"
op: "While"
input: "while/loop_counter:output:0"
input: "Const:output:0"
input: "maximum_iterations:output:0"
attr {
key: "T"
value { list { type: DT_INT32 type: DT_INT32 type: DT_INT32 } }
}
attr {
key: "_lower_using_switch_merge"
value { b: true }
}
attr {
key: "body"
value { func { name: "while_body_5" } }
}
attr {
key: "cond"
value { func { name: "while_cond_4" } }
}
attr {
key: "output_shapes"
value {
list {
shape {}
shape {}
shape {}
}
}
}
}
node_def {
name: "while/Identity"
op: "Identity"
input: "while:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_1"
op: "Identity"
input: "while:output:1"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "while/Identity_2"
op: "Identity"
input: "while:output:2"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "while/Identity_1:output:0"
input: "^while"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
function {
signature {
name: "while_cond_4"
input_arg { name: "while_loop_counter" type: DT_INT32 }
input_arg { name: "const" type: DT_INT32 }
input_arg { name: "less_maximum_iterations" type: DT_INT32 }
output_arg { name: "identity" type: DT_BOOL }
}
node_def {
name: "Less"
op: "Less"
input: "while_loop_counter"
input: "less_maximum_iterations"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "Less_1/y"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 3
}
}
}
}
node_def {
name: "Less_1"
op: "Less"
input: "const"
input: "Less_1/y:output:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "LogicalAnd"
op: "LogicalAnd"
input: "Less:z:0"
input: "Less_1:z:0"
}
node_def {
name: "Identity"
op: "Identity"
input: "LogicalAnd:z:0"
attr {
key: "T"
value { type: DT_BOOL }
}
}
ret { key: "identity" value: "Identity:output:0" }
}
}
versions { producer: 27 min_consumer: 12 })proto";
TEST(FunctionUtilsTest, IsFunctionStateful) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* nodeA = graph_utils::AddNode("", "A", {}, {}, &graph);
FunctionDef* function = graph_def.mutable_library()->add_function();
*function = test::function::XTimesTwo();
FunctionLibraryDefinition lib_def(OpRegistry::Global(),
*graph_def.mutable_library());
EXPECT_FALSE(IsFunctionStateful(lib_def, *function));
EXPECT_TRUE(IsNodeStateful(lib_def, *nodeA));
GraphDef graph_def_cond;
protobuf::TextFormat::ParseFromString(kCondGraphProto, &graph_def_cond);
FunctionLibraryDefinition cond_lib(OpRegistry::Global(),
graph_def_cond.library());
const FunctionDef* no_op_fnc = cond_lib.Find("cond_true_3");
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *no_op_fnc, true));
const FunctionDef* assert_func = cond_lib.Find("cond_false_4");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *assert_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *assert_func, true));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Const", *assert_func));
EXPECT_TRUE(ContainsFunctionNodeWithOp("Assert", *assert_func));
for (auto node : assert_func->node_def()) {
if (node.op() == "Const") {
EXPECT_FALSE(IsNodeStateful(lib_def, node));
}
if (node.op() == "Assert") {
EXPECT_TRUE(IsNodeStateful(lib_def, node));
EXPECT_FALSE(IsNodeStateful(lib_def, node, true));
}
}
const FunctionDef* cond_func = cond_lib.Find("__inference_test_function_19");
EXPECT_TRUE(IsFunctionStateful(cond_lib, *cond_func));
EXPECT_FALSE(IsFunctionStateful(cond_lib, *cond_func, true));
GraphDef graph_def_while;
protobuf::TextFormat::ParseFromString(kWhileGraphProto, &graph_def_while);
FunctionLibraryDefinition while_lib(OpRegistry::Global(),
graph_def_while.library());
const FunctionDef* while_function =
while_lib.Find("__inference_test_function_34");
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function));
EXPECT_FALSE(IsFunctionStateful(while_lib, *while_function, true));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/function_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/function_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91af592b-fba7-4979-b8ef-f4946b94eef3 | cpp | tensorflow/tensorflow | threadpool_device | tensorflow/core/common_runtime/threadpool_device.cc | tensorflow/core/common_runtime/threadpool_device_test.cc | #if defined(ENABLE_ONEDNN_OPENMP) && defined(ENABLE_MKL) && defined(_OPENMP)
#ifndef DNNL_AARCH64_USE_ACL
#include "external/llvm_openmp/include/omp.h"
#define EIGEN_DONT_PARALLELIZE
#else
#include "omp.h"
#endif
#endif
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/port.h"
#include "tensorflow/core/util/util.h"
#ifdef INTEL_MKL
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
#include "tensorflow/core/platform/cpu_info.h"
#endif
namespace tensorflow {
ThreadPoolDevice::ThreadPoolDevice(const SessionOptions& options,
const string& name, Bytes memory_limit,
const DeviceLocality& locality,
Allocator* allocator)
: LocalDevice(options, Device::BuildDeviceAttributes(
name, DEVICE_CPU, memory_limit, locality)),
allocator_(allocator),
scoped_allocator_mgr_(new ScopedAllocatorMgr(name)) {
auto s = NodeFileWriter::GetNodeFileWriterIfEnabled(name, env());
if (!s.ok()) {
LOG(ERROR) << s.status();
} else {
node_file_writer_ = *s;
if (node_file_writer_) {
LOG(INFO) << "Writing NodeDefs to file: "
<< node_file_writer_->filename();
}
}
#if defined(ENABLE_ONEDNN_OPENMP) && defined(INTEL_MKL)
if (!IsMKLEnabled()) return;
#ifdef _OPENMP
const char* user_omp_threads = getenv("OMP_NUM_THREADS");
static absl::once_flag num_threads_setting_flag;
if (user_omp_threads == nullptr) {
const int mkl_intra_op = port::NumSchedulableCPUs();
const int ht = port::NumHyperthreadsPerCore();
absl::call_once(num_threads_setting_flag, omp_set_num_threads,
(mkl_intra_op + ht - 1) / ht);
}
#ifndef DNNL_AARCH64_USE_ACL
const char* user_kmp_blocktime = getenv("KMP_BLOCKTIME");
static absl::once_flag blocktime_setting_flag;
if (user_kmp_blocktime == nullptr) {
absl::call_once(blocktime_setting_flag, kmp_set_blocktime, 1);
}
#endif
#endif
#endif
}
ThreadPoolDevice::~ThreadPoolDevice() {}
Allocator* ThreadPoolDevice::GetAllocator(AllocatorAttributes attr) {
return allocator_;
}
Allocator* ThreadPoolDevice::GetScopedAllocator(AllocatorAttributes attr,
int64_t step_id) {
if (attr.scope_id > 0) {
return scoped_allocator_mgr_->GetContainer(step_id)->GetInstance(
attr.scope_id);
}
LOG(FATAL) << "Unexpected call to ThreadPoolDevice::GetScopedAllocator "
<< "attr.scope_id = " << attr.scope_id;
return allocator_;
}
Status ThreadPoolDevice::MakeTensorFromProto(
const TensorProto& tensor_proto, const AllocatorAttributes alloc_attrs,
Tensor* tensor) {
if (tensor_proto.dtype() > 0 && tensor_proto.dtype() <= DataType_MAX) {
Tensor parsed(tensor_proto.dtype());
if (parsed.FromProto(allocator_, tensor_proto)) {
*tensor = std::move(parsed);
return absl::OkStatus();
}
}
return errors::InvalidArgument("Cannot parse tensor from proto: ",
tensor_proto.DebugString());
}
void ThreadPoolDevice::CopyTensorInSameDevice(
const Tensor* input_tensor, Tensor* output_tensor,
const DeviceContext* device_context, StatusCallback done) {
if (input_tensor->NumElements() != output_tensor->NumElements()) {
done(errors::Internal(
"CPU->CPU copy shape mismatch: input=", input_tensor->shape(),
", output=", output_tensor->shape()));
return;
}
tensor::DeepCopy(*input_tensor, output_tensor);
done(absl::OkStatus());
}
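// Debug logging of op inputs and outputs is gated on the
// TF_CPU_DEBUG_OPS_TO_LOG environment variable, a comma-separated list of op
// type names, e.g. TF_CPU_DEBUG_OPS_TO_LOG="MatMul,Conv2D" (illustrative
// values; any registered op type works).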
namespace {
const absl::flat_hash_set<std::string>* GetOpsToLogFromEnv() {
auto* result = new absl::flat_hash_set<std::string>;
const char* env = getenv("TF_CPU_DEBUG_OPS_TO_LOG");
if (!env) {
return result;
}
std::vector<absl::string_view> ops = absl::StrSplit(env, ',');
LOG(INFO) << "Will log inputs & outputs from the following ops: ";
for (absl::string_view op : ops) {
result->insert(std::string(op));
LOG(INFO) << " |" << op << "|";
}
return result;
}
bool ShouldLogInputsAndOutputs(OpKernel* op_kernel) {
static const absl::flat_hash_set<std::string>& ops_to_log =
*GetOpsToLogFromEnv();
static const bool is_empty = ops_to_log.empty();
if (is_empty) {
return false;
}
return ops_to_log.count(op_kernel->type_string());
}
}
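// Compute wraps the kernel invocation with the optional input/output logging
// above and, when a NodeFileWriter is configured, records the executed
// NodeDef; a failed write is surfaced on the context status.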
void ThreadPoolDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
}
op_kernel->Compute(context);
if (context->status().ok() && node_file_writer_) {
Status s = node_file_writer_->RecordNodeExecution(op_kernel, context);
if (!s.ok()) {
LOG(ERROR) << s;
context->SetStatus(s);
}
}
if (should_log_inputs_and_outputs) {
LogOutputs(op_kernel, context);
}
}
void ThreadPoolDevice::ComputeAsync(AsyncOpKernel* op_kernel,
OpKernelContext* context,
AsyncOpKernel::DoneCallback done) {
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
AsyncOpKernel::DoneCallback parent_done = done;
done = [this, parent_done, op_kernel, context]() {
LogOutputs(op_kernel, context);
parent_done();
};
}
op_kernel->ComputeAsync(context, done);
}
void ThreadPoolDevice::LogInputs(OpKernel* op_kernel,
OpKernelContext* context) {
LOG(INFO) << "Inputs for " << op_kernel->name() << " (total "
<< context->num_inputs() << "):";
for (int i = 0; i < context->num_inputs(); i++) {
if (!context->has_input(i)) {
LOG(INFO) << "input # " << i << " is absent";
continue;
}
LOG(INFO) << "input # " << i;
LOG(INFO) << context->input(i).DebugString(-1);
}
LOG(INFO) << "";
}
void ThreadPoolDevice::LogOutputs(OpKernel* op_kernel,
OpKernelContext* context) {
if (!context->status().ok()) {
LOG(INFO) << op_kernel->name()
<< " failed: " << context->status().message();
return;
}
LOG(INFO) << "Outputs for " << op_kernel->name() << " (total "
<< context->num_inputs() << "):";
for (int i = 0; i < context->num_outputs(); i++) {
Tensor* output = context->mutable_output(i);
if (output == nullptr) {
LOG(INFO) << "output # " << i << " is null";
} else {
LOG(INFO) << "output # " << i;
LOG(INFO) << output->DebugString(-1);
}
}
LOG(INFO) << "";
}
#ifdef INTEL_MKL
namespace {
class MklCPUAllocatorFactory : public AllocatorFactory {
public:
bool NumaEnabled() override { return false; }
Allocator* CreateAllocator() override { return new MklCPUAllocator; }
virtual SubAllocator* CreateSubAllocator(int numa_node) {
return new MklSubAllocator;
}
};
REGISTER_MEM_ALLOCATOR("MklCPUAllocator",
((IsMKLEnabled() || IsZenDnnEnabled()) ? 200 : 50),
MklCPUAllocatorFactory);
}
#endif
} | #include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const int kDimSize = 2;
void InitTensor(Tensor* tensor, float value) {
auto eigen_tensor = tensor->tensor<float, kDimSize>();
for (int i = 0; i < kDimSize; ++i) {
for (int j = 0; j < kDimSize; ++j) {
eigen_tensor(i, j) = value;
}
}
}
bool Equal(const Tensor& tensor1, const Tensor& tensor2) {
auto eigen_tensor1 = tensor1.tensor<float, kDimSize>();
auto eigen_tensor2 = tensor2.tensor<float, kDimSize>();
for (int i = 0; i < kDimSize; ++i) {
for (int j = 0; j < kDimSize; ++j) {
if (eigen_tensor1(i, j) != eigen_tensor2(i, j)) {
return false;
}
}
}
return true;
}
TEST(ThreadPoolDeviceTest, CopyTensor) {
Tensor input(DT_FLOAT, TensorShape({kDimSize, kDimSize}));
Tensor output(DT_FLOAT, TensorShape({kDimSize, kDimSize}));
InitTensor(&input, 1);
InitTensor(&output, 0);
ASSERT_FALSE(Equal(input, output));
ThreadPoolDevice device(SessionOptions(), "/device:CPU:0", Bytes(256),
DeviceLocality(), cpu_allocator());
DeviceContext* device_context = new DeviceContext;
Notification note;
device.CopyTensorInSameDevice(&input, &output, device_context,
[¬e](const Status& s) {
TF_ASSERT_OK(s);
note.Notify();
});
note.WaitForNotification();
ASSERT_TRUE(Equal(input, output));
device_context->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/threadpool_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/threadpool_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5023f7b8-1c2b-4f32-87ff-2a0af9213f23 | cpp | tensorflow/tensorflow | collective_executor_mgr | tensorflow/core/common_runtime/collective_executor_mgr.cc | tensorflow/core/common_runtime/collective_executor_mgr_test.cc | #include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/build_graph_options.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
CollectiveExecutorMgr::CollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* dev_mgr,
std::unique_ptr<DeviceResolverInterface> dev_resolver,
std::unique_ptr<ParamResolverInterface> param_resolver,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator)
: dev_mgr_(dev_mgr),
dev_resolver_(std::move(dev_resolver)),
param_resolver_(std::move(param_resolver)),
gpu_ring_order_(
config.gpu_options().experimental().collective_ring_order()),
nccl_communicator_(std::move(nccl_communicator)),
work_queue_(std::make_shared<UnboundedWorkQueue>(Env::Default(),
"collective_ops")) {}
CollectiveExecutorMgr::~CollectiveExecutorMgr() {
for (auto iter : executor_table_) {
iter.second->Unref();
}
}
CollectiveExecutor* CollectiveExecutorMgr::FindOrCreate(int64_t step_id) {
CollectiveExecutor* ce = nullptr;
{
mutex_lock l(exec_mu_);
auto it = executor_table_.find(step_id);
if (it != executor_table_.end()) {
ce = it->second;
} else {
ce = Create(step_id);
executor_table_[step_id] = ce;
}
ce->Ref();
}
return ce;
}
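// FindOrCreate hands back an executor with one extra Ref for the caller while
// the table keeps its own reference until Cleanup(step_id) or CleanupAll()
// drops it. Scoped usage sketch (as in the unit test):
//   CollectiveExecutor::Handle handle(mgr->FindOrCreate(step_id), true);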
CollectiveExecutor* CollectiveExecutorMgr::Create(int64_t step_id) {
CollectiveRemoteAccessLocal* rma =
new CollectiveRemoteAccessLocal(dev_mgr_, dev_resolver_.get(), step_id);
return new BaseCollectiveExecutor(this, rma, step_id, dev_mgr_, work_queue_);
}
void CollectiveExecutorMgr::Cleanup(int64_t step_id) {
CollectiveExecutor* ce = nullptr;
{
mutex_lock l(exec_mu_);
auto it = executor_table_.find(step_id);
if (it != executor_table_.end()) {
ce = it->second;
executor_table_.erase(it);
}
}
if (ce) ce->Unref();
}
void CollectiveExecutorMgr::CleanupAll() {
gtl::FlatMap<int64_t, CollectiveExecutor*> executor_table;
{
mutex_lock l(exec_mu_);
std::swap(executor_table, executor_table_);
}
for (auto iter : executor_table) {
iter.second->Unref();
}
}
void CollectiveExecutorMgr::GetStepSequenceAsync(
const GetStepSequenceRequest* request, GetStepSequenceResponse* response,
const StatusCallback& done) {
done(errors::Internal(
"CollectiveExecutorMgr does not implement GetStepSequence."));
}
void CollectiveExecutorMgr::RefreshStepIdSequenceAsync(
int64_t graph_key, const StatusCallback& done) {
done(errors::Internal(
"CollectiveExecutorMgr does not implement RefreshStepIdSequence."));
}
std::unique_ptr<CollectiveExecutorMgr> CreateProdLocalCollectiveExecutorMgr(
const ConfigProto& config, const DeviceMgr* device_mgr,
std::unique_ptr<NcclCommunicatorInterface> nccl_communicator) {
auto device_resolver = std::make_unique<DeviceResolverLocal>(device_mgr);
auto param_resolver = std::make_unique<CollectiveParamResolverLocal>(
config, device_mgr, device_resolver.get(), nccl_communicator.get(),
"/job:localhost/replica:0/task:0");
return std::make_unique<CollectiveExecutorMgr>(
config, device_mgr, std::move(device_resolver), std::move(param_resolver),
std::move(nccl_communicator));
}
} | #include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
#define NUM_DEVS 3
TEST(MaybeCreateNcclCommunicator, ZeroGpus) {
ConfigProto cp;
(*cp.mutable_device_count())["GPU"] = 0;
EXPECT_EQ(nullptr, MaybeCreateNcclCommunicator(cp));
}
class CollectiveExecutorMgrTest : public ::testing::Test {
protected:
CollectiveExecutorMgrTest() {
ConfigProto cp;
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
string task_name = "/job:localhost/replica:0/task:0";
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, task_name, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
std::unique_ptr<DeviceResolverInterface> drl(
new DeviceResolverLocal(device_mgr_.get()));
std::unique_ptr<ParamResolverInterface> prl(
new CollectiveParamResolverLocal(cp, device_mgr_.get(), drl.get(),
                                         /*nccl_communicator=*/nullptr,
                                         task_name));
cme_.reset(new CollectiveExecutorMgr(cp, device_mgr_.get(), std::move(drl),
std::move(prl),
MaybeCreateNcclCommunicator(cp)));
}
std::unique_ptr<CollectiveExecutorMgr> cme_;
std::unique_ptr<DeviceMgr> device_mgr_;
};
TEST_F(CollectiveExecutorMgrTest, FindOrCreate) {
CollectiveExecutor::Handle* h =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_TRUE(h->get());
CollectiveExecutor::Handle* h2 =
new CollectiveExecutor::Handle(cme_->FindOrCreate(1), true);
EXPECT_EQ(h->get(), h2->get());
CollectiveExecutor* ce = h->get();
delete h;
delete h2;
CollectiveExecutor::Handle h3(cme_->FindOrCreate(1), true);
EXPECT_EQ(ce, h3.get());
cme_->Cleanup(1);
}
TEST_F(CollectiveExecutorMgrTest, StepSequenceRelated) {
EXPECT_EQ(CollectiveExecutor::kInvalidId, cme_->NextStepId(123));
Notification ss_note;
Status ss_status;
cme_->RefreshStepIdSequenceAsync(123,
[&ss_status, &ss_note](const Status& s) {
ss_status = s;
ss_note.Notify();
});
ss_note.WaitForNotification();
EXPECT_FALSE(ss_status.ok());
EXPECT_EQ(ss_status.message(),
"CollectiveExecutorMgr does not implement RefreshStepIdSequence.");
Notification gs_note;
Status gs_status;
GetStepSequenceRequest* req = nullptr;
GetStepSequenceResponse* resp = nullptr;
cme_->GetStepSequenceAsync(req, resp,
[&gs_status, &gs_note](const Status& s) {
gs_status = s;
gs_note.Notify();
});
gs_note.WaitForNotification();
EXPECT_FALSE(gs_status.ok());
EXPECT_EQ(gs_status.message(),
"CollectiveExecutorMgr does not implement GetStepSequence.");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_executor_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_executor_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23a4b866-72e7-4211-9fcd-13bf74a06165 | cpp | tensorflow/tensorflow | optimize_cross_host_control_deps | tensorflow/core/common_runtime/optimize_cross_host_control_deps.cc | tensorflow/core/common_runtime/optimize_cross_host_control_deps_test.cc | #include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
Status BuildNoopNode(const Node& source, StringPiece name, const string& device,
Graph* graph, Node** node) {
NodeDefBuilder builder(name, "NoOp", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
Status BuildIdentityNNode(const Node& source, StringPiece name,
const string& device, Graph* graph,
std::vector<NodeDefBuilder::NodeOut>& inputs,
Node** node) {
NodeDefBuilder builder(name, "IdentityN", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
builder.Input(inputs);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
Status BuildIdentityNode(const Node& source, StringPiece name,
const string& device, Graph* graph,
std::vector<NodeDefBuilder::NodeOut>& inputs,
Node** node) {
NodeDefBuilder builder(name, "Identity", NodeDebugInfo(source));
if (!device.empty()) {
builder.Device(device);
}
builder.Input(inputs[0]);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(*node, graph->AddNode(def));
if (!device.empty()) {
(*node)->set_assigned_device_name(device);
}
return absl::OkStatus();
}
const string& RequestedOrAssignedDevice(const Node* n) {
if (!n->assigned_device_name().empty()) {
return n->assigned_device_name();
}
return n->requested_device();
}
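// DeviceLookup precomputes a compact id for each op node's CPU-normalized
// device name plus a pairwise same-address-space table, so the per-edge
// checks below avoid repeated device-name parsing.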
class DeviceLookup {
public:
DeviceLookup() = default;
static absl::StatusOr<DeviceLookup> FromGraph(Graph* graph) {
DeviceLookup lookup;
for (Node* n : graph->op_nodes()) {
string device;
TF_RETURN_IF_ERROR(DeviceNameUtils::DeviceNameToCpuDeviceName(
RequestedOrAssignedDevice(n), &device));
auto iter = lookup.device_name_to_id_.find(device);
int id;
if (iter == lookup.device_name_to_id_.end()) {
id = lookup.device_name_to_id_.size();
lookup.device_name_to_id_[device] = id;
lookup.device_id_to_name_[id] = device;
} else {
id = iter->second;
}
lookup.node_to_device_id_[n] = id;
}
for (auto& [device1, id1] : lookup.device_name_to_id_) {
for (auto& [device2, id2] : lookup.device_name_to_id_) {
bool b = DeviceNameUtils::IsSameAddressSpace(device1, device2);
lookup.is_same_address_space_[std::make_pair(id1, id2)] = b;
}
}
return lookup;
}
inline int NodeToDeviceId(const Node* node) {
return node_to_device_id_[node];
}
inline string DeviceIdToName(int id) { return device_id_to_name_[id]; }
inline bool IsSameAddressSpace(int id1, int id2) {
return is_same_address_space_[std::make_pair(id1, id2)];
}
private:
absl::flat_hash_map<int, string> device_id_to_name_;
absl::flat_hash_map<string, int> device_name_to_id_;
absl::flat_hash_map<const Node*, int> node_to_device_id_;
absl::flat_hash_map<std::pair<int, int>, bool> is_same_address_space_;
};
}
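// Collapses large cross-host control fan-out: when a node has at least
// cross_host_edges_threshold control edges into one remote host, those edges
// are rerouted through a single NoOp placed on that host, so only one control
// edge crosses hosts. Minimal usage sketch (threshold value illustrative):
//   TF_RETURN_IF_ERROR(OptimizeCrossHostControlOutputEdges(
//       &graph, /*cross_host_edges_threshold=*/8));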
Status OptimizeCrossHostControlOutputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
for (Node* n : graph->op_nodes()) {
if (n->out_edges().size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_control_edges;
int src_device_id = lookup.NodeToDeviceId(n);
for (const Edge* edge : n->out_edges()) {
if (!edge->IsControlEdge() || edge->dst()->IsSink()) {
continue;
}
int dst_device_id = lookup.NodeToDeviceId(edge->dst());
if (lookup.IsSameAddressSpace(src_device_id, dst_device_id)) {
continue;
}
auto iter = cross_host_control_edges.find(dst_device_id);
if (iter == cross_host_control_edges.end()) {
cross_host_control_edges[dst_device_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_control_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
string device = lookup.DeviceIdToName(pair.first);
VLOG(1) << "Optmize cross host output control edge, src node: "
<< n->name()
<< " src device: " << lookup.DeviceIdToName(src_device_id)
<< " dst host device: " << device
<< " edges size: " << pair.second.size();
Node* control_after;
TF_RETURN_IF_ERROR(BuildNoopNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "control_after")),
device, graph, &control_after));
graph->AddControlEdge(n, control_after, true);
for (const Edge* edge : pair.second) {
graph->AddControlEdge(control_after, edge->dst(),
true);
graph->RemoveEdge(edge);
}
}
}
return absl::OkStatus();
}
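// The data-edge analogue: fan-out to one remote host is funneled through a
// single Identity (when every edge reads the same source output) or IdentityN
// (otherwise) placed on the destination host.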
Status OptimizeCrossHostDataOutputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
for (Node* n : graph->op_nodes()) {
if (n->out_edges().size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_edges;
int src_id = lookup.NodeToDeviceId(n);
for (const Edge* edge : n->out_edges()) {
Node* dst = edge->dst();
if (edge->IsControlEdge() || dst->IsSink()) {
continue;
}
int dst_id = lookup.NodeToDeviceId(dst);
if (lookup.IsSameAddressSpace(src_id, dst_id)) {
continue;
}
auto iter = cross_host_edges.find(dst_id);
if (iter == cross_host_edges.end()) {
cross_host_edges[dst_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
if (pair.second.empty()) {
continue;
}
int device_id = pair.first;
Node* node0 = pair.second[0]->dst();
if (std::all_of(pair.second.begin(), pair.second.end(),
[node0](const Edge* e) { return e->dst() == node0; })) {
continue;
}
string device = lookup.DeviceIdToName(device_id);
VLOG(1) << "Optimize cross host output edge, src node: " << n->name()
<< " src device: " << lookup.DeviceIdToName(src_id)
<< " dst host device: " << device
<< " edges size: " << pair.second.size();
Node* data_after;
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.reserve(pair.second.size());
const Edge* edge0 = pair.second[0];
if (std::all_of(pair.second.begin(), pair.second.end(),
[edge0](const Edge* e) {
return e->src() == edge0->src() &&
e->src_output() == edge0->src_output();
})) {
inputs.emplace_back(edge0->src()->name(), edge0->src_output(),
edge0->src()->output_type(edge0->src_output()));
TF_RETURN_IF_ERROR(BuildIdentityNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "data_after")),
device, graph, inputs, &data_after));
graph->AddEdge(edge0->src(), edge0->src_output(), data_after, 0);
      for (const Edge* edge : pair.second) {
        graph->AddEdge(data_after, 0, edge->dst(), edge->dst_input());
        graph->RemoveEdge(edge);
      }
} else {
for (const Edge* edge : pair.second) {
inputs.emplace_back(edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output()));
}
TF_RETURN_IF_ERROR(BuildIdentityNNode(
*n, graph->NewName(strings::StrCat(n->name(), "/", "data_after")),
device, graph, inputs, &data_after));
int i = 0;
for (const Edge* edge : pair.second) {
graph->AddEdge(data_after, i, edge->dst(), edge->dst_input());
graph->AddEdge(edge->src(), edge->src_output(), data_after, i);
graph->RemoveEdge(edge);
i++;
}
}
}
}
return absl::OkStatus();
}
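// Mirror of the fan-out case: large cross-host control fan-in into one node
// is aggregated through a NoOp placed on the source host, so only one control
// edge crosses hosts per (source host, destination node) pair.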
Status OptimizeCrossHostControlInputEdges(Graph* graph,
int cross_host_edges_threshold) {
TF_ASSIGN_OR_RETURN(DeviceLookup lookup, DeviceLookup::FromGraph(graph));
absl::flat_hash_map<Node*, std::vector<const Edge*>> node_control_input_edges;
for (Node* n : graph->op_nodes()) {
for (const Edge* edge : n->out_edges()) {
if (!edge->IsControlEdge() || edge->dst()->IsSink()) {
continue;
}
Node* dst = edge->dst();
auto iter = node_control_input_edges.find(dst);
if (iter == node_control_input_edges.end()) {
node_control_input_edges[dst] = {edge};
} else {
node_control_input_edges[dst].push_back(edge);
}
}
}
for (auto& pair : node_control_input_edges) {
Node* dst = pair.first;
const std::vector<const Edge*>& input_edges = pair.second;
if (input_edges.size() < cross_host_edges_threshold) {
continue;
}
absl::flat_hash_map<int, std::vector<const Edge*>> cross_host_control_edges;
int dst_device_id = lookup.NodeToDeviceId(dst);
for (const Edge* edge : input_edges) {
int src_device_id = lookup.NodeToDeviceId(edge->src());
if (lookup.IsSameAddressSpace(src_device_id, dst_device_id)) {
continue;
}
auto iter = cross_host_control_edges.find(src_device_id);
if (iter == cross_host_control_edges.end()) {
cross_host_control_edges[src_device_id] = {edge};
} else {
iter->second.push_back(edge);
}
}
for (const auto& pair : cross_host_control_edges) {
if (pair.second.size() < cross_host_edges_threshold) {
continue;
}
string src_device = lookup.DeviceIdToName(pair.first);
VLOG(1) << "Optmize cross host input control edge, dst node: "
<< dst->name()
<< " dst device: " << lookup.DeviceIdToName(dst_device_id)
<< " src host device: " << src_device
<< " edges size: " << pair.second.size();
Node* control_before;
TF_RETURN_IF_ERROR(BuildNoopNode(
*dst,
graph->NewName(strings::StrCat(dst->name(), "/", "control_before")),
src_device, graph, &control_before));
graph->AddControlEdge(control_before, dst, true);
for (const Edge* edge : pair.second) {
graph->AddControlEdge(edge->src(), control_before,
true);
graph->RemoveEdge(edge);
}
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include <unordered_map>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Node* GetNodeByName(const string& name, Graph* graph) {
for (Node* node : graph->op_nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostControlOutputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto a = ops::Const(scope.WithOpName("a"), 1.0f);
a.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Const(scope.WithOpName("b").WithControlDependencies(a), 2.0f);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
auto c = ops::Const(scope.WithOpName("c").WithControlDependencies(a), 3.0f);
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:1");
auto d = ops::Const(scope.WithOpName("d").WithControlDependencies(a), 4.0f);
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:2");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 5);
Node* control_after = GetNodeByName("a/control_after/_0", &graph);
ASSERT_NE(control_after, nullptr);
EXPECT_EQ(control_after->op_def().name(), "NoOp");
EXPECT_EQ(control_after->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostDataOutputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto c1 = ops::Const(scope.WithOpName("c1"), 1.0f);
auto c2 = ops::Const(scope.WithOpName("c2"), 2.0f);
auto a = ops::IdentityN(scope.WithOpName("a"), {c1, c2});
a.operation.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Identity(scope.WithOpName("b"), a[0]);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
auto c = ops::Identity(scope.WithOpName("c"), a[1]);
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:1");
auto d = ops::Identity(scope.WithOpName("d"), a[0]);
d.node()->set_assigned_device_name("/job:worker/task:2/CPU:0");
auto e = ops::Identity(scope.WithOpName("e"), a[1]);
e.node()->set_assigned_device_name("/job:worker/task:2/CPU:1");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 9);
  Node* data_after1 = GetNodeByName("a/data_after/_0", &graph);
  Node* data_after2 = GetNodeByName("a/data_after/_1", &graph);
  ASSERT_NE(data_after1, nullptr);
  ASSERT_NE(data_after2, nullptr);
  if (data_after1->assigned_device_name() ==
      "/job:worker/task:2/device:CPU:0") {
    std::swap(data_after1, data_after2);
  }
EXPECT_EQ(data_after1->op_def().name(), "IdentityN");
EXPECT_EQ(data_after1->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
EXPECT_EQ(data_after1->def().input_size(), 2);
EXPECT_EQ(data_after1->def().input(0), "a");
EXPECT_EQ(data_after1->def().input(1), "a:1");
EXPECT_EQ(data_after2->op_def().name(), "IdentityN");
EXPECT_EQ(data_after2->assigned_device_name(),
"/job:worker/task:2/device:CPU:0");
EXPECT_EQ(data_after2->def().input_size(), 2);
EXPECT_EQ(data_after2->def().input(0), "a");
EXPECT_EQ(data_after2->def().input(1), "a:1");
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
std::unordered_map<string, const NodeDef*> map;
for (auto& node : graph_def.node()) {
map[node.name()] = &node;
}
EXPECT_EQ(map["b"]->input(0), data_after1->name());
EXPECT_EQ(map["c"]->input(0), data_after1->name() + ":1");
EXPECT_EQ(map["d"]->input(0), data_after2->name());
EXPECT_EQ(map["e"]->input(0), data_after2->name() + ":1");
}
TEST(OptimizeCrossHostControlDepsTest,
CreatesIdentityNodesWhenInputsIdentical) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto c1 = ops::Const(scope.WithOpName("c1"), 1.0f);
auto c2 = ops::Const(scope.WithOpName("c2"), 2.0f);
auto a = ops::IdentityN(scope.WithOpName("a"), {c1, c2});
a.operation.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Identity(scope.WithOpName("b"), a[0]);
auto c = ops::Identity(scope.WithOpName("c"), a[0]);
auto d = ops::Identity(scope.WithOpName("d"), a[0]);
auto e = ops::Identity(scope.WithOpName("e"), a[0]);
b.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
c.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
e.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 7);
TF_ASSERT_OK(OptimizeCrossHostDataOutputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 8);
Node* data_after = GetNodeByName("a/data_after/_0", &graph);
ASSERT_NE(data_after, nullptr);
EXPECT_EQ(data_after->op_def().name(), "Identity");
EXPECT_EQ(data_after->assigned_device_name(),
"/job:worker/task:1/device:CPU:0");
EXPECT_EQ(data_after->def().input_size(), 1);
EXPECT_EQ(data_after->def().input(0)[0], 'a');
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
std::unordered_map<string, const NodeDef*> map;
for (auto& node : graph_def.node()) {
map[node.name()] = &node;
}
EXPECT_EQ(map["b"]->input(0), data_after->name());
EXPECT_EQ(map["c"]->input(0), data_after->name());
EXPECT_EQ(map["d"]->input(0), data_after->name());
EXPECT_EQ(map["e"]->input(0), data_after->name());
}
TEST(OptimizeCrossHostControlDepsTest, OptimizeCrossHostControlInputEdges) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto a = ops::Const(scope.WithOpName("a"), 1.0f);
a.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
auto b = ops::Const(scope.WithOpName("b"), 2.0f);
b.node()->set_assigned_device_name("/job:worker/task:0/CPU:1");
auto c = ops::Const(scope.WithOpName("c"), 1.0f);
c.node()->set_assigned_device_name("/job:worker/task:0/CPU:2");
auto d = ops::Const(
scope.WithOpName("d").WithControlDependencies({a.op(), b.op(), c.op()}),
4.0f);
d.node()->set_assigned_device_name("/job:worker/task:1/CPU:0");
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, 10));
ASSERT_EQ(graph.num_op_nodes(), 4);
TF_ASSERT_OK(OptimizeCrossHostControlInputEdges(
&graph, 2));
ASSERT_EQ(graph.num_op_nodes(), 5);
Node* control_before = GetNodeByName("d/control_before/_0", &graph);
ASSERT_NE(control_before, nullptr);
EXPECT_EQ(control_before->op_def().name(), "NoOp");
EXPECT_EQ(control_before->assigned_device_name(),
"/job:worker/task:0/device:CPU:0");
}
TEST(OptimizeCrossHostControlDepsTest, LargeGraph) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
constexpr int size = 750;
std::vector<Operation> layer1;
for (int i = 0; i < size; ++i) {
auto n = ops::Const(scope, 1.0f);
n.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
layer1.push_back(n.op());
}
for (int j = 0; j < size; ++j) {
auto d = ops::Const(scope.WithControlDependencies(layer1), 1.0f);
d.node()->set_assigned_device_name("/job:worker/task:0/CPU:0");
}
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
ASSERT_EQ(graph.num_op_nodes(), size * 2);
TF_ASSERT_OK(OptimizeCrossHostControlInputEdges(
&graph, size));
TF_ASSERT_OK(OptimizeCrossHostControlOutputEdges(
&graph, size));
ASSERT_EQ(graph.num_op_nodes(), size * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_cross_host_control_deps.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_cross_host_control_deps_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f01b2e57-26fb-44db-948b-b0d918a37457 | cpp | tensorflow/tensorflow | eval_const_tensor | tensorflow/core/common_runtime/eval_const_tensor.cc | tensorflow/core/common_runtime/eval_const_tensor_test.cc | #include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
using ::tensorflow::shape_inference::InferenceContext;
bool IsRank(const Node& n) { return n.type_string() == "Rank"; }
bool IsSize(const Node& n) { return n.type_string() == "Size"; }
bool IsShape(const Node& n) { return n.type_string() == "Shape"; }
bool IsStridedSlice(const Node& n) { return n.type_string() == "StridedSlice"; }
bool IsPlaceholderWithDefault(const Node& n) {
return n.type_string() == "PlaceholderWithDefault";
}
bool IsUnstack(const Node& n) { return n.type_string() == "Unpack"; }
bool HasIntAttr(const Node& n, absl::string_view name, int64_t expected) {
int64_t actual;
return TryGetNodeAttr(n.def(), name, &actual) && actual == expected;
}
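// If `node` is a Const holding a single DT_INT32 or DT_INT64 element,
// returns that value; otherwise returns nullopt.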
std::optional<int64_t> GetIntConst(const Node& node) {
const TensorProto* proto;
Tensor tensor;
if (node.IsConstant() && TryGetNodeAttr(node.def(), "value", &proto) &&
(proto->dtype() == DT_INT32 || proto->dtype() == DT_INT64) &&
TensorShape(proto->tensor_shape()).num_elements() == 1 &&
tensor.FromProto(*proto)) {
if (proto->dtype() == DT_INT32) {
return *static_cast<const int32_t*>(tensor.data());
} else {
return *static_cast<const int64_t*>(tensor.data());
}
}
return std::nullopt;
}
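// Returns the dimension index selected by `node` when it is an axis-0
// Unpack or a shrink-axis StridedSlice with a constant begin index.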
std::optional<int64_t> GetSliceIndex(const Node& node, const int node_output) {
std::optional<int64_t> ix;
if (IsUnstack(node)) {
if (HasIntAttr(node, "axis", 0)) {
ix = node_output;
}
} else if (IsStridedSlice(node)) {
const Edge* edge;
if (HasIntAttr(node, "begin_mask", 0) && HasIntAttr(node, "end_mask", 0) &&
HasIntAttr(node, "ellipsis_mask", 0) &&
HasIntAttr(node, "new_axis_mask", 0) &&
HasIntAttr(node, "shrink_axis_mask", 1) &&
node.input_edge(1, &edge).ok()) {
ix = GetIntConst(*edge->src());
}
}
return ix;
}
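// Attempts to materialize the requested output from shape metadata alone:
// Shape/Rank/Size over inputs with sufficiently known shapes, and
// single-dimension slices of a Shape result. Returns nullopt when the value
// cannot be inferred without executing the graph.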
absl::StatusOr<std::optional<Tensor>> TryInferFromShapes(
const Node& node, const int node_output, const ShapeRefiner& refiner) {
std::optional<Tensor> result;
if (node.num_inputs() == 0 || node_output >= node.num_outputs()) {
return result;
}
const auto dtype = node.output_type(node_output);
if (dtype != DT_INT32 && dtype != DT_INT64) {
return result;
}
absl::InlinedVector<int64_t, 8> data;
std::optional<TensorShape> shape;
const Edge* edge;
if (IsShape(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
for (int i = 0; i < rank; ++i) {
data.push_back(c->Value(c->Dim(c->input(0), i)));
}
shape.emplace({rank});
}
} else if (IsRank(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->RankKnown(c->input(0))) {
data.push_back(c->Rank(c->input(0)));
shape.emplace();
}
} else if (IsSize(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
int64_t size = 1;
for (int i = 0, rank = c->Rank(c->input(0)); i < rank; i++) {
size *= c->Value(c->Dim(c->input(0), i));
}
data.push_back(size);
shape.emplace();
}
} else if (node.input_edge(0, &edge).ok() && IsShape(*edge->src())) {
InferenceContext* c = refiner.GetContext(edge->src());
if (c != nullptr && c->RankKnown(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
std::optional<int64_t> ix = GetSliceIndex(node, node_output);
if (ix.has_value() && -rank <= *ix && *ix < rank &&
c->ValueKnown(c->Dim(c->input(0), *ix))) {
data.push_back(c->Value(c->Dim(c->input(0), *ix)));
shape.emplace();
}
}
}
if (!shape.has_value()) {
return result;
}
if (dtype == DT_INT32) {
for (const int64_t value : data) {
if (TF_PREDICT_FALSE(value >= std::numeric_limits<int32_t>::max())) {
return errors::InvalidArgument("Value is out of int32 range: ", value);
}
}
}
result.emplace(dtype, *shape);
if (dtype == DT_INT32) {
absl::c_copy(data, static_cast<int32_t*>(result->data()));
} else {
absl::c_copy(data, static_cast<int64_t*>(result->data()));
}
return result;
}
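// A node qualifies for host evaluation only if it is stateless, carries no
// function-valued attributes, is not control flow or a function call, and
// has a registered CPU kernel.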
bool IsSupportedForEvaluation(const Node& node) {
if (node.IsConstant() || node.IsArg()) {
return true;
}
if (node.num_inputs() == 0 || IsPlaceholderWithDefault(node)) {
return false;
}
if (node.op_def().is_stateful()) {
return false;
}
if (node.IsEnter() || node.IsExit() || node.IsMerge()) {
return false;
}
if (node.IsFunctionCall()) {
return false;
}
for (const auto& [name, attr] : node.attrs()) {
if (attr.has_func() || !attr.list().func().empty()) {
return false;
}
}
return KernelDefAvailable(DEVICE_CPU, node.def());
}
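// A small graph plus constant feeds extracted for host-side evaluation; its
// producer version is pinned to the original graph's version.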
struct Subgraph {
Subgraph(const OpRegistryInterface* op_registry, int32_t graph_def_version)
: graph(op_registry == nullptr ? OpRegistry::Global() : op_registry) {
VersionDef versions = graph.versions();
versions.set_producer(graph_def_version);
graph.set_versions(versions);
}
GraphRunner::NamedTensorList inputs;
Graph graph;
};
using NodeOutput = std::pair<const Node*, int>;
std::string OutputName(const NodeOutput& output) {
return strings::StrCat(output.first->name(), ":", output.second);
}
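// Walks backwards from `target_node`, stopping wherever `lookup` or shape
// inference yields a constant; those tensors become feeds of the extracted
// subgraph. Returns nullptr if any reachable node is unsupported or if some
// reachable _Arg is not provided by `lookup`.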
absl::StatusOr<std::unique_ptr<Subgraph>> ExtractConstantSubgraph(
const Node& target_node, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const OpRegistryInterface* op_registry, const int32_t graph_def_version) {
std::unique_ptr<Subgraph> subgraph;
if (!target_node.IsEnter() && !IsSupportedForEvaluation(target_node)) {
return subgraph;
}
std::vector<const Edge*> edges;
for (const Edge* edge : target_node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
absl::flat_hash_map<const Node*, Node*> new_by_old_node;
absl::InlinedVector<const Node*, 8> arg_nodes;
absl::flat_hash_map<NodeOutput, Tensor> const_inputs;
for (int edge_ix = 0; edge_ix < edges.size(); ++edge_ix) {
const Edge& edge = *edges[edge_ix];
const Node& node = *edge.src();
const NodeOutput node_output = {&node, edge.src_output()};
if (new_by_old_node.contains(&node) || const_inputs.contains(node_output)) {
continue;
}
if (node.IsArg()) {
arg_nodes.push_back(&node);
continue;
}
auto tensor = lookup(node, node_output.second);
if (!tensor.has_value()) {
TF_ASSIGN_OR_RETURN(
tensor, TryInferFromShapes(node, node_output.second, refiner));
}
if (tensor.has_value()) {
const_inputs.emplace(node_output, *std::move(tensor));
} else if (!IsSupportedForEvaluation(node)) {
return subgraph;
} else {
new_by_old_node.emplace(&node, nullptr);
for (const Edge* edge : node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
}
}
bool all_args_provided = true;
for (const Node* node : arg_nodes) {
auto tensor = lookup(*node, 0);
all_args_provided = all_args_provided && tensor.has_value();
if (all_args_provided) {
const_inputs.emplace(NodeOutput{node, 0}, *std::move(tensor));
}
}
if (!all_args_provided) {
return subgraph;
}
subgraph = std::make_unique<Subgraph>(op_registry, graph_def_version);
auto& inputs = subgraph->inputs;
inputs.reserve(const_inputs.size());
for (auto& [node_output, tensor] : const_inputs) {
if (!new_by_old_node.contains(node_output.first)) {
inputs.emplace_back(OutputName(node_output), std::move(tensor));
}
}
Graph& graph = subgraph->graph;
new_by_old_node[&target_node] = graph.CopyNode(&target_node);
for (const Edge* edge : edges) {
Node*& src = new_by_old_node[edge->src()];
if (src == nullptr) {
src = graph.CopyNode(edge->src());
}
Node* dst = new_by_old_node.at(edge->dst());
graph.AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
return subgraph;
}
}
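// Resolution order: the caller-provided lookup, then the Const attribute,
// then shape-based inference, and finally (when a runner is supplied)
// constant folding of an extracted subgraph via GraphRunner.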
absl::StatusOr<std::optional<Tensor>> EvaluateConstantTensor(
const Node& node, const int node_output, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const std::optional<EvaluateConstantTensorRunner> runner) {
std::optional<Tensor> result;
if (result = lookup(node, node_output); result.has_value()) {
return result;
}
if (node.IsArg()) {
return result;
}
if (node.IsConstant()) {
const TensorProto* proto;
TF_RETURN_IF_ERROR(GetNodeAttr(node.def(), "value", &proto));
result.emplace();
if (TF_PREDICT_FALSE(!result->FromProto(*proto))) {
return errors::InvalidArgument("Unable to evaluate a constant node");
}
return result;
}
TF_ASSIGN_OR_RETURN(result, TryInferFromShapes(node, node_output, refiner));
if (result.has_value()) {
return result;
}
if (!runner.has_value()) {
return result;
}
TF_ASSIGN_OR_RETURN(
const auto subgraph,
ExtractConstantSubgraph(node, refiner, lookup, runner->op_registry,
runner->graph_def_version));
if (subgraph != nullptr) {
GraphRunner* graph_runner = runner->graph_runner;
std::unique_ptr<GraphRunner> tmp_graph_runner;
if (graph_runner == nullptr) {
tmp_graph_runner = std::make_unique<GraphRunner>(Env::Default());
graph_runner = tmp_graph_runner.get();
}
FunctionLibraryRuntime* function_library = nullptr;
std::vector<Tensor> outputs;
auto status =
graph_runner->Run(&subgraph->graph, function_library, subgraph->inputs,
{OutputName({&node, node_output})}, &outputs);
if (status.ok()) {
result = std::move(outputs[0]);
}
}
return result;
}
} | #include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/logging_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class EvaluateConstantTensorTest : public ::testing::Test {
public:
EvaluateConstantTensorTest& WithRunner() {
runner_ = EvaluateConstantTensorRunner{
scope_.graph()->op_registry(),
scope_.graph()->versions().producer(),
};
return *this;
}
absl::StatusOr<std::optional<Tensor>> Run(const Output& output) {
TF_RETURN_IF_ERROR(scope_.status());
const auto& graph = *scope_.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_RETURN_IF_ERROR(refiner.AddNode(node));
}
auto lookup = [this](const Node& node, int index) -> std::optional<Tensor> {
requested_.insert(&node);
auto it = cache_.find(std::make_pair(&node, index));
if (it == cache_.end()) {
return std::nullopt;
}
return it->second;
};
auto runner = runner_;
runner_ = std::nullopt;
requested_.clear();
return EvaluateConstantTensor(*output.node(), output.index(), refiner,
lookup, runner);
}
void ExpectTensor(const Output& output, const Tensor& expected) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_TRUE(actual.has_value());
test::ExpectEqual(*actual, expected);
}
void ExpectNull(const Output& output) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_FALSE(actual.has_value());
}
void ExpectError(const Output& output) { EXPECT_FALSE(Run(output).ok()); }
protected:
Scope scope_ = Scope::NewRootScope();
absl::flat_hash_map<std::pair<const Node*, int>, Tensor> cache_;
absl::flat_hash_set<const Node*> requested_;
std::optional<EvaluateConstantTensorRunner> runner_ = std::nullopt;
};
template <typename T>
Output Placeholder(const Scope& scope, const PartialTensorShape& shape) {
return ops::Placeholder(scope, DataTypeToEnum<T>::value,
ops::Placeholder::Shape(shape));
}
Output Slice(const Scope& scope, const Output& input, int index) {
return ops::StridedSlice(
scope, input, ops::Const(scope, {index}), ops::Const(scope, {index + 1}),
ops::Const(scope, {1}), ops::StridedSlice::ShrinkAxisMask(1));
}
TEST_F(EvaluateConstantTensorTest, Constant) {
auto expected = test::AsTensor<float>({1, 2, 3});
auto op = ops::Const(scope_, expected);
ExpectTensor(op, expected);
}
TEST_F(EvaluateConstantTensorTest, Shape) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto shape = ops::Shape(scope_, input);
ExpectTensor(shape, test::AsTensor<int32_t>({2, 3, 5}));
}
TEST_F(EvaluateConstantTensorTest, ValueOutOfRange) {
const int64_t dim = std::numeric_limits<int32_t>::max();
auto input = Placeholder<float>(scope_, {dim});
auto shape32 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT32));
auto shape64 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT64));
ExpectError(shape32);
ExpectTensor(shape64, test::AsTensor<int64_t>({dim}));
}
TEST_F(EvaluateConstantTensorTest, PartialShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
ExpectNull(shape);
}
TEST_F(EvaluateConstantTensorTest, Rank) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto rank = ops::Rank(scope_, input);
ExpectTensor(rank, test::AsScalar<int32_t>(3));
}
TEST_F(EvaluateConstantTensorTest, Size) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto size = ops::Size(scope_, input);
ExpectTensor(size, test::AsScalar<int32_t>(2 * 3 * 5));
}
TEST_F(EvaluateConstantTensorTest, PartialSize) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto size = ops::Size(scope_, input);
ExpectNull(size);
}
TEST_F(EvaluateConstantTensorTest, SliceShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto slice0 = Slice(scope_, shape, 0);
auto slice1 = Slice(scope_, shape, 1);
auto slice2 = Slice(scope_, shape, 2);
ExpectTensor(slice0, test::AsScalar<int32_t>(2));
ExpectNull(slice1);
ExpectTensor(slice2, test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, UnpackShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto unpack = ops::Unstack(scope_, shape, 3, ops::Unstack::Axis(0));
ExpectTensor(unpack[0], test::AsScalar<int32_t>(2));
ExpectNull(unpack[1]);
ExpectTensor(unpack[2], test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, Lookup) {
auto input = Placeholder<float>(scope_, {2});
ExpectNull(input);
auto expected = test::AsTensor<float>({3, 5});
cache_.emplace(std::make_pair(input.node(), 0), expected);
ExpectTensor(input, expected);
}
TEST_F(EvaluateConstantTensorTest, ConstantFolding) {
auto input1 = Placeholder<float>(scope_, {2, -1, 5});
auto input2 = ops::_Arg(scope_, DT_INT32, 0);
auto shape = ops::Shape(scope_, input1);
auto result = ops::Add(scope_, Slice(scope_, shape, 2), input2);
ExpectNull(result);
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(input2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 + 7));
}
TEST_F(EvaluateConstantTensorTest, DoNotEvalPlaceholderWithDefault) {
auto tensor = test::AsTensor<float>({1, 2, 3});
auto result1 = ops::Identity(scope_, tensor);
auto result2 = ops::PlaceholderWithDefault(scope_, tensor, tensor.shape());
WithRunner().ExpectTensor(result1, tensor);
WithRunner().ExpectNull(result2);
}
TEST_F(EvaluateConstantTensorTest, AllArgsMustBeRequestedForConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto result = ops::Mul(scope_, arg0, ops::Add(scope_, arg1, arg2));
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
EXPECT_TRUE(requested_.contains(arg0.node()));
EXPECT_TRUE(requested_.contains(arg1.node()));
EXPECT_TRUE(requested_.contains(arg2.node()));
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(5));
cache_.emplace(std::make_pair(arg2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 * (3 + 7)));
}
TEST_F(EvaluateConstantTensorTest, NoArgsMustBeRequestedForNonConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto feed = Placeholder<int32_t>(scope_, {});
auto result = ops::Mul(scope_, arg0,
ops::Add(scope_, arg1, ops::Add(scope_, arg2, feed)));
WithRunner().ExpectNull(result);
EXPECT_FALSE(requested_.contains(arg0.node()));
EXPECT_FALSE(requested_.contains(arg1.node()));
EXPECT_FALSE(requested_.contains(arg2.node()));
EXPECT_TRUE(requested_.contains(feed.node()));
}
TEST_F(EvaluateConstantTensorTest, MissingKernel) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto print = ops::Print(scope_, arg1, {arg1.output});
auto result = ops::Add(scope_, arg0, print);
ASSERT_FALSE(KernelDefAvailable(DEVICE_CPU, print.node()->def()));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(5));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(print.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(3 + 7));
}
template <bool kEvaluated>
void BM_ConstantFolding(::testing::benchmark::State& state) {
Scope scope = Scope::NewRootScope();
auto input1 = Placeholder<float>(scope, {2, -1, 5});
auto input2 = ops::_Arg(scope, DT_INT32, 0);
auto input3 = ops::_Arg(scope, DT_INT32, 0);
auto shape = ops::Shape(scope, input1);
auto result =
ops::Mul(scope, ops::Add(scope, Slice(scope, shape, 2), input2), input3);
TF_CHECK_OK(scope.status());
const auto& graph = *scope.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_CHECK_OK(refiner.AddNode(node));
}
auto tensor2 = test::AsScalar<int32_t>(7);
auto tensor3 = test::AsScalar<int32_t>(11);
auto lookup = [&](const Node& node, int index) -> std::optional<Tensor> {
if (kEvaluated && &node == input2.node()) {
return tensor2;
}
if (&node == input3.node()) {
return tensor3;
}
return std::nullopt;
};
GraphRunner graph_runner(Env::Default());
const EvaluateConstantTensorRunner runner = {
graph.op_registry(), graph.versions().producer(), &graph_runner};
for (auto unused : state) {
auto status_or =
EvaluateConstantTensor(*result.node(), 0, refiner, lookup, runner);
TF_CHECK_OK(status_or.status());
CHECK_EQ(status_or->has_value(), kEvaluated);
}
}
BENCHMARK_TEMPLATE(BM_ConstantFolding, false);
BENCHMARK_TEMPLATE(BM_ConstantFolding, true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eval_const_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eval_const_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e651f8c4-71e9-4cab-a12a-ea760481aaca | cpp | tensorflow/tensorflow | mkl_cpu_allocator | tensorflow/core/common_runtime/mkl_cpu_allocator.cc | tensorflow/core/common_runtime/mkl_cpu_allocator_test.cc | #ifdef INTEL_MKL
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
namespace tensorflow {
constexpr const char* MklCPUAllocator::kMaxLimitStr;
constexpr const size_t MklCPUAllocator::kDefaultMaxLimit;
}
#endif | #if defined(INTEL_MKL)
#include "tensorflow/core/common_runtime/mkl_cpu_allocator.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(MKLBFCAllocatorTest, TestMaxLimit) {
setenv(MklCPUAllocator::kMaxLimitStr, "1000", 1);
MklCPUAllocator a;
TF_EXPECT_OK(a.Initialize());
auto stats = a.GetStats();
EXPECT_EQ(stats->bytes_limit, 1000);
unsetenv(MklCPUAllocator::kMaxLimitStr);
TF_EXPECT_OK(a.Initialize());
stats = a.GetStats();
uint64 max_mem_bytes = MklCPUAllocator::kDefaultMaxLimit;
#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
max_mem_bytes =
(uint64)sysconf(_SC_PHYS_PAGES) * (uint64)sysconf(_SC_PAGESIZE);
#endif
EXPECT_EQ(stats->bytes_limit, max_mem_bytes);
setenv(MklCPUAllocator::kMaxLimitStr, "wrong-input", 1);
EXPECT_TRUE(errors::IsInvalidArgument(a.Initialize()));
setenv(MklCPUAllocator::kMaxLimitStr, "-20", 1);
EXPECT_TRUE(errors::IsInvalidArgument(a.Initialize()));
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/mkl_cpu_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/mkl_cpu_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
502f5270-0d6c-47f6-ba7d-8018f5090637 | cpp | tensorflow/tensorflow | request_cost_accessor_registry | tensorflow/core/common_runtime/request_cost_accessor_registry.cc | tensorflow/core/common_runtime/request_cost_accessor_registry_test.cc | #include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
using RegistrationMap =
absl::flat_hash_map<std::string, RequestCostAccessorRegistry::Creator>;
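// Returns the lazily created, process-wide registration map. It is
// intentionally leaked so lookups remain valid for the process lifetime.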
RegistrationMap* GetRegistrationMap() {
static RegistrationMap* registered_request_cost_accessors =
new RegistrationMap;
return registered_request_cost_accessors;
}
}
std::unique_ptr<RequestCostAccessor>
RequestCostAccessorRegistry::CreateByNameOrNull(absl::string_view name) {
const auto it = GetRegistrationMap()->find(name);
if (it == GetRegistrationMap()->end()) return nullptr;
return std::unique_ptr<RequestCostAccessor>(it->second());
}
void RequestCostAccessorRegistry::RegisterRequestCostAccessor(
absl::string_view name, Creator creator) {
const auto it = GetRegistrationMap()->find(name);
CHECK(it == GetRegistrationMap()->end())
<< "RequestCostAccessor " << name << " is registered twice.";
GetRegistrationMap()->emplace(name, std::move(creator));
}
} | #include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/request_cost_accessor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class TestRequestCostAccessor : public RequestCostAccessor {
public:
RequestCost* GetRequestCost() const override { return nullptr; }
};
REGISTER_REQUEST_COST_ACCESSOR("test", TestRequestCostAccessor);
TEST(RequestCostAccessorRegistryTest, Basic) {
std::unique_ptr<const RequestCostAccessor> test_accessor =
RequestCostAccessorRegistry::CreateByNameOrNull("unregistered");
EXPECT_EQ(test_accessor, nullptr);
test_accessor = RequestCostAccessorRegistry::CreateByNameOrNull("test");
EXPECT_NE(test_accessor, nullptr);
}
TEST(RequestCostAccessorRegistryDeathTest, CrashWhenRegisterTwice) {
const auto creator = []() {
return std::make_unique<TestRequestCostAccessor>();
};
EXPECT_DEATH(
RequestCostAccessorRegistry::RegisterRequestCostAccessor("test", creator),
"RequestCostAccessor test is registered twice.");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/request_cost_accessor_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/request_cost_accessor_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa345464-a4cd-4aaa-9072-1bca6967a7b8 | cpp | tensorflow/tensorflow | gradients | tensorflow/c/eager/gradients.cc | tensorflow/c/eager/gradients_test.cc | #include "tensorflow/c/eager/gradients.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/gradients_internal.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace gradients {
namespace {
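// Tape ids are derived from the handle's address, which is stable for the
// lifetime of the tensor handle.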
int64_t ToId(const AbstractTensorHandle* t) {
return static_cast<int64_t>(reinterpret_cast<uintptr_t>(t));
}
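// Emits a ZerosLike op for `t`; under tracing, the op is given a
// deterministic name derived from the tensor id.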
Status ZerosLike(AbstractContext* ctx, AbstractTensorHandle* t,
AbstractTensorHandle** result) {
AbstractOperationPtr op(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op->Reset("ZerosLike", nullptr));
if (isa<tracing::TracingOperation>(op.get())) {
TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingOperation>(op.get())->SetOpName(
absl::StrCat("ZerosLike", ToId(t)).c_str()));
}
TF_RETURN_IF_ERROR(op->AddInput(t));
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs));
*result = outputs[0];
return absl::OkStatus();
}
}
Status GradientRegistry::Register(
const string& op_name, GradientFunctionFactory gradient_function_factory) {
auto iter = registry_.find(op_name);
if (iter != registry_.end()) {
const string error_msg = "Gradient already exists for op: " + op_name + ".";
return errors::AlreadyExists(error_msg);
}
registry_.insert({op_name, gradient_function_factory});
return absl::OkStatus();
}
Status GradientRegistry::Lookup(
const ForwardOperation& op,
std::unique_ptr<GradientFunction>* gradient_function) const {
auto iter = registry_.find(op.op_name);
if (iter == registry_.end()) {
const string error_msg = "No gradient defined for op: " + op.op_name + ".";
return errors::NotFound(error_msg);
}
gradient_function->reset(iter->second(op));
return absl::OkStatus();
}
TapeTensor::TapeTensor(AbstractTensorHandle* handle) : handle_(handle) {
handle_->Ref();
}
TapeTensor::TapeTensor(const TapeTensor& other) {
handle_ = other.handle_;
handle_->Ref();
}
TapeTensor::~TapeTensor() { handle_->Unref(); }
int64_t TapeTensor::GetID() const { return ToId(handle_); }
tensorflow::DataType TapeTensor::GetDType() const {
return handle_->DataType();
}
AbstractTensorHandle* TapeTensor::GetHandle() const { return handle_; }
AbstractTensorHandle* TapeTensor::ZerosLike() const { return nullptr; }
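// Adapts AbstractTensorHandle and GradientFunction to the generic tape
// interface: aggregates gradients with AddN, builds OnesLike seeds, and
// dispatches registered backward functions.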
class TapeVSpace
: public eager::VSpace<AbstractTensorHandle, GradientFunction, TapeTensor> {
public:
explicit TapeVSpace(AbstractContext* ctx) : ctx_(ctx) {}
~TapeVSpace() override {}
int64_t NumElements(AbstractTensorHandle* tensor) const override;
AbstractTensorHandle* AggregateGradients(
gtl::ArraySlice<AbstractTensorHandle*> gradient_tensors) const override;
Status CallBackwardFunction(
const string& op_type, GradientFunction* gradient_function,
const std::vector<int64_t>& unneeded_gradients,
gtl::ArraySlice<AbstractTensorHandle*> output_gradients,
absl::Span<AbstractTensorHandle*> result) const override;
Status BuildOnesLike(const TapeTensor& t,
AbstractTensorHandle** result) const override;
int64_t TensorId(AbstractTensorHandle* tensor) const override;
TapeTensor TapeTensorFromGradient(AbstractTensorHandle* g) const override;
void MarkAsResult(AbstractTensorHandle* gradient) const override;
void DeleteGradient(AbstractTensorHandle* gradient) const override;
private:
AbstractContext* ctx_;
};
int64_t TapeVSpace::NumElements(AbstractTensorHandle* tensor) const {
return 1;
}
AbstractTensorHandle* TapeVSpace::AggregateGradients(
gtl::ArraySlice<AbstractTensorHandle*> gradient_tensors) const {
if (gradient_tensors.size() == 1) {
return gradient_tensors[0];
}
AbstractOperationPtr op(ctx_->CreateOperation());
Status s = op->Reset("AddN", nullptr);
if (!s.ok()) {
return nullptr;
}
s = op->AddInputList(gradient_tensors);
if (!s.ok()) {
return nullptr;
}
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
s = op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs);
if (!s.ok()) {
return nullptr;
}
return outputs[0];
}
Status TapeVSpace::CallBackwardFunction(
const string& op_type, GradientFunction* gradient_function,
const std::vector<int64_t>& unneeded_gradients,
gtl::ArraySlice<AbstractTensorHandle*> output_gradients,
absl::Span<AbstractTensorHandle*> result) const {
if (gradient_function == nullptr) {
return errors::InvalidArgument(
"Provided null gradient_function for '", op_type, "'.\n",
"If the intent is to treat this op as non-differentiable consider "
"using RegisterNotDifferentiable or "
"NotDifferentiableGradientFunction.");
}
return gradient_function->Compute(ctx_, output_gradients, result);
}
Status TapeVSpace::BuildOnesLike(const TapeTensor& t,
AbstractTensorHandle** result) const {
AbstractOperationPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(op->Reset("OnesLike", nullptr));
if (isa<tracing::TracingOperation>(op.get())) {
TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingOperation>(op.get())->SetOpName(
absl::StrCat("OnesLike", ToId(t.GetHandle())).c_str()));
}
TF_RETURN_IF_ERROR(op->AddInput(t.GetHandle()));
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs));
*result = outputs[0];
return absl::OkStatus();
}
int64_t TapeVSpace::TensorId(AbstractTensorHandle* tensor) const {
return ToId(tensor);
}
TapeTensor TapeVSpace::TapeTensorFromGradient(AbstractTensorHandle* g) const {
return TapeTensor(g);
}
void TapeVSpace::MarkAsResult(AbstractTensorHandle* gradient) const {}
void TapeVSpace::DeleteGradient(AbstractTensorHandle* gradient) const {
gradient->Unref();
}
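// The Tape methods below translate AbstractTensorHandle pointers into the
// integer tensor ids understood by the underlying GradientTape.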
void Tape::Watch(const AbstractTensorHandle* t) {
GradientTape::Watch(ToId(t));
}
void Tape::RecordOperation(absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle* const> outputs,
GradientFunction* gradient_function,
const string& op_name) {
std::vector<int64_t> input_ids(inputs.size());
std::vector<tensorflow::DataType> input_dtypes(inputs.size());
for (int i = 0; i < inputs.size(); i++) {
input_ids[i] = ToId(inputs[i]);
input_dtypes[i] = inputs[i]->DataType();
}
std::vector<TapeTensor> tape_tensors;
tape_tensors.reserve(outputs.size());
for (auto t : outputs) {
tape_tensors.push_back(TapeTensor(t));
}
GradientTape::RecordOperation(
op_name, tape_tensors, input_ids, input_dtypes,
[gradient_function]() -> GradientFunction* { return gradient_function; },
[](GradientFunction* ptr) {
if (ptr) {
delete ptr;
}
});
}
bool Tape::ShouldRecord(
absl::Span<const AbstractTensorHandle* const> tensors) const {
std::vector<int64_t> tensor_ids(tensors.size());
std::vector<tensorflow::DataType> tensor_dtypes(tensors.size());
for (int i = 0; i < tensors.size(); i++) {
tensor_ids[i] = ToId(tensors[i]);
tensor_dtypes[i] = tensors[i]->DataType();
}
return GradientTape::ShouldRecord(tensor_ids, tensor_dtypes);
}
void Tape::DeleteTrace(const AbstractTensorHandle* t) {
GradientTape::DeleteTrace(ToId(t));
}
std::vector<int64_t> MakeTensorIDList(
absl::Span<AbstractTensorHandle* const> tensors) {
std::vector<int64_t> ids(tensors.size());
for (int i = 0; i < tensors.size(); i++) {
ids[i] = ToId(tensors[i]);
}
return ids;
}
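// Sources that also appear as targets are collected up front so the tape
// can seed their gradients correctly.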
Status Tape::ComputeGradient(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> targets,
absl::Span<AbstractTensorHandle* const> sources,
absl::Span<AbstractTensorHandle* const> output_gradients,
absl::Span<AbstractTensorHandle*> result) {
TapeVSpace vspace(ctx);
std::vector<int64_t> target_tensor_ids = MakeTensorIDList(targets);
std::vector<int64_t> source_tensor_ids = MakeTensorIDList(sources);
tensorflow::gtl::FlatSet<int64_t> sources_set(source_tensor_ids.begin(),
source_tensor_ids.end());
std::unordered_map<int64_t, TapeTensor> sources_that_are_targets;
for (int i = 0; i < target_tensor_ids.size(); ++i) {
int64_t target_id = target_tensor_ids[i];
if (sources_set.find(target_id) != sources_set.end()) {
auto tensor = targets[i];
sources_that_are_targets.insert(
std::make_pair(target_id, TapeTensor(tensor)));
}
}
TF_RETURN_IF_ERROR(GradientTape::ComputeGradient(
vspace, target_tensor_ids, source_tensor_ids, sources_that_are_targets,
output_gradients, result, false));
return absl::OkStatus();
}
namespace internal {
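// These helpers mirror the AbstractOperation builder API while recording
// inputs and attributes on `forward_op_`, so that Execute() can look up a
// gradient function and register the op on the tape.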
Status Reset(AbstractOperation* op_, const char* op,
const char* raw_device_name, ForwardOperation* forward_op_) {
forward_op_->op_name = op;
forward_op_->attrs.Reset(op);
return op_->Reset(op, raw_device_name);
}
Status AddInput(AbstractOperation* op_, AbstractTensorHandle* input,
ForwardOperation* forward_op_) {
TF_RETURN_IF_ERROR(op_->AddInput(input));
forward_op_->inputs.push_back(input);
return absl::OkStatus();
}
Status AddInputList(AbstractOperation* op_,
absl::Span<AbstractTensorHandle* const> inputs,
ForwardOperation* forward_op_) {
TF_RETURN_IF_ERROR(op_->AddInputList(inputs));
for (auto input : inputs) {
forward_op_->inputs.push_back(input);
}
return absl::OkStatus();
}
Status SetAttrString(AbstractOperation* op_, const char* attr_name,
const char* data, size_t length,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, StringPiece(data, length));
return op_->SetAttrString(attr_name, data, length);
}
Status SetAttrInt(AbstractOperation* op_, const char* attr_name, int64_t value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, static_cast<int64_t>(value));
return op_->SetAttrInt(attr_name, value);
}
Status SetAttrFloat(AbstractOperation* op_, const char* attr_name, float value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrFloat(attr_name, value);
}
Status SetAttrBool(AbstractOperation* op_, const char* attr_name, bool value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrBool(attr_name, value);
}
Status SetAttrType(AbstractOperation* op_, const char* attr_name,
DataType value, ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrType(attr_name, value);
}
Status SetAttrShape(AbstractOperation* op_, const char* attr_name,
const int64_t* dims, const int num_dims,
ForwardOperation* forward_op_) {
if (num_dims > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Value specified for `", attr_name, "` has ",
num_dims,
" dimensions which is over the limit of ",
TensorShape::MaxDimensions(), ".");
}
TensorShapeProto proto;
if (num_dims < 0) {
proto.set_unknown_rank(true);
} else {
for (int d = 0; d < num_dims; ++d) {
proto.add_dim()->set_size(dims[d]);
}
}
forward_op_->attrs.Set(attr_name, proto);
return op_->SetAttrShape(attr_name, dims, num_dims);
}
Status SetAttrFunction(AbstractOperation* op_, const char* attr_name,
const AbstractOperation* value,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunction has not been implemented yet.");
}
Status SetAttrFunctionName(AbstractOperation* op_, const char* attr_name,
const char* value, size_t length,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunctionName has not been implemented "
"yet.");
}
Status SetAttrTensor(AbstractOperation* op_, const char* attr_name,
AbstractTensorInterface* tensor,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrTensor has not been implemented yet.");
}
Status SetAttrStringList(AbstractOperation* op_, const char* attr_name,
const void* const* values, const size_t* lengths,
int num_values, ForwardOperation* forward_op_) {
std::vector<StringPiece> v(num_values);
for (int i = 0; i < num_values; ++i) {
v[i] = StringPiece(static_cast<const char*>(values[i]), lengths[i]);
}
forward_op_->attrs.Set(attr_name, v);
return op_->SetAttrStringList(attr_name, values, lengths, num_values);
}
Status SetAttrFloatList(AbstractOperation* op_, const char* attr_name,
const float* values, int num_values,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const float>(values, num_values));
return op_->SetAttrFloatList(attr_name, values, num_values);
}
Status SetAttrIntList(AbstractOperation* op_, const char* attr_name,
const int64_t* values, int num_values,
ForwardOperation* forward_op_) {
  forward_op_->attrs.Set(attr_name,
                         gtl::ArraySlice<const int64_t>(values, num_values));
  return op_->SetAttrIntList(attr_name, values, num_values);
}
Status SetAttrTypeList(AbstractOperation* op_, const char* attr_name,
const DataType* values, int num_values,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const DataType>(values, num_values));
return op_->SetAttrTypeList(attr_name, values, num_values);
}
Status SetAttrBoolList(AbstractOperation* op_, const char* attr_name,
const unsigned char* values, int num_values,
ForwardOperation* forward_op_) {
std::unique_ptr<bool[]> b(new bool[num_values]);
for (int i = 0; i < num_values; ++i) {
b[i] = values[i];
}
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const bool>(b.get(), num_values));
return op_->SetAttrBoolList(attr_name, values, num_values);
}
Status SetAttrShapeList(AbstractOperation* op_, const char* attr_name,
const int64_t** dims, const int* num_dims,
int num_values, ForwardOperation* forward_op_) {
std::unique_ptr<TensorShapeProto[]> proto(new TensorShapeProto[num_values]);
for (int i = 0; i < num_values; ++i) {
const auto num_dims_i = num_dims[i];
if (num_dims_i > TensorShape::MaxDimensions()) {
return errors::InvalidArgument(
strings::StrCat("Value specified for `", attr_name, "` has ",
num_dims_i, " dimensions which is over the limit of ",
TensorShape::MaxDimensions(), "."));
}
if (num_dims_i < 0) {
proto[i].set_unknown_rank(true);
} else {
const int64_t* dims_i = dims[i];
auto proto_i = &proto[i];
for (int d = 0; d < num_dims_i; ++d) {
proto_i->add_dim()->set_size(dims_i[d]);
}
}
}
forward_op_->attrs.Set(
attr_name, gtl::ArraySlice<TensorShapeProto>(proto.get(), num_values));
return op_->SetAttrShapeList(attr_name, dims, num_dims, num_values);
}
Status SetAttrFunctionList(AbstractOperation* op_, const char* attr_name,
absl::Span<const AbstractOperation*> values,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunctionList has not been "
"implemented yet.");
}
Status Execute(AbstractOperation* op_, AbstractContext* ctx,
absl::Span<AbstractTensorHandle*> retvals, int* num_retvals,
ForwardOperation* forward_op_, Tape* tape,
const GradientRegistry& registry) {
TF_RETURN_IF_ERROR(op_->Execute(retvals, num_retvals));
for (int i = 0; i < *num_retvals; i++) {
forward_op_->outputs.push_back(retvals[i]);
}
forward_op_->attrs.BuildNodeDef();
std::unique_ptr<GradientFunction> gradient_fn;
TF_RETURN_IF_ERROR(registry.Lookup(*forward_op_, &gradient_fn));
tape->RecordOperation(forward_op_->inputs, retvals, gradient_fn.release(),
op_->Name());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/c/eager/gradients.h"
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/gradients_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/experimental/gradients/math_grad.h"
#include "tensorflow/c/experimental/gradients/not_differentiable.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using std::vector;
using tensorflow::TF_StatusPtr;
using tracing::TracingOperation;
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
Status RegisterGradients(GradientRegistry* registry) {
TF_RETURN_IF_ERROR(RegisterNotDifferentiable(registry, "CheckNumerics"));
return absl::OkStatus();
}
TEST_P(CppGradients, TestSetAttrString) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr t;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 1.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
t.reset(x_raw);
}
AbstractOperationPtr check_numerics_op(ctx->CreateOperation());
ForwardOperation forward_op;
Status s = Reset(check_numerics_op.get(), "CheckNumerics",
nullptr, &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
if (isa<TracingOperation>(check_numerics_op.get())) {
s = dyn_cast<TracingOperation>(check_numerics_op.get())
->SetOpName("check_numerics");
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
s = AddInput(check_numerics_op.get(), t.get(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
string message = "This is the way!";
s = SetAttrString(check_numerics_op.get(), "message", message.data(),
message.length(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
int num_retvals = 1;
std::vector<AbstractTensorHandle*> outputs(1);
GradientRegistry registry;
s = RegisterGradients(®istry);
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto tape = std::make_unique<Tape>(false);
s = Execute(check_numerics_op.get(), ctx.get(), absl::MakeSpan(outputs),
&num_retvals, &forward_op, tape.get(), registry);
ASSERT_EQ(errors::OK, s.code()) << s.message();
string read_message;
s = forward_op.attrs.Get("message", &read_message);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_EQ(read_message, message);
}
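// Records a Neg op on the tape with a null gradient function; computing the
// gradient is expected to fail with a descriptive InvalidArgument error.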
Status RecordOperationWithNullGradientFunctionModel(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
Tape tape(false);
tape.Watch(inputs[0]);
AbstractTensorHandle* neg_output;
TF_RETURN_IF_ERROR(ops::Neg(ctx, inputs[0], &neg_output, "Neg"));
tape.RecordOperation(inputs, {neg_output}, nullptr, "Neg");
return tape.ComputeGradient(ctx,
{neg_output},
inputs,
{}, outputs);
}
TEST_P(CppGradients, TestRecordOperationWithNullGradientFunctionRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
std::vector<AbstractTensorHandle*> outputs(1);
Status s = RunModel(RecordOperationWithNullGradientFunctionModel, ctx.get(),
{x.get()}, absl::MakeSpan(outputs),
!std::get<2>(GetParam()));
ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
ASSERT_EQ(
"Provided null gradient_function for 'Neg'.\nIf the intent is to treat "
"this op as non-differentiable consider using RegisterNotDifferentiable "
"or NotDifferentiableGradientFunction.",
s.message());
ASSERT_EQ(nullptr, outputs[0]);
}
INSTANTIATE_TEST_SUITE_P(
    UnifiedCAPI, CppGradients,
    ::testing::Combine(::testing::Values("graphdef", "mlir"),
                       ::testing::Values(false),
                       ::testing::Values(true, false)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradients.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradients_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ffa5816-7f4b-47b4-83f7-108b7e00ea6c | cpp | tensorflow/tensorflow | lower_while_op | tensorflow/core/common_runtime/lower_while_op.cc | tensorflow/core/common_runtime/lower_while_op_test.cc | #include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
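// Rewrites a single While node into the classic control-flow form: Enter and
// Merge nodes feed a call to the cond function, whose result drives Switch
// nodes guarding the body function call, Exit nodes, and NextIteration
// back-edges into the Merges.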
class LowerWhileHelper {
public:
static Status Run(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn, int parallel_iterations,
Graph* graph, const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable) {
LowerWhileHelper helper(while_op, cond_fn, body_fn, parallel_iterations,
graph, flib_def, keep_node_fetchable);
return helper.RunInternal();
}
private:
LowerWhileHelper(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn, int parallel_iterations,
Graph* graph, const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable);
Status RunInternal();
void InitializeInputOutputToLoweredNodeMap();
Status CreateEnterNodes();
Status CreateMergeNodes();
Status CreateCondFuncCallNode();
Status CreateSwitchNodes();
Status CreateBodyFuncCallNode();
Status CreateExitNodes();
Status CreateNextIterationNodes();
Status UpdateMergeNodes();
Status UpdateConsumers();
string NewName(const string& infix);
bool IsLoopCarriedResource(int index);
Node* while_op_;
Node* cond_call_node_;
Node* loop_cond_node_;
Node* body_call_node_;
Node* lowered_while_output_;
Node* lowered_while_executed_;
Graph* graph_;
const FunctionLibraryDefinition* flib_def_;
string name_;
const int parallel_iterations_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
NodeBuilder cond_call_builder_;
NodeBuilder body_call_builder_;
std::vector<Node*> enter_nodes_;
std::vector<Node*> merge_nodes_;
std::vector<Node*> switch_nodes_;
std::vector<Node*> exit_nodes_;
std::vector<Node*> next_iterations_nodes_;
std::vector<int> op_input_output_to_lowered_node_;
bool propagate_colocation_key_;
size_t num_loop_inputs_;
};
LowerWhileHelper::LowerWhileHelper(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn,
int parallel_iterations, Graph* graph,
const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable)
: while_op_(while_op),
graph_(graph),
flib_def_(flib_def),
name_(while_op->name()),
parallel_iterations_(parallel_iterations),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*while_op_),
cond_call_builder_(NewName("cond"), cond_fn.name(), flib_def,
&debug_info_),
body_call_builder_(NewName("body"), body_fn.name(), flib_def,
&debug_info_),
num_loop_inputs_(while_op_->num_inputs()) {
cond_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : cond_fn.attr()) {
cond_call_builder_.Attr(i.first, i.second);
}
body_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : body_fn.attr()) {
body_call_builder_.Attr(i.first, i.second);
}
enter_nodes_.resize(num_loop_inputs_);
merge_nodes_.reserve(num_loop_inputs_);
switch_nodes_.reserve(num_loop_inputs_);
exit_nodes_.reserve(num_loop_inputs_);
next_iterations_nodes_.reserve(num_loop_inputs_);
op_input_output_to_lowered_node_.resize(num_loop_inputs_, -1);
propagate_colocation_key_ =
flags::Global()
.enable_colocation_key_propagation_in_while_op_lowering.value();
}
Status LowerWhileHelper::RunInternal() {
InitializeInputOutputToLoweredNodeMap();
TF_RETURN_IF_ERROR(CreateEnterNodes());
TF_RETURN_IF_ERROR(CreateMergeNodes());
TF_RETURN_IF_ERROR(CreateCondFuncCallNode());
TF_RETURN_IF_ERROR(CreateSwitchNodes());
TF_RETURN_IF_ERROR(CreateBodyFuncCallNode());
TF_RETURN_IF_ERROR(CreateExitNodes());
TF_RETURN_IF_ERROR(CreateNextIterationNodes());
TF_RETURN_IF_ERROR(UpdateMergeNodes());
TF_RETURN_IF_ERROR(UpdateConsumers());
return absl::OkStatus();
}
void LowerWhileHelper::InitializeInputOutputToLoweredNodeMap() {
int counter = 0;
for (int i = 0; i < num_loop_inputs_; i++) {
if (!IsLoopCarriedResource(i)) {
op_input_output_to_lowered_node_[i] = counter++;
}
}
}
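// Creates one Enter node per data input, tagged with the loop's frame name
// and parallel_iterations; loop-carried resources are marked is_constant.
// Any control inputs of the While op are funneled through a single
// "LoopControlInputs" NoOp that gates every Enter node.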
Status LowerWhileHelper::CreateEnterNodes() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(while_op_->input_edges(&edges));
for (const Edge* edge : edges) {
Node* enter_node;
NodeBuilder builder =
NodeBuilder(NewName("enter"), "Enter", flib_def_, &debug_info_)
.Input(NodeOut(edge->src(), edge->src_output()))
.Attr("frame_name", name_)
.Attr("parallel_iterations", parallel_iterations_)
.Device(edge->src()->requested_device())
.AssignedDevice(edge->src()->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = edge->src()->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
if (IsLoopCarriedResource(edge->dst_input())) {
builder.Attr("is_constant", true);
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &enter_node));
enter_nodes_[edge->dst_input()] = enter_node;
}
std::vector<Node*> control_inputs;
for (const Edge* e : while_op_->in_edges()) {
if (e->IsControlEdge()) {
control_inputs.push_back(e->src());
}
}
if (!control_inputs.empty()) {
Node* incoming_control_node;
NodeBuilder builder = NodeBuilder(NewName("LoopControlInputs"), "NoOp",
flib_def_, &debug_info_)
.ControlInputs(control_inputs)
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &incoming_control_node));
for (Node* n : enter_nodes_) {
graph_->AddControlEdge(incoming_control_node, n);
}
}
return absl::OkStatus();
}
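// Creates a Merge node per non-resource input. Both Merge inputs initially
// point at the Enter output; the second input is rewired to the matching
// NextIteration node later, in UpdateMergeNodes, once that node exists.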
Status LowerWhileHelper::CreateMergeNodes() {
for (Node* enter_node : enter_nodes_) {
bool is_constant = enter_node->attrs().FindByString("is_constant")->b();
if (is_constant && enter_node->output_type(0) == DT_RESOURCE) {
continue;
}
Node* merge_node;
NodeBuilder builder =
NodeBuilder(NewName("merge"), "Merge", flib_def_, &debug_info_)
.Input({NodeOut(enter_node, 0), NodeOut(enter_node, 0)})
.Device(enter_node->requested_device())
.AssignedDevice(enter_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = enter_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &merge_node));
merge_nodes_.emplace_back(merge_node);
}
return absl::OkStatus();
}
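// Builds the cond function call (fed by Enters for resources and Merges
// otherwise) and the LoopCond node it drives. The control edge from the
// first Merge ties the call into the per-iteration dataflow so it re-runs
// each iteration even when all of its data inputs are loop-invariant.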
Status LowerWhileHelper::CreateCondFuncCallNode() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
cond_call_builder_.Input(NodeOut(enter_nodes_[i], 0));
} else {
cond_call_builder_.Input(
NodeOut(merge_nodes_[op_input_output_to_lowered_node_[i]], 0));
}
}
cond_call_builder_.Device(while_op_->requested_device());
TF_RETURN_IF_ERROR(cond_call_builder_.Finalize(graph_, &cond_call_node_));
graph_->AddControlEdge(merge_nodes_[0], cond_call_node_);
NodeBuilder builder =
NodeBuilder(NewName("LoopCond"), "LoopCond", flib_def_, &debug_info_)
.Input(NodeOut(cond_call_node_, 0))
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &loop_cond_node_));
return absl::OkStatus();
}
Status LowerWhileHelper::CreateSwitchNodes() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
continue;
}
string op_name;
{
const Node* input_node;
TF_RETURN_IF_ERROR(while_op_->input_node(i, &input_node));
op_name = strings::StrCat(input_node->name(), "_switch");
}
Node* merge_node = merge_nodes_[op_input_output_to_lowered_node_[i]];
Node* switch_node;
string op_type = "Switch";
if (IsRefType(merge_node->output_type(0))) {
op_type = "RefSwitch";
}
NodeBuilder builder =
NodeBuilder(NewName(op_name), op_type, flib_def_, &debug_info_)
.Input(NodeOut(merge_node, 0))
.Input(NodeOut(loop_cond_node_, 0))
.Device(merge_node->requested_device())
.AssignedDevice(merge_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = merge_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &switch_node));
switch_nodes_.emplace_back(switch_node);
}
return absl::OkStatus();
}
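// Builds the body function call. Non-resource inputs come from each
// Switch's port 1, taken while the predicate is true; the extra Identity
// with a control edge gates the call on the predicate, which matters when
// the body's only data inputs are loop-invariant resources.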
Status LowerWhileHelper::CreateBodyFuncCallNode() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
body_call_builder_.Input(NodeOut(enter_nodes_[i], 0));
} else {
body_call_builder_.Input(
NodeOut(switch_nodes_[op_input_output_to_lowered_node_[i]], 1));
}
}
body_call_builder_.Device(while_op_->requested_device());
TF_RETURN_IF_ERROR(body_call_builder_.Finalize(graph_, &body_call_node_));
  Node* body_control_node;
string op_type = "Identity";
if (IsRefType(switch_nodes_[0]->output_type(1))) {
op_type = "RefIdentity";
}
NodeBuilder builder = NodeBuilder(NewName("loop_body_control"), op_type,
flib_def_, &debug_info_)
.Input(NodeOut(switch_nodes_[0], 1))
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
  TF_RETURN_IF_ERROR(builder.Finalize(graph_, &body_control_node));
  graph_->AddControlEdge(body_control_node, body_call_node_);
return absl::OkStatus();
}
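// Creates an Exit node per non-resource loop variable; loop-carried
// resource outputs simply alias the original input tensor. A "LoopExecuted"
// NoOp collects control edges from all Exits, and the node that keeps the
// original While name is either a fetchable IdentityN over the outputs or,
// when keep_node_fetchable_ is false, a NoOp anchored on "LoopExecuted".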
Status LowerWhileHelper::CreateExitNodes() {
std::vector<NodeOut> outputs;
outputs.reserve(num_loop_inputs_);
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
OutputTensor resource_tensor;
TF_RETURN_IF_ERROR(enter_nodes_[i]->input_tensor(0, &resource_tensor));
outputs.emplace_back(resource_tensor);
} else {
Node* exit_node;
NodeBuilder builder =
NodeBuilder(NewName("exit"), "Exit", flib_def_, &debug_info_)
.Input(NodeOut(switch_nodes_[op_input_output_to_lowered_node_[i]],
0))
.Device(switch_nodes_[op_input_output_to_lowered_node_[i]]
->requested_device())
.AssignedDevice(switch_nodes_[op_input_output_to_lowered_node_[i]]
->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr =
switch_nodes_[op_input_output_to_lowered_node_[i]]->attrs().Find(
kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &exit_node));
exit_nodes_.emplace_back(exit_node);
outputs.emplace_back(NodeOut(exit_node, 0));
}
}
NodeBuilder builder = NodeBuilder(NewName("LoopExecuted"), "NoOp",
OpRegistry::Global(), &debug_info_)
.ControlInputs(exit_nodes_)
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &lowered_while_executed_));
if (keep_node_fetchable_) {
TF_RETURN_IF_ERROR(
NodeBuilder(name_, "IdentityN", OpRegistry::Global(), &debug_info_)
.Input(outputs)
.Device(while_op_->requested_device())
.Finalize(graph_, &lowered_while_output_));
} else {
TF_RETURN_IF_ERROR(
NodeBuilder(name_, "NoOp", OpRegistry::Global(), &debug_info_)
.ControlInput(lowered_while_executed_)
.Device(while_op_->requested_device())
.Finalize(graph_, &lowered_while_output_));
}
return absl::OkStatus();
}
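// Feeds the i-th body output into a NextIteration node placed on the same
// device as the corresponding Merge; a control edge keeps it ordered after
// the body call.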
Status LowerWhileHelper::CreateNextIterationNodes() {
for (int i = 0; i < num_loop_inputs_; i++) {
Node* next_iteration;
if (IsLoopCarriedResource(i)) {
continue;
}
Node* merge_node = merge_nodes_[op_input_output_to_lowered_node_[i]];
NodeBuilder builder =
NodeBuilder(NewName("next_iteration"), "NextIteration", flib_def_,
&debug_info_)
.Input(NodeOut(body_call_node_, i))
.ControlInput(body_call_node_)
.Device(merge_node->requested_device())
.AssignedDevice(merge_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = merge_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &next_iteration));
next_iterations_nodes_.emplace_back(next_iteration);
}
return absl::OkStatus();
}
Status LowerWhileHelper::UpdateMergeNodes() {
for (int i = 0; i < merge_nodes_.size(); i++) {
TF_RETURN_IF_ERROR(
graph_->UpdateEdge(next_iterations_nodes_[i], 0, merge_nodes_[i], 1));
}
return absl::OkStatus();
}
Status LowerWhileHelper::UpdateConsumers() {
for (const Edge* e : while_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(lowered_while_executed_, e->dst());
} else {
if (IsLoopCarriedResource(e->src_output())) {
OutputTensor resource;
TF_RETURN_IF_ERROR(
enter_nodes_[e->src_output()]->input_tensor(0, &resource));
graph_->AddEdge(resource.node, resource.index, e->dst(),
e->dst_input());
} else {
int exit_node_index = op_input_output_to_lowered_node_[e->src_output()];
if (exit_node_index < 0) {
return errors::Internal(
"Expecting an Exit node for a Resource tensor.");
}
graph_->AddEdge(exit_nodes_[exit_node_index], 0, e->dst(),
e->dst_input());
}
}
}
return absl::OkStatus();
}
string LowerWhileHelper::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
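// A DT_RESOURCE input is treated as loop-carried (i.e. invariant) when the
// body function returns its matching argument unchanged; such inputs skip
// the Merge/Switch/Exit machinery entirely.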
bool LowerWhileHelper::IsLoopCarriedResource(int index) {
if (while_op_->input_type(index) != DT_RESOURCE) return false;
auto body_func_name = while_op_->attrs().Find("body")->func().name();
auto body_func = flib_def_->Find(body_func_name);
auto arg_name = body_func->signature().input_arg(index).name();
for (auto& ret : body_func->ret())
if (ret.second == arg_name) return true;
return false;
}
}
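// Validates the While node's cond/body/parallel_iterations attributes,
// lowers it in place via LowerWhileHelper, and removes the original node.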
Status RewriteWhileNode(Node* n, Graph* g,
const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable) {
VLOG(2) << "Lower While node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* cond_attr = n->attrs().Find("cond");
if (cond_attr == nullptr) {
return errors::InvalidArgument("While cond function missing");
}
const AttrValue* body_attr = n->attrs().Find("body");
if (body_attr == nullptr) {
return errors::InvalidArgument("While body function missing");
}
const AttrValue* parallel_iterations_attr =
n->attrs().Find("parallel_iterations");
if (parallel_iterations_attr == nullptr) {
return errors::InvalidArgument("parallel_iterations attr missing");
}
if (parallel_iterations_attr->i() < 1) {
return errors::InvalidArgument("parallel_iterations must be > 0");
}
TF_RETURN_IF_ERROR(LowerWhileHelper::Run(
n, cond_attr->func(), body_attr->func(), parallel_iterations_attr->i(), g,
flib_def, keep_node_fetchable));
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
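// Runs the functional-ops lowering pass over `graph`, mimicking how the
// session-level graph optimization pipeline invokes it.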
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerWhileOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_while_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "while") {
node_called_while_count++;
}
}
ASSERT_EQ(node_called_while_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int enter_count = 0;
int exit_count = 0;
int switch_count = 0;
int merge_count = 0;
int next_iteration_count = 0;
node_called_while_count = 0;
  int less_than_or_equal_to_n_count = 0;
int x_times_two_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsEnter()) {
++enter_count;
ASSERT_EQ(op->attrs().Find("parallel_iterations")->i(), 100);
}
if (op->IsExit()) {
++exit_count;
}
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
if (op->IsNextIteration()) {
++next_iteration_count;
}
if (op->name() == "while") {
node_called_while_count++;
}
if (op->type_string() == "LessThanOrEqualToN") {
      less_than_or_equal_to_n_count++;
}
if (op->type_string() == "XTimesTwo") {
x_times_two_count++;
}
if (op->name() == "C") {
ASSERT_EQ(op->in_edges().size(), 2);
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(enter_count, 1);
ASSERT_EQ(exit_count, 1);
ASSERT_EQ(switch_count, 1);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(next_iteration_count, 1);
ASSERT_EQ(node_called_while_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
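// The body function contains a dangling FloorDiv node; `expected_count`
// states whether that node should survive lowering and inlining (1) or be
// pruned away beforehand (0).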
static void DanglingNodeTestHelper(int expected_count) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() =
test::function::XTimesTwoWithDanglingFloorDivNode();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwoWithDanglingFloorDivNode");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int mul_count = 0;
int floor_div_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == "Mul") {
mul_count++;
}
if (op->type_string() == "FloorDiv") {
floor_div_count++;
}
}
ASSERT_EQ(mul_count, 1);
ASSERT_EQ(floor_div_count, expected_count);
}
TEST(LowerWhileOpTest, DanglingNode) { DanglingNodeTestHelper(1); }
TEST(LowerWhileOpTest, DanglingNodeWithPruning) {
flags::Global().enable_function_pruning_before_inlining.reset(true);
DanglingNodeTestHelper(0);
flags::Global().enable_function_pruning_before_inlining.reset(false);
}
TEST(LowerWhileOpTest, ForwardAssignedInputDevice) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
Node* placeholder;
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Finalize(graph.get(), &placeholder));
const string assigned_device_name = "/job:localhost/replica:0/task:0/gpu:0";
placeholder->set_assigned_device_name(assigned_device_name);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(placeholder)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(exit_consumers, 1);
}
TEST(LowerWhileOpTest, ForwardRequestedInputDevice) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
const string gpu_0_device = "/job:localhost/replica:0/task:0/gpu:0";
const string gpu_1_device = "/job:localhost/replica:0/task:0/gpu:1";
const string gpu_2_device = "/job:localhost/replica:0/task:0/gpu:2";
Node* gpu_0_ph;
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Device(gpu_0_device)
.Finalize(graph.get(), &gpu_0_ph));
Node* control_in;
TF_CHECK_OK(NodeBuilder("control_in", "Placeholder")
.Attr("dtype", type)
.Device(gpu_1_device)
.Finalize(graph.get(), &control_in));
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(gpu_0_ph)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.ControlInput(control_in)
.Device(gpu_2_device)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
Node* control_out;
TensorProto proto;
proto.set_dtype(DT_FLOAT);
TensorShape empty_shape({0});
empty_shape.AsProto(proto.mutable_tensor_shape());
TF_ASSERT_OK(NodeBuilder("control_out", "Const")
.ControlInput(while_node)
.Attr("dtype", DT_FLOAT)
.Attr("value", proto)
.Finalize(graph.get(), &control_out));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(exit_consumers, 1);
const Node* loop_control_inputs_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopControlInputs")) {
loop_control_inputs_node = op;
}
}
ASSERT_NE(loop_control_inputs_node, nullptr);
ASSERT_EQ(loop_control_inputs_node->requested_device(), gpu_2_device);
const Node* loop_executed_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopExecuted")) {
loop_executed_node = op;
}
}
ASSERT_NE(loop_executed_node, nullptr);
ASSERT_EQ(loop_executed_node->requested_device(), gpu_2_device);
}
TEST(LowerWhileOpTest, ForwardColocationKeyAttribute) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
const string gpu_0_device = "/job:localhost/replica:0/task:0/gpu:0";
const string gpu_1_device = "/job:localhost/replica:0/task:0/gpu:1";
const string gpu_2_device = "/job:localhost/replica:0/task:0/gpu:2";
Node* gpu_0_ph;
AttrValue gpu_0_colocation_attr;
gpu_0_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_0_device");
AttrValue gpu_1_colocation_attr;
gpu_1_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_1_device");
AttrValue gpu_2_colocation_attr;
gpu_2_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_2_device");
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Attr(kColocationAttrName, gpu_0_colocation_attr)
.Finalize(graph.get(), &gpu_0_ph));
Node* control_in;
TF_CHECK_OK(NodeBuilder("control_in", "Placeholder")
.Attr("dtype", type)
.Attr(kColocationAttrName, gpu_1_colocation_attr)
.Finalize(graph.get(), &control_in));
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(gpu_0_ph)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.ControlInput(control_in)
.Attr(kColocationAttrName, gpu_2_colocation_attr)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
Node* control_out;
TensorProto proto;
proto.set_dtype(DT_FLOAT);
TensorShape empty_shape({0});
empty_shape.AsProto(proto.mutable_tensor_shape());
TF_ASSERT_OK(NodeBuilder("control_out", "Const")
.ControlInput(while_node)
.Attr("dtype", DT_FLOAT)
.Attr("value", proto)
.Finalize(graph.get(), &control_out));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(exit_consumers, 1);
const Node* loop_control_inputs_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopControlInputs")) {
loop_control_inputs_node = op;
}
}
ASSERT_NE(loop_control_inputs_node, nullptr);
auto* coloc_attr =
loop_control_inputs_node->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_2_device");
const Node* loop_executed_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopExecuted")) {
loop_executed_node = op;
}
}
ASSERT_NE(loop_executed_node, nullptr);
coloc_attr = loop_executed_node->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_2_device");
}
TEST(LowerWhileOpTest, MultipleInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XPlusOneXTimesY();
*(f_lib_proto.add_function()) = test::function::XYXLessThanOrEqualToN(4);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(root.WithOpName("B"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(a.node()), NodeBuilder::NodeOut(b.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("XYXLessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XPlusOneXTimesY");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
}
TF_ASSERT_OK(Rewrite(&graph));
int enter_count = 0;
int exit_count = 0;
int switch_count = 0;
int merge_count = 0;
int next_iteration_count = 0;
int x_plus_one_x_times_y_count = 0;
int x_y_x_less_than_equal_to_n_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsEnter()) {
++enter_count;
}
if (op->IsExit()) {
++exit_count;
}
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
if (op->IsNextIteration()) {
++next_iteration_count;
}
if (op->type_string() == "XPlusOneXTimesY") {
x_plus_one_x_times_y_count++;
}
if (op->type_string() == "XYXLessThanOrEqualToN") {
x_y_x_less_than_equal_to_n_count++;
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(enter_count, 2);
ASSERT_EQ(exit_count, 2);
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(next_iteration_count, 2);
ASSERT_EQ(x_plus_one_x_times_y_count, 0);
ASSERT_EQ(x_y_x_less_than_equal_to_n_count, 0);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
feeds.emplace(Output(b.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(
feeds, {Output(while_node, 0), Output(while_node, 1)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 5);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 24);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
feeds.emplace(Output(b.node()), Input::Initializer(5));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(
feeds, {Output(while_node, 0), Output(while_node, 1)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 5);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 60);
}
}
TEST(LowerWhileOpTest, DoNotInlineLoweredFunctions) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef less_than_or_eq = test::function::LessThanOrEqualToN(8);
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*less_than_or_eq.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = x_times_two;
*f_lib_proto.add_function() = less_than_or_eq;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int less_than_or_eq_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == less_than_or_eq.signature().name()) {
less_than_or_eq_count++;
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(less_than_or_eq_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_while_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_while_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51ea8832-60b0-4030-bb24-35ccc1989653 | cpp | tensorflow/tensorflow | direct_session | tensorflow/core/common_runtime/direct_session.cc | tensorflow/core/common_runtime/direct_session_test.cc | #include "tensorflow/core/common_runtime/direct_session.h"
#include <algorithm>
#include <atomic>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/debugger_state_interface.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/common_runtime/local_session_selection.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/logging.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/run_handler.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/core/threadpool_options.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/nccl/collective_communicator.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/device_profiler_session.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
auto* direct_session_runs = monitoring::Counter<0>::New(
"/tensorflow/core/direct_session_runs",
"The number of times DirectSession::Run() has been called.");
Status NewThreadPoolFromThreadPoolOptions(
const SessionOptions& options,
const ThreadPoolOptionProto& thread_pool_options, int pool_number,
thread::ThreadPool** pool, bool* owned) {
int32_t num_threads = thread_pool_options.num_threads();
if (num_threads == 0) {
num_threads = NumInterOpThreadsFromSessionOptions(options);
}
const string& name = thread_pool_options.global_name();
if (name.empty()) {
VLOG(1) << "Direct session inter op parallelism threads for pool "
<< pool_number << ": " << num_threads;
*pool = new thread::ThreadPool(
options.env, ThreadOptions(), strings::StrCat("Compute", pool_number),
num_threads, !options.config.experimental().disable_thread_spinning(),
nullptr);
*owned = true;
return absl::OkStatus();
}
typedef std::pair<int32, thread::ThreadPool*> MapValue;
static std::map<string, MapValue>* global_pool_map =
new std::map<string, MapValue>;
static mutex* mu = new mutex();
mutex_lock l(*mu);
MapValue* mvalue = &(*global_pool_map)[name];
if (mvalue->second == nullptr) {
mvalue->first = thread_pool_options.num_threads();
mvalue->second = new thread::ThreadPool(
options.env, ThreadOptions(), strings::StrCat("Compute", pool_number),
num_threads, !options.config.experimental().disable_thread_spinning(),
nullptr);
} else {
if (mvalue->first != thread_pool_options.num_threads()) {
return errors::InvalidArgument(
"Pool ", name,
" configured previously with num_threads=", mvalue->first,
"; cannot re-configure with num_threads=",
thread_pool_options.num_threads());
}
}
*owned = false;
*pool = mvalue->second;
return absl::OkStatus();
}
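// Process-wide pool shared by all sessions that use neither per-session
// threads nor explicitly configured pools. It is created lazily on the
// first call; later calls return the same pool and ignore their arguments.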
thread::ThreadPool* GlobalThreadPool(const SessionOptions& options,
int32_t num_threads) {
static thread::ThreadPool* const thread_pool =
NewThreadPoolFromSessionOptions(options, num_threads);
return thread_pool;
}
string GetRendezvousKey(const string& tensor_name,
const DeviceAttributes& device_info,
const FrameAndIter& frame_iter) {
return strings::StrCat(device_info.name(), ";",
strings::FpToString(device_info.incarnation()), ";",
device_info.name(), ";", tensor_name, ";",
frame_iter.frame_id, ":", frame_iter.iter_id);
}
}
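// Factory behind plain in-process sessions (registered as "DIRECT_SESSION"
// below). It also enforces that session_metadata name/version pairs are
// unique among live sessions and tracks sessions so Reset() can close them.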
class DirectSessionFactory : public SessionFactory {
public:
DirectSessionFactory() {}
bool AcceptsOptions(const SessionOptions& options) override {
return options.target.empty() &&
!options.config.experimental().use_tfrt() &&
GetDefaultLocalSessionImpl() == LocalSessionImpl::kDirectSession;
}
Status NewSession(const SessionOptions& options,
Session** out_session) override {
const auto& experimental_config = options.config.experimental();
if (experimental_config.has_session_metadata()) {
if (experimental_config.session_metadata().version() < 0) {
return errors::InvalidArgument(
"Session version shouldn't be negative: ",
experimental_config.session_metadata().DebugString());
}
const string key = GetMetadataKey(experimental_config.session_metadata());
mutex_lock l(sessions_lock_);
if (!session_metadata_keys_.insert(key).second) {
return errors::InvalidArgument(
"A session with the same name and version has already been "
"created: ",
experimental_config.session_metadata().DebugString());
}
}
if (options.config.graph_options().build_cost_model() > 0) {
EnableCPUAllocatorFullStats();
}
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
DirectSession* session = new DirectSession(
options, new StaticDeviceMgr(std::move(devices)), this);
{
mutex_lock l(sessions_lock_);
sessions_.push_back(session);
}
*out_session = session;
return absl::OkStatus();
}
Status Reset(const SessionOptions& options,
const std::vector<string>& containers) override {
std::vector<DirectSession*> sessions_to_reset;
{
mutex_lock l(sessions_lock_);
std::swap(sessions_to_reset, sessions_);
}
Status s;
for (auto session : sessions_to_reset) {
s.Update(session->Reset(containers));
}
for (auto session : sessions_to_reset) {
s.Update(session->Close());
}
return s;
}
void Deregister(const DirectSession* session) {
mutex_lock l(sessions_lock_);
sessions_.erase(std::remove(sessions_.begin(), sessions_.end(), session),
sessions_.end());
if (session->options().config.experimental().has_session_metadata()) {
session_metadata_keys_.erase(GetMetadataKey(
session->options().config.experimental().session_metadata()));
}
}
private:
static string GetMetadataKey(const SessionMetadata& metadata) {
return absl::StrCat(metadata.name(), "/", metadata.version());
}
mutex sessions_lock_;
std::vector<DirectSession*> sessions_ TF_GUARDED_BY(sessions_lock_);
absl::flat_hash_set<string> session_metadata_keys_
TF_GUARDED_BY(sessions_lock_);
};
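// Registers the factory during static initialization so that
// SessionFactory lookup can find it under the "DIRECT_SESSION" key.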
class DirectSessionRegistrar {
public:
DirectSessionRegistrar() {
SessionFactory::Register("DIRECT_SESSION", new DirectSessionFactory());
}
};
static DirectSessionRegistrar registrar;
std::atomic_int_fast64_t DirectSession::step_id_counter_(1);
static RunHandlerPool* GetOrCreateRunHandlerPool(
const SessionOptions& options) {
int num_inter_threads = 0;
int num_intra_threads = 0;
static const int env_num_inter_threads = NumInterOpThreadsFromEnvironment();
static const int env_num_intra_threads = NumIntraOpThreadsFromEnvironment();
if (env_num_inter_threads > 0) {
num_inter_threads = env_num_inter_threads;
}
if (env_num_intra_threads > 0) {
num_intra_threads = env_num_intra_threads;
}
if (num_inter_threads == 0) {
if (options.config.session_inter_op_thread_pool_size() > 0) {
num_inter_threads =
options.config.session_inter_op_thread_pool(0).num_threads();
}
if (num_inter_threads == 0) {
num_inter_threads = NumInterOpThreadsFromSessionOptions(options);
}
}
if (num_intra_threads == 0) {
num_intra_threads = options.config.intra_op_parallelism_threads();
if (num_intra_threads == 0) {
num_intra_threads = port::MaxParallelism();
}
}
static RunHandlerPool* pool = [&]() {
LOG(INFO) << "Creating run-handler pool with "
"[num_inter_threads, num_intra_threads] as ["
<< num_inter_threads << "," << num_intra_threads << "]";
return new RunHandlerPool(num_inter_threads, num_intra_threads);
}();
return pool;
}
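// The run-handler pool is only an option when the session defers to the
// global inter-op pool: per-session threads, or an explicitly selected
// non-default session pool, opt out.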
bool DirectSession::ShouldUseRunHandlerPool(
const RunOptions& run_options) const {
if (options_.config.use_per_session_threads()) return false;
if (options_.config.session_inter_op_thread_pool_size() > 0 &&
run_options.inter_op_thread_pool() > 0)
return false;
return true;
}
DirectSession::DirectSession(const SessionOptions& options,
const DeviceMgr* device_mgr,
DirectSessionFactory* const factory)
: options_(options),
device_mgr_(device_mgr),
factory_(factory),
cancellation_manager_(new CancellationManager()),
operation_timeout_in_ms_(options_.config.operation_timeout_in_ms()) {
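  // Inter-op pool selection: explicitly configured session pools take
  // priority, then a per-session pool, otherwise the shared global pool.
  // A non-positive inter-op parallelism setting (from the config or the
  // environment override) additionally requests running closures inline
  // in the caller's thread.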
const int thread_pool_size =
options_.config.session_inter_op_thread_pool_size();
if (thread_pool_size > 0) {
for (int i = 0; i < thread_pool_size; ++i) {
thread::ThreadPool* pool = nullptr;
bool owned = false;
init_error_.Update(NewThreadPoolFromThreadPoolOptions(
options_, options_.config.session_inter_op_thread_pool(i), i, &pool,
&owned));
thread_pools_.emplace_back(pool, owned);
}
} else if (options_.config.use_per_session_threads()) {
thread_pools_.emplace_back(NewThreadPoolFromSessionOptions(options_),
true );
} else {
static const int env_num_threads = NumInterOpThreadsFromEnvironment();
if (options_.config.inter_op_parallelism_threads() < 0 ||
(options_.config.inter_op_parallelism_threads() == 0 &&
env_num_threads < 0)) {
run_in_caller_thread_ = true;
}
thread_pools_.emplace_back(
GlobalThreadPool(options, run_in_caller_thread_ ? 1 : 0),
false );
}
const Status status =
ReadBoolFromEnvVar("TF_SYNC_ON_FINISH", true, &sync_on_finish_);
if (!status.ok()) {
LOG(ERROR) << status.message();
}
session_handle_ =
strings::StrCat("direct", strings::FpToString(random::New64()));
if (options.config.log_device_placement()) {
const string mapping_str = device_mgr_->DeviceMappingString();
string msg;
if (mapping_str.empty()) {
msg = "Device mapping: no known devices.";
} else {
msg = strings::StrCat("Device mapping:\n", mapping_str);
}
if (!logging::LogToListeners(msg)) {
LOG(INFO) << msg;
}
}
device_set_.set_client_device(device_mgr_->HostCPU());
for (auto d : device_mgr_->ListDevices()) {
devices_.push_back(d);
device_set_.AddDevice(d);
d->op_segment()->AddHold(session_handle_);
}
}
DirectSession::~DirectSession() {
if (!closed_) Close().IgnoreError();
for (auto& it : partial_runs_) {
it.second.reset(nullptr);
}
for (auto& it : executors_) {
it.second.reset();
}
callables_.clear();
for (auto d : device_mgr_->ListDevices()) {
d->op_segment()->RemoveHold(session_handle_);
}
functions_.clear();
delete cancellation_manager_;
for (const auto& p_and_owned : thread_pools_) {
if (p_and_owned.second) delete p_and_owned.first;
}
execution_state_.reset(nullptr);
flib_def_.reset(nullptr);
}
Status DirectSession::Create(const GraphDef& graph) {
return Create(GraphDef(graph));
}
Status DirectSession::Create(GraphDef&& graph) {
TF_RETURN_IF_ERROR(init_error_);
if (graph.node_size() > 0) {
mutex_lock l(graph_state_lock_);
if (graph_created_) {
return errors::AlreadyExists(
"A Graph has already been created for this session.");
}
return ExtendLocked(std::move(graph));
}
return absl::OkStatus();
}
Status DirectSession::Extend(const GraphDef& graph) {
return Extend(GraphDef(graph));
}
Status DirectSession::Extend(GraphDef&& graph) {
TF_RETURN_IF_ERROR(CheckNotClosed());
mutex_lock l(graph_state_lock_);
return ExtendLocked(std::move(graph));
}
Status DirectSession::ExtendLocked(GraphDef&& graph) {
if (finalized_) {
return errors::FailedPrecondition("Session has been finalized.");
}
if (!(flib_def_ && execution_state_)) {
GraphExecutionStateOptions options;
options.device_set = &device_set_;
options.session_options = &options_;
options.session_handle = session_handle_;
TF_RETURN_IF_ERROR(GraphExecutionState::MakeForBaseGraph(
std::move(graph), options, &execution_state_));
flib_def_.reset(
new FunctionLibraryDefinition(execution_state_->flib_def()));
graph_created_ = true;
} else {
std::unique_ptr<GraphExecutionState> state;
TF_RETURN_IF_ERROR(execution_state_->Extend(graph, &state));
execution_state_.swap(state);
TF_RETURN_IF_ERROR(flib_def_->AddLibrary(graph.library()));
}
return absl::OkStatus();
}
Status DirectSession::Run(const NamedTensorList& inputs,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
std::vector<Tensor>* outputs) {
RunMetadata run_metadata;
return Run(RunOptions(), inputs, output_names, target_nodes, outputs,
&run_metadata);
}
Status DirectSession::CreateDebuggerState(
const CallableOptions& callable_options, int64_t global_step,
int64_t session_run_index, int64_t executor_step_index,
std::unique_ptr<DebuggerStateInterface>* debugger_state) {
TF_RETURN_IF_ERROR(DebuggerStateRegistry::CreateState(
callable_options.run_options().debug_options(), debugger_state));
std::vector<string> input_names(callable_options.feed().begin(),
callable_options.feed().end());
std::vector<string> output_names(callable_options.fetch().begin(),
callable_options.fetch().end());
std::vector<string> target_names(callable_options.target().begin(),
callable_options.target().end());
TF_RETURN_IF_ERROR(debugger_state->get()->PublishDebugMetadata(
global_step, session_run_index, executor_step_index, input_names,
output_names, target_names));
return absl::OkStatus();
}
Status DirectSession::DecorateAndPublishGraphForDebug(
const DebugOptions& debug_options, Graph* graph, Device* device) {
std::unique_ptr<DebugGraphDecoratorInterface> decorator;
TF_RETURN_IF_ERROR(
DebugGraphDecoratorRegistry::CreateDecorator(debug_options, &decorator));
TF_RETURN_IF_ERROR(decorator->DecorateGraph(graph, device));
TF_RETURN_IF_ERROR(decorator->PublishGraph(*graph, device->name()));
return absl::OkStatus();
}
Status DirectSession::RunInternal(
int64_t step_id, const RunOptions& run_options,
CallFrameInterface* call_frame, ExecutorsAndKeys* executors_and_keys,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) {
const uint64 start_time_usecs = options_.env->NowMicros();
const int64_t executor_step_count =
executors_and_keys->step_count.fetch_add(1);
RunState run_state(step_id, &devices_);
const size_t num_executors = executors_and_keys->items.size();
tsl::profiler::TraceMeProducer activity(
[&] {
if (options_.config.experimental().has_session_metadata()) {
const auto& model_metadata =
options_.config.experimental().session_metadata();
string model_id = strings::StrCat(model_metadata.name(), ":",
model_metadata.version());
return tsl::profiler::TraceMeEncode("SessionRun",
{{"id", step_id},
{"_r", 1} ,
{"model_id", model_id}});
} else {
return tsl::profiler::TraceMeEncode(
"SessionRun", {{"id", step_id}, {"_r", 1} });
}
},
tsl::profiler::ContextType::kTfExecutor, step_id,
tsl::profiler::TraceMeLevel::kInfo);
std::unique_ptr<DebuggerStateInterface> debugger_state;
if (!run_options.debug_options().debug_tensor_watch_opts().empty()) {
TF_RETURN_IF_ERROR(
CreateDebuggerState(executors_and_keys->callable_options,
run_options.debug_options().global_step(), step_id,
executor_step_count, &debugger_state));
}
if (run_metadata != nullptr &&
options_.config.experimental().has_session_metadata()) {
*run_metadata->mutable_session_metadata() =
options_.config.experimental().session_metadata();
}
#ifndef __ANDROID__
if (executors_and_keys->collective_graph_key !=
BuildGraphOptions::kNoCollectiveGraphKey) {
if (run_options.experimental().collective_graph_key() !=
BuildGraphOptions::kNoCollectiveGraphKey) {
if (run_options.experimental().collective_graph_key() !=
executors_and_keys->collective_graph_key) {
return errors::Internal(
"collective_graph_key in RunOptions ",
run_options.experimental().collective_graph_key(),
" should match collective_graph_key from optimized graph ",
executors_and_keys->collective_graph_key);
}
}
if (!collective_executor_mgr_) {
collective_executor_mgr_ = CreateProdLocalCollectiveExecutorMgr(
options_.config, device_mgr_.get(),
MaybeCreateNcclCommunicator(options_.config));
}
run_state.collective_executor.reset(new CollectiveExecutor::Handle(
collective_executor_mgr_->FindOrCreate(step_id), true ));
}
#endif
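  // Picks the inter-op scheduler for this step: inline execution in the
  // caller's thread when requested (falling back to pool 0 if there are
  // multiple executors), else a caller-provided pool from
  // `threadpool_options`, else the session pool selected by
  // run_options.inter_op_thread_pool().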
thread::ThreadPool* pool;
std::unique_ptr<thread::ThreadPool> threadpool_wrapper;
const bool inline_execution_requested =
run_in_caller_thread_ || run_options.inter_op_thread_pool() == -1;
if (inline_execution_requested) {
if (executors_and_keys->items.size() > 1) {
pool = thread_pools_[0].first;
} else {
VLOG(1) << "Executing Session::Run() synchronously!";
pool = nullptr;
}
} else if (threadpool_options.inter_op_threadpool != nullptr) {
threadpool_wrapper = std::make_unique<thread::ThreadPool>(
threadpool_options.inter_op_threadpool);
pool = threadpool_wrapper.get();
} else {
if (run_options.inter_op_thread_pool() < -1 ||
run_options.inter_op_thread_pool() >=
static_cast<int32>(thread_pools_.size())) {
return errors::InvalidArgument("Invalid inter_op_thread_pool: ",
run_options.inter_op_thread_pool());
}
pool = thread_pools_[run_options.inter_op_thread_pool()].first;
}
const int64_t call_timeout = run_options.timeout_in_ms() > 0
? run_options.timeout_in_ms()
: operation_timeout_in_ms_;
absl::optional<absl::Time> deadline;
if (call_timeout > 0) {
deadline = absl::Now() + absl::Milliseconds(call_timeout);
}
std::unique_ptr<RunHandler> handler;
if (ShouldUseRunHandlerPool(run_options) &&
run_options.experimental().use_run_handler_pool()) {
VLOG(1) << "Using RunHandler to scheduler inter-op closures.";
handler = GetOrCreateRunHandlerPool(options_)->Get(
step_id, call_timeout,
run_options.experimental().run_handler_pool_options());
if (!handler) {
return errors::DeadlineExceeded(
"Could not obtain RunHandler for request after waiting for ",
call_timeout, "ms.");
}
}
auto* handler_ptr = handler.get();
Executor::Args::Runner default_runner = nullptr;
if (pool == nullptr) {
default_runner = [](const Executor::Args::Closure& c) { c(); };
} else if (handler_ptr != nullptr) {
default_runner = [handler_ptr](Executor::Args::Closure c) {
handler_ptr->ScheduleInterOpClosure(std::move(c));
};
} else {
default_runner = [pool](Executor::Args::Closure c) {
pool->Schedule(std::move(c));
};
}
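  // The step can run synchronously on the caller's thread only when the graph
  // has a single partition and no timeout is in effect.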
const bool can_execute_synchronously =
executors_and_keys->items.size() == 1 && call_timeout == 0;
Executor::Args args;
args.step_id = step_id;
args.call_frame = call_frame;
args.collective_executor =
(run_state.collective_executor ? run_state.collective_executor->get()
: nullptr);
args.session_config = &options_.config;
args.session_state = &session_state_;
args.session_handle = session_handle_;
args.tensor_store = &run_state.tensor_store;
args.step_container = &run_state.step_container;
args.sync_on_finish = sync_on_finish_;
args.user_intra_op_threadpool = threadpool_options.intra_op_threadpool;
args.run_all_kernels_inline = pool == nullptr;
args.start_time_usecs = start_time_usecs;
args.deadline = deadline;
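  // Collect step stats when tracing is enabled, when the cost model is due
  // for an update, or when tensor allocations must be reported on OOM.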
const bool do_trace = (run_options.trace_level() > RunOptions::NO_TRACE);
bool update_cost_model = false;
if (options_.config.graph_options().build_cost_model() > 0) {
const int64_t build_cost_model_every =
options_.config.graph_options().build_cost_model();
const int64_t build_cost_model_after =
options_.config.graph_options().build_cost_model_after();
int64_t measure_step_count = executor_step_count - build_cost_model_after;
if (measure_step_count >= 0) {
update_cost_model =
((measure_step_count + 1) % build_cost_model_every == 0);
}
}
if (run_metadata != nullptr &&
(do_trace || update_cost_model ||
run_options.report_tensor_allocations_upon_oom())) {
run_state.collector.reset(
new StepStatsCollector(run_metadata->mutable_step_stats()));
args.stats_collector = run_state.collector.get();
}
std::unique_ptr<DeviceProfilerSession> device_profiler_session;
if (run_options.trace_level() >= RunOptions::HARDWARE_TRACE) {
device_profiler_session = DeviceProfilerSession::Create();
}
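  // Chain a per-step cancellation manager to the session-level one and bail
  // out early if the session has already been cancelled.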
CancellationManager step_cancellation_manager(cancellation_manager_);
if (step_cancellation_manager.IsCancelled()) {
return errors::Cancelled("Run call was cancelled");
}
args.cancellation_manager = &step_cancellation_manager;
Status run_status;
auto set_threadpool_args_for_item =
[&default_runner, &handler](const PerPartitionExecutorsAndLib& item,
Executor::Args* args) {
thread::ThreadPool* device_thread_pool =
item.device->tensorflow_device_thread_pool();
if (!device_thread_pool) {
args->runner = default_runner;
} else {
args->runner = [device_thread_pool](Executor::Args::Closure c) {
device_thread_pool->Schedule(std::move(c));
};
}
if (handler != nullptr) {
args->user_intra_op_threadpool =
handler->AsIntraThreadPoolInterface();
}
};
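  // Synchronous steps use a stack-allocated private rendezvous; asynchronous
  // steps share a ref-counted rendezvous and wait for all executors behind an
  // ExecutorBarrier.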
if (can_execute_synchronously) {
PrivateIntraProcessRendezvous rendezvous(device_mgr_.get());
args.rendezvous = &rendezvous;
const auto& item = executors_and_keys->items[0];
set_threadpool_args_for_item(item, &args);
run_status = item.executor->Run(args);
} else {
core::RefCountPtr<RefCountedIntraProcessRendezvous> rendezvous(
new RefCountedIntraProcessRendezvous(device_mgr_.get()));
args.rendezvous = rendezvous.get();
Notification executors_done;
ExecutorBarrier* barrier =
new ExecutorBarrier(num_executors, rendezvous.get(),
[&run_state, &executors_done](const Status& ret) {
{
mutex_lock l(run_state.mu);
run_state.status.Update(ret);
}
executors_done.Notify();
});
for (const auto& item : executors_and_keys->items) {
set_threadpool_args_for_item(item, &args);
item.executor->RunAsync(args, barrier->Get());
}
WaitForNotification(&executors_done, &run_state, &step_cancellation_manager,
call_timeout);
{
tf_shared_lock l(run_state.mu);
run_status = run_state.status;
}
}
if (step_cancellation_manager.IsCancelled()) {
run_status.Update(errors::Cancelled("Run call was cancelled"));
}
if (run_metadata != nullptr && device_profiler_session) {
TF_RETURN_IF_ERROR(device_profiler_session->CollectData(
run_metadata->mutable_step_stats()));
}
TF_RETURN_IF_ERROR(run_status);
if (!run_state.tensor_store.empty()) {
TF_RETURN_IF_ERROR(run_state.tensor_store.SaveTensors(
{executors_and_keys->callable_options.fetch().begin(),
executors_and_keys->callable_options.fetch().end()},
&session_state_));
}
if (run_state.collector) {
run_state.collector->Finalize();
}
if (update_cost_model) {
std::unordered_map<string, const Graph*> device_to_graph;
for (const PerPartitionExecutorsAndLib& partition :
executors_and_keys->items) {
const Graph* graph = partition.graph.get();
const string& device = partition.flib->device()->name();
device_to_graph[device] = graph;
}
mutex_lock l(executor_lock_);
run_state.collector->BuildCostModel(&cost_model_manager_, device_to_graph);
if (run_metadata != nullptr) {
CostGraphDef* cost_graph = run_metadata->mutable_cost_graph();
for (const auto& item : executors_and_keys->items) {
TF_RETURN_IF_ERROR(cost_model_manager_.AddToCostGraphDef(
item.graph.get(), cost_graph));
}
}
}
if (run_options.output_partition_graphs()) {
if (options_.config.experimental().disable_output_partition_graphs()) {
return errors::InvalidArgument(
"RunOptions.output_partition_graphs() is not supported when "
"disable_output_partition_graphs is true.");
} else if (run_metadata != nullptr) {
protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs =
run_metadata->mutable_partition_graphs();
for (const PerPartitionExecutorsAndLib& exec_and_lib :
executors_and_keys->items) {
GraphDef* partition_graph_def = partition_graph_defs->Add();
exec_and_lib.graph->ToGraphDef(partition_graph_def);
}
}
}
metrics::UpdateGraphExecTime(options_.env->NowMicros() - start_time_usecs);
return absl::OkStatus();
}
Status DirectSession::Run(const RunOptions& run_options,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) {
return Run(run_options, inputs, output_names, target_nodes, outputs,
run_metadata, thread::ThreadPoolOptions());
}
Status DirectSession::Run(const RunOptions& run_options,
const NamedTensorList& inputs,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) {
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("Run()"));
direct_session_runs->GetCell()->IncrementBy(1);
std::vector<string> input_tensor_names;
input_tensor_names.reserve(inputs.size());
size_t input_size = 0;
for (const auto& it : inputs) {
input_tensor_names.push_back(it.first);
input_size += it.second.AllocatedBytes();
}
metrics::RecordGraphInputTensors(input_size);
ExecutorsAndKeys* executors_and_keys;
RunStateArgs run_state_args(run_options.debug_options());
run_state_args.collective_graph_key =
run_options.experimental().collective_graph_key();
TF_RETURN_IF_ERROR(GetOrCreateExecutors(input_tensor_names, output_names,
target_nodes, &executors_and_keys,
&run_state_args));
{
mutex_lock l(collective_graph_key_lock_);
collective_graph_key_ = executors_and_keys->collective_graph_key;
}
FunctionCallFrame call_frame(executors_and_keys->input_types,
executors_and_keys->output_types);
gtl::InlinedVector<Tensor, 4> feed_args(inputs.size());
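  // Resolve DT_RESOURCE feeds (persistent tensor handles) to the tensors they
  // reference in the session state before populating the call frame.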
for (const auto& it : inputs) {
if (it.second.dtype() == DT_RESOURCE) {
Tensor tensor_from_handle;
TF_RETURN_IF_ERROR(
ResourceHandleToInputTensor(it.second, &tensor_from_handle));
feed_args[executors_and_keys->input_name_to_index[it.first]] =
tensor_from_handle;
} else {
feed_args[executors_and_keys->input_name_to_index[it.first]] = it.second;
}
}
const Status s = call_frame.SetArgs(feed_args);
if (errors::IsInternal(s)) {
return errors::InvalidArgument(s.message());
} else if (!s.ok()) {
return s;
}
const int64_t step_id = step_id_counter_.fetch_add(1);
if (LogMemory::IsEnabled()) {
LogMemory::RecordStep(step_id, run_state_args.handle);
}
TF_RETURN_IF_ERROR(RunInternal(step_id, run_options, &call_frame,
executors_and_keys, run_metadata,
threadpool_options));
if (outputs) {
std::vector<Tensor> sorted_outputs;
    const Status s = call_frame.ConsumeRetvals(
        &sorted_outputs, /*allow_dead_tensors=*/false);
if (errors::IsInternal(s)) {
return errors::InvalidArgument(s.message());
} else if (!s.ok()) {
return s;
}
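    // When a fetch name appears more than once, only its first occurrence is
    // moved out of the call frame; later duplicates copy the first output.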
const bool unique_outputs =
output_names.size() == executors_and_keys->output_name_to_index.size();
std::vector<int> first_indices;
if (!unique_outputs) {
first_indices.reserve(output_names.size());
for (const auto& name : output_names) {
first_indices.push_back(
std::find(output_names.begin(), output_names.end(), name) -
output_names.begin());
}
}
outputs->clear();
size_t output_size = 0;
outputs->reserve(sorted_outputs.size());
for (int i = 0; i < output_names.size(); ++i) {
const string& output_name = output_names[i];
if (first_indices.empty() || first_indices[i] == i) {
outputs->emplace_back(
std::move(sorted_outputs[executors_and_keys
->output_name_to_index[output_name]]));
} else {
outputs->push_back((*outputs)[first_indices[i]]);
}
output_size += outputs->back().AllocatedBytes();
}
metrics::RecordGraphOutputTensors(output_size);
}
return absl::OkStatus();
}
Status DirectSession::PRunSetup(const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
string* handle) {
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("PRunSetup()"));
thread::ThreadPool* pool = thread_pools_[0].first;
ExecutorsAndKeys* executors_and_keys;
DebugOptions debug_options;
RunStateArgs run_state_args(debug_options);
run_state_args.is_partial_run = true;
TF_RETURN_IF_ERROR(GetOrCreateExecutors(input_names, output_names,
target_nodes, &executors_and_keys,
&run_state_args));
Executor::Args args;
args.step_id = step_id_counter_.fetch_add(1);
PartialRunState* run_state =
new PartialRunState(input_names, output_names, args.step_id, &devices_);
run_state->rendez.reset(new IntraProcessRendezvous(device_mgr_.get()));
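  // Register the partial-run state under the generated handle; the handle
  // embeds a per-session counter, so a collision indicates an internal error.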
{
mutex_lock l(executor_lock_);
if (!partial_runs_
.emplace(run_state_args.handle,
std::unique_ptr<PartialRunState>(run_state))
.second) {
return errors::Internal("The handle '", run_state_args.handle,
"' created for this partial run is not unique.");
}
}
const size_t num_executors = executors_and_keys->items.size();
ExecutorBarrier* barrier = new ExecutorBarrier(
num_executors, run_state->rendez.get(), [run_state](const Status& ret) {
if (!ret.ok()) {
mutex_lock l(run_state->mu);
run_state->status.Update(ret);
}
run_state->executors_done.Notify();
});
args.rendezvous = run_state->rendez.get();
args.cancellation_manager = cancellation_manager_;
args.collective_executor = nullptr;
args.session_config = &options_.config;
args.runner = [this, pool](Executor::Args::Closure c) {
pool->Schedule(std::move(c));
};
args.session_state = &session_state_;
args.session_handle = session_handle_;
args.tensor_store = &run_state->tensor_store;
args.step_container = &run_state->step_container;
if (LogMemory::IsEnabled()) {
LogMemory::RecordStep(args.step_id, run_state_args.handle);
}
args.sync_on_finish = sync_on_finish_;
if (options_.config.graph_options().build_cost_model()) {
run_state->collector.reset(new StepStatsCollector(nullptr));
args.stats_collector = run_state->collector.get();
}
for (auto& item : executors_and_keys->items) {
item.executor->RunAsync(args, barrier->Get());
}
*handle = run_state_args.handle;
return absl::OkStatus();
}
Status DirectSession::PRun(const string& handle, const NamedTensorList& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
TF_RETURN_IF_ERROR(CheckNotClosed());
std::vector<string> parts = str_util::Split(handle, ';');
const string& key = parts[0];
ExecutorsAndKeys* executors_and_keys;
PartialRunState* run_state;
{
mutex_lock l(executor_lock_);
auto exc_it = executors_.find(key);
if (exc_it == executors_.end()) {
return errors::InvalidArgument(
"Must run 'setup' before performing partial runs!");
}
executors_and_keys = exc_it->second.get();
auto prun_it = partial_runs_.find(handle);
if (prun_it == partial_runs_.end()) {
return errors::InvalidArgument(
"Must run 'setup' before performing partial runs!");
}
run_state = prun_it->second.get();
for (const auto& input : inputs) {
auto it = run_state->pending_inputs.find(input.first);
if (it == run_state->pending_inputs.end()) {
return errors::InvalidArgument(
"The feed ", input.first,
" was not specified in partial_run_setup.");
} else if (it->second) {
return errors::InvalidArgument("The feed ", input.first,
" has already been fed.");
}
}
for (const auto& output : output_names) {
auto it = run_state->pending_outputs.find(output);
if (it == run_state->pending_outputs.end()) {
return errors::InvalidArgument(
"The fetch ", output, " was not specified in partial_run_setup.");
} else if (it->second) {
return errors::InvalidArgument("The fetch ", output,
" has already been fetched.");
}
}
}
TF_RETURN_IF_ERROR(
CheckFetch(inputs, output_names, executors_and_keys, run_state));
Status s =
SendPRunInputs(inputs, executors_and_keys, run_state->rendez.get());
if (s.ok()) {
s = RecvPRunOutputs(output_names, executors_and_keys, run_state, outputs);
}
if (s.ok()) {
s = run_state->tensor_store.SaveTensors(output_names, &session_state_);
}
{
mutex_lock l(executor_lock_);
bool done = true;
if (s.ok()) {
{
mutex_lock l(run_state->mu);
if (!run_state->status.ok()) {
LOG(WARNING) << "An error unrelated to this prun has been detected. "
<< run_state->status;
}
}
for (const auto& input : inputs) {
auto it = run_state->pending_inputs.find(input.first);
it->second = true;
}
for (const auto& name : output_names) {
auto it = run_state->pending_outputs.find(name);
it->second = true;
}
done = run_state->PendingDone();
}
if (done) {
WaitForNotification(&run_state->executors_done, run_state,
cancellation_manager_, operation_timeout_in_ms_);
partial_runs_.erase(handle);
}
}
return s;
}
Status DirectSession::ResourceHandleToInputTensor(const Tensor& resource_tensor,
Tensor* retrieved_tensor) {
if (resource_tensor.dtype() != DT_RESOURCE) {
return errors::InvalidArgument(strings::StrCat(
"ResourceHandleToInputTensor() received non-DT_RESOURCE Tensor: ",
resource_tensor.dtype()));
}
const ResourceHandle& resource_handle =
resource_tensor.scalar<ResourceHandle>()();
if (resource_handle.container() ==
SessionState::kTensorHandleResourceTypeName) {
return session_state_.GetTensor(resource_handle.name(), retrieved_tensor);
} else {
return errors::InvalidArgument(strings::StrCat(
"Invalid resource type hash code: ", resource_handle.hash_code(),
"(name: ", resource_handle.name(),
" type: ", resource_handle.maybe_type_name(),
"). Perhaps a resource tensor was being provided as a feed? That is "
"not currently allowed. Please file an issue at "
"https:
"short code snippet that leads to this error message."));
}
}
Status DirectSession::SendPRunInputs(const NamedTensorList& inputs,
const ExecutorsAndKeys* executors_and_keys,
IntraProcessRendezvous* rendez) {
Status s;
Rendezvous::ParsedKey parsed;
for (const auto& input : inputs) {
auto it =
executors_and_keys->input_name_to_rendezvous_key.find(input.first);
if (it == executors_and_keys->input_name_to_rendezvous_key.end()) {
return errors::Internal("'", input.first, "' is not a pre-defined feed.");
}
const string& input_key = it->second;
s = Rendezvous::ParseKey(input_key, &parsed);
if (!s.ok()) {
rendez->StartAbort(s);
return s;
}
if (input.second.dtype() == DT_RESOURCE) {
Tensor tensor_from_handle;
s = ResourceHandleToInputTensor(input.second, &tensor_from_handle);
if (s.ok()) {
s = rendez->Send(parsed, Rendezvous::Args(), tensor_from_handle, false);
}
} else {
s = rendez->Send(parsed, Rendezvous::Args(), input.second, false);
}
if (!s.ok()) {
rendez->StartAbort(s);
return s;
}
}
return absl::OkStatus();
}
Status DirectSession::RecvPRunOutputs(
const std::vector<string>& output_names,
const ExecutorsAndKeys* executors_and_keys, PartialRunState* run_state,
std::vector<Tensor>* outputs) {
Status s;
if (!output_names.empty()) {
outputs->resize(output_names.size());
}
Rendezvous::ParsedKey parsed;
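  // Receive each fetched tensor from the rendezvous, honoring the session's
  // operation timeout; a "dead" tensor indicates the fetch was not produced.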
for (size_t output_offset = 0; output_offset < output_names.size();
++output_offset) {
const string& output_name = output_names[output_offset];
auto it =
executors_and_keys->output_name_to_rendezvous_key.find(output_name);
if (it == executors_and_keys->output_name_to_rendezvous_key.end()) {
return errors::Internal("'", output_name,
"' is not a pre-defined fetch.");
}
const string& output_key = it->second;
Tensor output_tensor;
bool is_dead;
s = Rendezvous::ParseKey(output_key, &parsed);
if (s.ok()) {
s = run_state->rendez->Recv(parsed, Rendezvous::Args(), &output_tensor,
&is_dead, operation_timeout_in_ms_);
if (is_dead && s.ok()) {
s = errors::InvalidArgument("The tensor returned for ", output_name,
" was not valid.");
}
}
if (!s.ok()) {
run_state->rendez->StartAbort(s);
outputs->clear();
return s;
}
(*outputs)[output_offset] = output_tensor;
}
return absl::OkStatus();
}
Status DirectSession::CheckFetch(const NamedTensorList& feeds,
const std::vector<string>& fetches,
const ExecutorsAndKeys* executors_and_keys,
const PartialRunState* run_state) {
const Graph* graph = executors_and_keys->graph.get();
const NameNodeMap* name_to_node = &executors_and_keys->name_to_node;
std::unordered_set<TensorId, TensorId::Hasher> pending_feeds;
{
mutex_lock l(executor_lock_);
for (const auto& input : run_state->pending_inputs) {
if (input.second) continue;
TensorId id(ParseTensorName(input.first));
auto it = name_to_node->find(id.first);
if (it == name_to_node->end()) {
return errors::NotFound("Feed ", input.first, ": not found");
}
pending_feeds.insert(id);
}
}
for (const auto& it : feeds) {
TensorId id(ParseTensorName(it.first));
pending_feeds.erase(id);
}
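  // Walk the graph backwards from the fetch nodes: if any path reaches a feed
  // that has not been fed yet, the fetch cannot be computed.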
std::vector<const Node*> stack;
for (const string& fetch : fetches) {
TensorId id(ParseTensorName(fetch));
auto it = name_to_node->find(id.first);
if (it == name_to_node->end()) {
return errors::NotFound("Fetch ", fetch, ": not found");
}
stack.push_back(it->second);
}
std::vector<bool> visited(graph->num_node_ids(), false);
while (!stack.empty()) {
const Node* n = stack.back();
stack.pop_back();
for (const Edge* in_edge : n->in_edges()) {
const Node* in_node = in_edge->src();
if (pending_feeds.count({in_node->name(), in_edge->src_output()}) > 0) {
return errors::InvalidArgument("Fetch ", in_node->name(), ":",
in_edge->src_output(),
" can't be computed from the feeds"
" that have been fed so far.");
}
if (!visited[in_node->id()]) {
visited[in_node->id()] = true;
stack.push_back(in_node);
}
}
}
return absl::OkStatus();
}
Status DirectSession::CreateExecutors(
const CallableOptions& callable_options,
std::unique_ptr<ExecutorsAndKeys>* out_executors_and_keys,
std::unique_ptr<FunctionInfo>* out_func_info,
RunStateArgs* run_state_args) {
BuildGraphOptions options;
options.callable_options = callable_options;
options.use_function_convention = !run_state_args->is_partial_run;
options.collective_graph_key =
callable_options.run_options().experimental().collective_graph_key();
if (options_.config.experimental()
.collective_deterministic_sequential_execution()) {
options.collective_order = GraphCollectiveOrder::kEdges;
} else if (options_.config.experimental().collective_nccl()) {
options.collective_order = GraphCollectiveOrder::kAttrs;
}
std::unique_ptr<FunctionInfo> func_info(new FunctionInfo);
std::unique_ptr<ExecutorsAndKeys> ek(new ExecutorsAndKeys);
ek->callable_options = callable_options;
std::unordered_map<string, std::unique_ptr<Graph>> graphs;
TF_RETURN_IF_ERROR(CreateGraphs(
options, &graphs, &func_info->flib_def, run_state_args, &ek->input_types,
&ek->output_types, &ek->collective_graph_key));
if (run_state_args->is_partial_run) {
ek->graph = std::move(run_state_args->graph);
std::unordered_set<StringPiece, StringPieceHasher> names;
for (const string& input : callable_options.feed()) {
TensorId id(ParseTensorName(input));
names.emplace(id.first);
}
for (const string& output : callable_options.fetch()) {
TensorId id(ParseTensorName(output));
names.emplace(id.first);
}
for (Node* n : ek->graph->nodes()) {
if (names.count(n->name()) > 0) {
ek->name_to_node.insert({n->name(), n});
}
}
}
ek->items.reserve(graphs.size());
const auto& optimizer_opts =
options_.config.graph_options().optimizer_options();
int graph_def_version = graphs.begin()->second->versions().producer();
const auto* session_metadata =
options_.config.experimental().has_session_metadata()
? &options_.config.experimental().session_metadata()
: nullptr;
func_info->proc_flr.reset(new ProcessFunctionLibraryRuntime(
device_mgr_.get(), options_.env, &options_.config, graph_def_version,
func_info->flib_def.get(), optimizer_opts, thread_pools_[0].first,
nullptr, session_metadata,
Rendezvous::Factory{[](const int64_t, const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}));
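  // Build one executor per graph partition: look up the partition's device,
  // optimize the partition graph, and wire kernel creation and deletion
  // through the function library runtime and the device's op segment.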
GraphOptimizer optimizer(optimizer_opts);
for (auto iter = graphs.begin(); iter != graphs.end(); ++iter) {
const string& partition_name = iter->first;
std::unique_ptr<Graph>& partition_graph = iter->second;
Device* device;
TF_RETURN_IF_ERROR(device_mgr_->LookupDevice(partition_name, &device));
ek->items.resize(ek->items.size() + 1);
auto* item = &(ek->items.back());
auto lib = func_info->proc_flr->GetFLR(partition_name);
if (lib == nullptr) {
return errors::Internal("Could not find device: ", partition_name);
}
item->flib = lib;
LocalExecutorParams params;
params.device = device;
params.session_metadata = session_metadata;
params.function_library = lib;
auto opseg = device->op_segment();
params.create_kernel =
[this, lib, opseg](const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
if (!OpSegment::ShouldOwnKernel(lib, props->node_def.op())) {
return lib->CreateKernel(props, kernel);
}
auto create_fn = [lib, &props](OpKernel** kernel) {
return lib->CreateKernel(props, kernel);
};
return opseg->FindOrCreate(session_handle_, props->node_def.name(),
kernel, create_fn);
};
params.delete_kernel = [lib](OpKernel* kernel) {
if (kernel && !OpSegment::ShouldOwnKernel(lib, kernel->type_string()))
delete kernel;
};
optimizer.Optimize(lib, options_.env, device, &partition_graph,
GraphOptimizer::Options());
const DebugOptions& debug_options =
options.callable_options.run_options().debug_options();
if (!debug_options.debug_tensor_watch_opts().empty()) {
TF_RETURN_IF_ERROR(DecorateAndPublishGraphForDebug(
debug_options, partition_graph.get(), params.device));
}
TF_RETURN_IF_ERROR(EnsureMemoryTypes(DeviceType(device->device_type()),
device->name(),
partition_graph.get()));
item->executor = nullptr;
item->device = device;
auto executor_type = options_.config.experimental().executor_type();
TF_RETURN_IF_ERROR(
NewExecutor(executor_type, params, *partition_graph, &item->executor));
if (!options_.config.experimental().disable_output_partition_graphs() ||
options_.config.graph_options().build_cost_model() > 0) {
item->graph = std::move(partition_graph);
}
}
if (!run_state_args->is_partial_run) {
for (int i = 0; i < callable_options.feed().size(); ++i) {
const string& input = callable_options.feed(i);
ek->input_name_to_index[input] = i;
}
for (int i = 0; i < callable_options.fetch().size(); ++i) {
const string& output = callable_options.fetch(i);
ek->output_name_to_index[output] = i;
}
} else {
for (int i = 0; i < callable_options.feed().size(); ++i) {
const string& input = callable_options.feed(i);
ek->input_name_to_rendezvous_key[input] = GetRendezvousKey(
input, device_set_.client_device()->attributes(), FrameAndIter(0, 0));
}
for (int i = 0; i < callable_options.fetch().size(); ++i) {
const string& output = callable_options.fetch(i);
ek->output_name_to_rendezvous_key[output] =
GetRendezvousKey(output, device_set_.client_device()->attributes(),
FrameAndIter(0, 0));
}
}
*out_executors_and_keys = std::move(ek);
*out_func_info = std::move(func_info);
return absl::OkStatus();
}
Status DirectSession::GetOrCreateExecutors(
absl::Span<const string> inputs, absl::Span<const string> outputs,
absl::Span<const string> target_nodes,
ExecutorsAndKeys** executors_and_keys, RunStateArgs* run_state_args) {
int64_t handle_name_counter_value = -1;
if (LogMemory::IsEnabled() || run_state_args->is_partial_run) {
handle_name_counter_value = handle_name_counter_.fetch_add(1);
}
string debug_tensor_watches_summary;
if (!run_state_args->debug_options.debug_tensor_watch_opts().empty()) {
debug_tensor_watches_summary = SummarizeDebugTensorWatches(
run_state_args->debug_options.debug_tensor_watch_opts());
}
const string key = strings::StrCat(
absl::StrJoin(inputs, ","), "->", absl::StrJoin(outputs, ","), "/",
absl::StrJoin(target_nodes, ","), "/", run_state_args->is_partial_run,
"/", debug_tensor_watches_summary);
if (handle_name_counter_value >= 0) {
run_state_args->handle =
strings::StrCat(key, ";", handle_name_counter_value);
}
{
mutex_lock l(executor_lock_);
auto it = executors_.find(key);
if (it != executors_.end()) {
*executors_and_keys = it->second.get();
return absl::OkStatus();
}
}
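  // On a fast-path miss, retry with a canonical key built from sorted inputs,
  // outputs, and targets so that permutations of the same signature share a
  // single set of executors.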
std::vector<string> inputs_sorted(inputs.begin(), inputs.end());
std::sort(inputs_sorted.begin(), inputs_sorted.end());
std::vector<string> outputs_sorted(outputs.begin(), outputs.end());
std::sort(outputs_sorted.begin(), outputs_sorted.end());
std::vector<string> tn_sorted(target_nodes.begin(), target_nodes.end());
std::sort(tn_sorted.begin(), tn_sorted.end());
const string sorted_key = strings::StrCat(
absl::StrJoin(inputs_sorted, ","), "->",
absl::StrJoin(outputs_sorted, ","), "/", absl::StrJoin(tn_sorted, ","),
"/", run_state_args->is_partial_run, "/", debug_tensor_watches_summary);
if (handle_name_counter_value >= 0) {
run_state_args->handle =
strings::StrCat(sorted_key, ";", handle_name_counter_value);
}
{
mutex_lock l(executor_lock_);
auto it = executors_.find(sorted_key);
if (it != executors_.end()) {
*executors_and_keys = it->second.get();
return absl::OkStatus();
}
}
CallableOptions callable_options;
callable_options.mutable_feed()->Reserve(inputs_sorted.size());
for (const string& input : inputs_sorted) {
callable_options.add_feed(input);
}
callable_options.mutable_fetch()->Reserve(outputs_sorted.size());
for (const string& output : outputs_sorted) {
callable_options.add_fetch(output);
}
callable_options.mutable_target()->Reserve(tn_sorted.size());
for (const string& target : tn_sorted) {
callable_options.add_target(target);
}
*callable_options.mutable_run_options()->mutable_debug_options() =
run_state_args->debug_options;
callable_options.mutable_run_options()
->mutable_experimental()
->set_collective_graph_key(run_state_args->collective_graph_key);
std::unique_ptr<ExecutorsAndKeys> ek;
std::unique_ptr<FunctionInfo> func_info;
TF_RETURN_IF_ERROR(
CreateExecutors(callable_options, &ek, &func_info, run_state_args));
mutex_lock l(executor_lock_);
auto insert_result = executors_.emplace(
sorted_key, std::shared_ptr<ExecutorsAndKeys>(std::move(ek)));
if (insert_result.second) {
functions_.push_back(std::move(func_info));
}
executors_.emplace(key, insert_result.first->second);
*executors_and_keys = insert_result.first->second.get();
return absl::OkStatus();
}
Status DirectSession::CreateGraphs(
const BuildGraphOptions& subgraph_options,
std::unordered_map<string, std::unique_ptr<Graph>>* outputs,
std::unique_ptr<FunctionLibraryDefinition>* flib_def,
RunStateArgs* run_state_args, DataTypeVector* input_types,
DataTypeVector* output_types, int64_t* collective_graph_key) {
mutex_lock l(graph_state_lock_);
if (finalized_) {
return errors::FailedPrecondition("Session has been finalized.");
}
std::unique_ptr<ClientGraph> client_graph;
std::unique_ptr<GraphExecutionState> temp_exec_state_holder;
GraphExecutionState* execution_state = nullptr;
if (options_.config.graph_options().place_pruned_graph()) {
GraphExecutionStateOptions prune_options;
prune_options.device_set = &device_set_;
prune_options.session_options = &options_;
prune_options.stateful_placements = stateful_placements_;
prune_options.session_handle = session_handle_;
TF_RETURN_IF_ERROR(GraphExecutionState::MakeForPrunedGraph(
*execution_state_, prune_options, subgraph_options,
&temp_exec_state_holder, &client_graph));
execution_state = temp_exec_state_holder.get();
} else {
execution_state = execution_state_.get();
TF_RETURN_IF_ERROR(
execution_state->BuildGraph(subgraph_options, &client_graph));
}
*collective_graph_key = client_graph->collective_graph_key;
if (subgraph_options.callable_options.feed_size() !=
client_graph->feed_types.size()) {
return errors::Internal(
"Graph pruning failed: requested number of feed endpoints = ",
subgraph_options.callable_options.feed_size(),
" versus number of pruned feed endpoints = ",
client_graph->feed_types.size());
}
if (subgraph_options.callable_options.fetch_size() !=
client_graph->fetch_types.size()) {
return errors::Internal(
"Graph pruning failed: requested number of fetch endpoints = ",
subgraph_options.callable_options.fetch_size(),
" versus number of pruned fetch endpoints = ",
client_graph->fetch_types.size());
}
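  // Ensure stateful nodes keep the device placement they received on earlier
  // calls; moving a stateful node would strand its state.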
auto current_stateful_placements = execution_state->GetStatefulPlacements();
for (const auto& placement_pair : current_stateful_placements) {
const string& node_name = placement_pair.first;
const string& placement = placement_pair.second;
auto iter = stateful_placements_.find(node_name);
if (iter == stateful_placements_.end()) {
stateful_placements_.insert(std::make_pair(node_name, placement));
} else if (iter->second != placement) {
return errors::Internal(
"Stateful placement mismatch. "
"Current assignment of ",
node_name, " to ", iter->second, " does not match ", placement);
}
}
stateful_placements_ = execution_state->GetStatefulPlacements();
if (run_state_args->is_partial_run) {
run_state_args->graph.reset(new Graph(flib_def_.get()));
CopyGraph(*execution_state->full_graph(), run_state_args->graph.get());
}
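  // Partition the pruned client graph by assigned device name; each partition
  // is later converted back into a Graph and given its own executor.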
PartitionOptions popts;
popts.node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
popts.new_name = [this](const string& prefix) {
return strings::StrCat(prefix, "/_", edge_name_counter_.fetch_add(1));
};
popts.get_incarnation = [](const string& name) {
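    // The direct session does not use changing incarnation numbers; a
    // constant incarnation of 1 is sufficient.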
return 1;
};
popts.flib_def = flib_def->get();
popts.control_flow_added = false;
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(Partition(popts, &client_graph->graph, &partitions));
std::vector<string> device_names;
device_names.reserve(devices_.size());
for (auto device : devices_) {
device_names.push_back(DeviceNameUtils::LocalName(device->name()));
}
for (const auto& partition : partitions) {
const string local_partition_name =
DeviceNameUtils::LocalName(partition.first);
if (std::count(device_names.begin(), device_names.end(),
local_partition_name) == 0) {
return errors::InvalidArgument(
"Creating a partition for ", local_partition_name,
" which doesn't exist in the list of available devices. Available "
"devices: ",
absl::StrJoin(device_names, ","));
}
}
for (auto& partition : partitions) {
std::unique_ptr<Graph> device_graph(
new Graph(client_graph->flib_def.get()));
device_graph->SetConstructionContext(ConstructionContext::kDirectSession);
GraphConstructorOptions device_opts;
device_opts.allow_internal_ops = true;
device_opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
device_opts, std::move(partition.second), device_graph.get()));
outputs->emplace(partition.first, std::move(device_graph));
}
GraphOptimizationPassOptions optimization_options;
optimization_options.session_options = &options_;
optimization_options.flib_def = client_graph->flib_def.get();
optimization_options.partition_graphs = outputs;
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
Status s;
for (auto& partition : *outputs) {
const string& partition_name = partition.first;
std::unique_ptr<Graph>* graph = &partition.second;
VLOG(2) << "Created " << DebugString(graph->get()) << " for "
<< partition_name;
Device* d;
s = device_mgr_->LookupDevice(partition_name, &d);
if (!s.ok()) break;
s = d->MaybeRewriteGraph(graph);
if (!s.ok()) {
break;
}
}
*flib_def = std::move(client_graph->flib_def);
std::swap(*input_types, client_graph->feed_types);
std::swap(*output_types, client_graph->fetch_types);
return s;
}
::tensorflow::Status DirectSession::ListDevices(
std::vector<DeviceAttributes>* response) {
response->clear();
response->reserve(devices_.size());
for (Device* d : devices_) {
const DeviceAttributes& attrs = d->attributes();
response->emplace_back(attrs);
}
return absl::OkStatus();
}
::tensorflow::Status DirectSession::Reset(
const std::vector<string>& containers) {
device_mgr_->ClearContainers(containers);
return absl::OkStatus();
}
::tensorflow::Status DirectSession::Close() {
cancellation_manager_->StartCancel();
{
mutex_lock l(closed_lock_);
if (closed_) return absl::OkStatus();
closed_ = true;
}
if (factory_ != nullptr) factory_->Deregister(this);
return absl::OkStatus();
}
DirectSession::RunState::RunState(int64_t step_id,
const std::vector<Device*>* devices)
: step_container(step_id, [devices, step_id](const string& name) {
for (auto d : *devices) {
if (!d->resource_manager()->Cleanup(name).ok()) {
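            // Cleanup failures are intentionally ignored: the step is being
            // torn down and there is no caller to report the error to.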
}
ScopedAllocatorMgr* sam = d->GetScopedAllocatorMgr();
if (sam) sam->Cleanup(step_id);
}
}) {}
DirectSession::PartialRunState::PartialRunState(
const std::vector<string>& pending_input_names,
const std::vector<string>& pending_output_names, int64_t step_id,
const std::vector<Device*>* devices)
: RunState(step_id, devices) {
for (auto& name : pending_input_names) {
pending_inputs[name] = false;
}
for (auto& name : pending_output_names) {
pending_outputs[name] = false;
}
}
DirectSession::PartialRunState::~PartialRunState() {
if (rendez != nullptr) {
rendez->StartAbort(errors::Cancelled("PRun cancellation"));
executors_done.WaitForNotification();
}
}
bool DirectSession::PartialRunState::PendingDone() const {
for (const auto& it : pending_inputs) {
if (!it.second) return false;
}
for (const auto& it : pending_outputs) {
if (!it.second) return false;
}
return true;
}
void DirectSession::WaitForNotification(Notification* n, RunState* run_state,
CancellationManager* cm,
int64_t timeout_in_ms) {
const Status status = WaitForNotification(n, timeout_in_ms);
if (!status.ok()) {
{
mutex_lock l(run_state->mu);
run_state->status.Update(status);
}
cm->StartCancel();
n->WaitForNotification();
}
}
::tensorflow::Status DirectSession::WaitForNotification(
Notification* notification, int64_t timeout_in_ms) {
if (timeout_in_ms > 0) {
const int64_t timeout_in_us = timeout_in_ms * 1000;
const bool notified =
WaitForNotificationWithTimeout(notification, timeout_in_us);
if (!notified) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Timed out waiting for notification");
}
} else {
notification->WaitForNotification();
}
return absl::OkStatus();
}
Status DirectSession::MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("MakeCallable()"));
std::unique_ptr<ExecutorsAndKeys> ek;
std::unique_ptr<FunctionInfo> func_info;
RunStateArgs run_state_args(callable_options.run_options().debug_options());
TF_RETURN_IF_ERROR(
CreateExecutors(callable_options, &ek, &func_info, &run_state_args));
{
mutex_lock l(callables_lock_);
*out_handle = next_callable_handle_++;
callables_[*out_handle] = {std::move(ek), std::move(func_info)};
}
return absl::OkStatus();
}
class DirectSession::RunCallableCallFrame : public CallFrameInterface {
public:
RunCallableCallFrame(DirectSession* session,
ExecutorsAndKeys* executors_and_keys,
const std::vector<Tensor>* feed_tensors,
std::vector<Tensor>* fetch_tensors)
: session_(session),
executors_and_keys_(executors_and_keys),
feed_tensors_(feed_tensors),
fetch_tensors_(fetch_tensors) {}
size_t num_args() const override {
return executors_and_keys_->input_types.size();
}
size_t num_retvals() const override {
return executors_and_keys_->output_types.size();
}
Status GetArg(int index, const Tensor** val) override {
    if (TF_PREDICT_FALSE(index >= feed_tensors_->size())) {
return errors::Internal("Args index out of bounds: ", index);
} else {
*val = &(*feed_tensors_)[index];
}
return absl::OkStatus();
}
Status SetRetval(int index, const Tensor& val) override {
    if (index >= fetch_tensors_->size()) {
return errors::Internal("RetVal index out of bounds: ", index);
}
(*fetch_tensors_)[index] = val;
return absl::OkStatus();
}
private:
DirectSession* const session_;
ExecutorsAndKeys* const executors_and_keys_;
const std::vector<Tensor>* const feed_tensors_;
std::vector<Tensor>* const fetch_tensors_;
};
::tensorflow::Status DirectSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata) {
return RunCallable(handle, feed_tensors, fetch_tensors, run_metadata,
thread::ThreadPoolOptions());
}
::tensorflow::Status DirectSession::RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) {
TF_RETURN_IF_ERROR(CheckNotClosed());
TF_RETURN_IF_ERROR(CheckGraphCreated("RunCallable()"));
direct_session_runs->GetCell()->IncrementBy(1);
std::shared_ptr<ExecutorsAndKeys> executors_and_keys;
const int64_t step_id = step_id_counter_.fetch_add(1);
{
tf_shared_lock l(callables_lock_);
if (handle >= next_callable_handle_) {
return errors::InvalidArgument("No such callable handle: ", handle);
}
executors_and_keys = callables_[handle].executors_and_keys;
}
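  // If the callable was released, the map lookup above default-constructs an
  // empty entry and executors_and_keys is null.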
if (!executors_and_keys) {
return errors::InvalidArgument(
"Attempted to run callable after handle was released: ", handle);
}
DebugOptions debug_options;
RunStateArgs run_state_args(debug_options);
if (feed_tensors.size() != executors_and_keys->input_types.size()) {
return errors::InvalidArgument(
"Expected ", executors_and_keys->input_types.size(),
" feed tensors, but got ", feed_tensors.size());
}
if (fetch_tensors != nullptr) {
fetch_tensors->resize(executors_and_keys->output_types.size());
} else if (!executors_and_keys->output_types.empty()) {
return errors::InvalidArgument(
"`fetch_tensors` must be provided when the callable has one or more "
"outputs.");
}
size_t input_size = 0;
bool any_resource_feeds = false;
for (auto& tensor : feed_tensors) {
input_size += tensor.AllocatedBytes();
any_resource_feeds = any_resource_feeds || tensor.dtype() == DT_RESOURCE;
}
metrics::RecordGraphInputTensors(input_size);
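  // If any feed is a DT_RESOURCE handle, materialize the referenced tensors
  // first so that the call frame only hands concrete tensors to executors.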
std::unique_ptr<std::vector<Tensor>> converted_feed_tensors;
const std::vector<Tensor>* actual_feed_tensors;
if (TF_PREDICT_FALSE(any_resource_feeds)) {
converted_feed_tensors = std::make_unique<std::vector<Tensor>>();
converted_feed_tensors->reserve(feed_tensors.size());
for (const Tensor& t : feed_tensors) {
if (t.dtype() == DT_RESOURCE) {
converted_feed_tensors->emplace_back();
Tensor* tensor_from_handle = &converted_feed_tensors->back();
TF_RETURN_IF_ERROR(ResourceHandleToInputTensor(t, tensor_from_handle));
} else {
converted_feed_tensors->emplace_back(t);
}
}
actual_feed_tensors = converted_feed_tensors.get();
} else {
actual_feed_tensors = &feed_tensors;
}
RunCallableCallFrame call_frame(this, executors_and_keys.get(),
actual_feed_tensors, fetch_tensors);
if (LogMemory::IsEnabled()) {
LogMemory::RecordStep(step_id, run_state_args.handle);
}
TF_RETURN_IF_ERROR(RunInternal(
step_id, executors_and_keys->callable_options.run_options(), &call_frame,
executors_and_keys.get(), run_metadata, threadpool_options));
if (fetch_tensors != nullptr) {
size_t output_size = 0;
for (auto& tensor : *fetch_tensors) {
output_size += tensor.AllocatedBytes();
}
metrics::RecordGraphOutputTensors(output_size);
}
return absl::OkStatus();
}
::tensorflow::Status DirectSession::ReleaseCallable(CallableHandle handle) {
mutex_lock l(callables_lock_);
if (handle >= next_callable_handle_) {
return errors::InvalidArgument("No such callable handle: ", handle);
}
callables_.erase(handle);
return absl::OkStatus();
}
Status DirectSession::Finalize() {
mutex_lock l(graph_state_lock_);
if (finalized_) {
return errors::FailedPrecondition("Session already finalized.");
}
if (!graph_created_) {
return errors::FailedPrecondition("Session not yet created.");
}
execution_state_.reset();
flib_def_.reset();
finalized_ = true;
return absl::OkStatus();
}
DirectSession::Callable::~Callable() {
executors_and_keys.reset();
function_info.reset();
}
} | #include "tensorflow/core/common_runtime/direct_session.h"
#include <map>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/costmodel.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/protobuf.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#endif
namespace tensorflow {
namespace {
CallableOptions MakeCallableOptions(absl::Span<const string> feeds,
absl::Span<const string> fetches,
absl::Span<const string> targets) {
CallableOptions ret;
for (const string& feed : feeds) {
ret.add_feed(feed);
}
for (const string& fetch : fetches) {
ret.add_fetch(fetch);
}
for (const string& target : targets) {
ret.add_target(target);
}
return ret;
}
SessionOptions DefaultSessionOptions() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
return options;
}
std::unique_ptr<Session> CreateSession() {
return std::unique_ptr<Session>(NewSession(DefaultSessionOptions()));
}
class DirectSessionMinusAXTest : public ::testing::Test {
public:
void Initialize(std::initializer_list<float> a_values) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, a_values);
Node* a = test::graph::Constant(&graph, a_tensor);
a->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
a_ = a->name();
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
test::FillValues<float>(&x_tensor, {1, 1});
Node* x = test::graph::Constant(&graph, x_tensor);
x->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
x_ = x->name();
Node* y = test::graph::Matmul(&graph, a, x, false, false);
y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
y_ = y->name();
Node* y_neg = test::graph::Unary(&graph, "Neg", y);
y_neg_ = y_neg->name();
y_neg->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
Node* z = test::graph::Unary(&graph, "Identity", y_neg);
z_ = z->name();
z->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
graph.ToGraphDef(&def_);
}
string a_;
string x_;
string y_;
string y_neg_;
string z_;
GraphDef def_;
};
TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, target_nodes, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_Callable) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
for (int i = 0; i < 2; ++i) {
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions({}, {y_ + ":0"}, {y_neg_}), &handle));
for (int i = 0; i < 2; ++i) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
Status s = session->RunCallable(handle, {}, nullptr, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "`fetch_tensors` must be provided"));
TF_ASSERT_OK(session->ReleaseCallable(handle));
std::vector<Tensor> outputs;
s = session->RunCallable(handle, {}, &outputs, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(
s.message(), "Attempted to run callable after handle was released"));
s = session->RunCallable(handle + 1, {}, &outputs, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "No such callable handle"));
}
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_OptimizeForStaticGraph) {
Initialize({3, 2, -1, 0});
SessionOptions options(DefaultSessionOptions());
options.config.mutable_experimental()->set_optimize_for_static_graph(true);
auto session = absl::WrapUnique(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, target_nodes, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
s = session->Extend({});
EXPECT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(absl::StrContains(s.message(), "optimize_for_static_graph"));
}
TEST_F(DirectSessionMinusAXTest,
RunSimpleNetwork_DisableOutputPartitionGraphs) {
Initialize({3, 2, -1, 0});
SessionOptions options(DefaultSessionOptions());
options.config.mutable_experimental()->set_disable_output_partition_graphs(
true);
auto session = absl::WrapUnique(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, target_nodes, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
s = session->Run(run_options, inputs, output_names, target_nodes, &outputs,
&run_metadata);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "disable_output_partition_graphs"));
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_FinalizeWithCallables) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions({}, {y_ + ":0"}, {y_neg_}), &handle));
TF_ASSERT_OK(session->Finalize());
for (int i = 0; i < 2; ++i) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
TF_ASSERT_OK(session->ReleaseCallable(handle));
Status s =
session->MakeCallable(MakeCallableOptions({}, {y_ + ":0"}, {}), &handle);
EXPECT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been finalized."));
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_FinalizeWithRun) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {y_ + ":0"}, {y_neg_}, &outputs));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
TF_ASSERT_OK(session->Finalize());
TF_ASSERT_OK(session->Run({}, {y_ + ":0"}, {y_neg_}, &outputs));
ASSERT_EQ(1, outputs.size());
mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
Status s = session->Run({}, {y_ + ":0"}, {}, &outputs);
EXPECT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been finalized."));
}
TEST_F(DirectSessionMinusAXTest, TestTensorConnection) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(a_ + ":0");
c->set_to_tensor(y_ + ":0");
callable_options.add_fetch(y_neg_ + ":0");
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(callable_options, &handle));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(-3.0, mat(0, 0));
EXPECT_FLOAT_EQ(-2.0, mat(0, 1));
EXPECT_FLOAT_EQ(1.0, mat(1, 0));
EXPECT_FLOAT_EQ(0.0, mat(1, 1));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(a_ + ":0");
c->set_to_tensor(y_ + ":0");
callable_options.add_fetch(a_ + ":0");
callable_options.add_fetch(y_neg_ + ":0");
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(callable_options, &handle));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(2, outputs.size());
auto mat_a = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(3.0, mat_a(0, 0));
EXPECT_FLOAT_EQ(2.0, mat_a(0, 1));
EXPECT_FLOAT_EQ(-1.0, mat_a(1, 0));
EXPECT_FLOAT_EQ(0.0, mat_a(1, 1));
auto mat_y_neg = outputs[1].matrix<float>();
ASSERT_TRUE(outputs[1].IsInitialized());
EXPECT_FLOAT_EQ(-3.0, mat_y_neg(0, 0));
EXPECT_FLOAT_EQ(-2.0, mat_y_neg(0, 1));
EXPECT_FLOAT_EQ(1.0, mat_y_neg(1, 0));
EXPECT_FLOAT_EQ(0.0, mat_y_neg(1, 1));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(y_ + ":0");
c->set_to_tensor(a_ + ":0");
callable_options.add_fetch(y_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "would create a cycle"));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor("unknown_node:0");
c->set_to_tensor(y_ + ":0");
callable_options.add_fetch(y_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "unknown node"));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(a_ + ":17");
c->set_to_tensor(y_ + ":0");
callable_options.add_fetch(y_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "unknown edge"));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(a_ + ":0");
c->set_to_tensor("unknown_node:0");
callable_options.add_fetch(y_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsNotFound(s));
EXPECT_TRUE(absl::StrContains(s.message(), "unable to find feed output"));
}
{
CallableOptions callable_options;
TensorConnection* c1 = callable_options.add_tensor_connection();
c1->set_from_tensor(a_ + ":0");
c1->set_to_tensor(y_neg_ + ":0");
TensorConnection* c2 = callable_options.add_tensor_connection();
c2->set_from_tensor(x_ + ":0");
c2->set_to_tensor(y_neg_ + ":0");
callable_options.add_fetch(z_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once"));
}
{
CallableOptions callable_options;
TensorConnection* c = callable_options.add_tensor_connection();
c->set_from_tensor(a_ + ":0");
c->set_to_tensor(y_ + ":0");
callable_options.add_feed(y_ + ":0");
callable_options.add_fetch(y_neg_ + ":0");
Session::CallableHandle handle;
Status s = session->MakeCallable(callable_options, &handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once"));
}
}
TEST_F(DirectSessionMinusAXTest, TestFeed) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
Tensor t(DT_FLOAT, TensorShape({2, 1}));
t.matrix<float>()(0, 0) = 5;
t.matrix<float>()(1, 0) = 6;
std::vector<std::pair<string, Tensor>> inputs = {{x_, t}};
std::vector<string> output_names = {y_ + ":0"};
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, {}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(17.0, mat(0, 0));
EXPECT_FLOAT_EQ(39.0, mat(1, 0));
}
TEST_F(DirectSessionMinusAXTest, TestFeed_Callable) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
CallableOptions callable_options;
callable_options.add_feed(x_);
callable_options.add_fetch(y_ + ":0");
Session::CallableHandle handle;
  TF_ASSERT_OK(session->MakeCallable(callable_options, &handle));
Tensor t(DT_FLOAT, TensorShape({2, 1}));
t.matrix<float>()(0, 0) = 5;
t.matrix<float>()(1, 0) = 6;
std::vector<Tensor> inputs = {t};
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, inputs, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(17.0, mat(0, 0));
EXPECT_FLOAT_EQ(39.0, mat(1, 0));
}
TEST_F(DirectSessionMinusAXTest, TestConcurrency) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
std::vector<string> output_names = {y_ + ":0"};
auto fn = [&session, output_names]() {
for (int i = 0; i < 1000; ++i) {
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, {}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(3.0, mat(0, 0));
}
};
for (int i = 0; i < 4; ++i) {
tp->Schedule(fn);
}
delete tp;
}
TEST_F(DirectSessionMinusAXTest, TestConcurrency_Callable) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
Session::CallableHandle handle;
TF_ASSERT_OK(
session->MakeCallable(MakeCallableOptions({}, {y_ + ":0"}, {}), &handle));
auto fn = [&session, handle]() {
for (int i = 0; i < 1000; ++i) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(3.0, mat(0, 0));
}
};
for (int i = 0; i < 4; ++i) {
tp->Schedule(fn);
}
delete tp;
}
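// Like TestConcurrency, but with a session that owns its own thread pools
// (use_per_session_threads = true).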
TEST_F(DirectSessionMinusAXTest, TestPerSessionThreads) {
Initialize({1, 2, 3, 4});
SessionOptions options;
options.config.set_use_per_session_threads(true);
(*options.config.mutable_device_count())["CPU"] = 2;
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
std::vector<string> output_names = {y_ + ":0"};
auto fn = [&session, output_names]() {
for (int i = 0; i < 1000; ++i) {
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, {}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
EXPECT_FLOAT_EQ(3.0, mat(0, 0));
}
};
for (int i = 0; i < 4; ++i) {
tp->Schedule(fn);
}
delete tp;
}
TEST_F(DirectSessionMinusAXTest, TwoCreateCallsFails) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
ASSERT_FALSE(session->Create(def_).ok());
}
TEST_F(DirectSessionMinusAXTest, ForgetToCreate) {
Initialize({1, 2, 3, 4});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
ASSERT_FALSE(session->Run(inputs, {y_ + ":0"}, {y_neg_}, &outputs).ok());
}
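// A node assigned to a nonexistent device (cpu:2 when only two CPUs exist)
// must fail Create(); after reassigning it to cpu:1, Create() must succeed.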
TEST_F(DirectSessionMinusAXTest, InvalidDevice) {
GraphDef def;
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
a_tensor.flat<float>().setRandom();
Node* a = test::graph::Constant(&graph, a_tensor);
a->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
x_tensor.flat<float>().setRandom();
Node* x = test::graph::Constant(&graph, x_tensor);
x->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
Node* y = test::graph::Matmul(&graph, a, x, false, false);
y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:2");
graph.ToGraphDef(&def);
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
ASSERT_FALSE(session->Create(def).ok());
def.Clear();
y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
graph.ToGraphDef(&def);
session.reset(NewSession(options));
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {y->name() + ":0"}, {}, &outputs));
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetworkWithOpts) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.set_trace_level(RunOptions::SOFTWARE_TRACE);
RunMetadata run_metadata;
EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 0);
Status s = session->Run(run_options, inputs, output_names, target_nodes,
&outputs, &run_metadata);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
ASSERT_TRUE(run_metadata.has_step_stats());
EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 2);
}
TEST_F(DirectSessionMinusAXTest, RunSimpleNetworkWithOpts_Callable) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
Session::CallableHandle handle;
CallableOptions callable_options =
MakeCallableOptions({}, {y_ + ":0"}, {y_neg_});
callable_options.mutable_run_options()->set_trace_level(
RunOptions::SOFTWARE_TRACE);
TF_ASSERT_OK(session->MakeCallable(callable_options, &handle));
RunMetadata run_metadata;
EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 0);
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, &run_metadata));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
ASSERT_TRUE(run_metadata.has_step_stats());
EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 2);
}
TEST_F(DirectSessionMinusAXTest, UseRunHandlerPool) {
Initialize({3, 2, -1, 0});
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y_ + ":0"};
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.mutable_experimental()->set_use_run_handler_pool(true);
Status s = session->Run(run_options, inputs, output_names, target_nodes,
&outputs, nullptr);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(5.0, mat(0, 0));
}
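// Variable state written by an Assign in one Run() call must still be
// readable in a later Run() call on the same session.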
TEST(DirectSessionTest, KeepsStateAcrossRunsOfSession) {
GraphDef def;
Graph g(OpRegistry::Global());
Node* var = test::graph::Var(&g, DT_FLOAT, TensorShape({10}));
var->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
Tensor twenty(DT_FLOAT, TensorShape({10}));
for (int i = 0; i < 10; ++i) {
twenty.flat<float>()(i) = 20.0;
}
Node* twenty_node = test::graph::Constant(&g, twenty);
twenty_node->set_assigned_device_name(
"/job:localhost/replica:0/task:0/cpu:0");
Node* init = test::graph::Assign(&g, var, twenty_node);
init->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0");
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
Status s = session->Run(inputs, {init->name()}, {}, &outputs);
TF_ASSERT_OK(s);
s = session->Run(inputs, {var->name() + ":0"}, {}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_EQ(20.0, outputs[0].flat<float>()(0));
}
TEST(DirectSessionTest, MultipleFeedTest) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
Status s = session->Run(
{}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(1.0, outputs[0].flat<float>()(0));
ASSERT_EQ(2.0, outputs[1].flat<float>()(0));
s = session->Run(
{}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(2.0, outputs[0].flat<float>()(0));
ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
Tensor value_22(DT_FLOAT, TensorShape({}));
value_22.scalar<float>()() = 22.0;
s = session->Run(
{{first_const->name(), value_11}, {second_const->name(), value_22}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
s = session->Run(
{{second_const->name(), value_22}, {first_const->name(), value_11}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
s = session->Run(
{{first_const->name(), value_11}, {first_const->name(), value_22}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once"));
}
TEST(DirectSessionTest, MultipleFeedTest_Callable) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
Session::CallableHandle handle;
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions(
{}, {first_identity->name() + ":0", second_identity->name() + ":0"},
{}),
&handle));
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(1.0, outputs[0].flat<float>()(0));
ASSERT_EQ(2.0, outputs[1].flat<float>()(0));
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions(
{}, {second_identity->name() + ":0", first_identity->name() + ":0"},
{}),
&handle));
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(2.0, outputs[0].flat<float>()(0));
ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
Tensor value_22(DT_FLOAT, TensorShape({}));
value_22.scalar<float>()() = 22.0;
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions(
{first_const->name(), second_const->name()},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {}),
&handle));
TF_ASSERT_OK(
session->RunCallable(handle, {value_11, value_22}, &outputs, nullptr));
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions(
{second_const->name(), first_const->name()},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {}),
&handle));
TF_ASSERT_OK(
session->RunCallable(handle, {value_22, value_11}, &outputs, nullptr));
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
Status s = session->MakeCallable(
MakeCallableOptions(
{first_const->name(), first_const->name()},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {}),
&handle);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once"));
}
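// One from_tensor may drive several to_tensors: both Add inputs are
// connected to a's output, so the fetch computes a + a.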
TEST(DirectSessionTest, TestTensorConnectionUseTwice) {
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a_tensor, {1.0, 2.0, 3.0, 4.0});
Node* a = test::graph::Constant(&graph, a_tensor);
Tensor dummy_tensor(DT_FLOAT, TensorShape({1}));
test::FillValues<float>(&dummy_tensor, {-1.0});
Node* left = test::graph::Constant(&graph, dummy_tensor);
Node* right = test::graph::Constant(&graph, dummy_tensor);
Node* y = test::graph::Add(&graph, left, right);
GraphDef def;
graph.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
CallableOptions callable_options;
TensorConnection* c_left = callable_options.add_tensor_connection();
c_left->set_from_tensor(a->name() + ":0");
c_left->set_to_tensor(left->name() + ":0");
TensorConnection* c_right = callable_options.add_tensor_connection();
c_right->set_from_tensor(a->name() + ":0");
c_right->set_to_tensor(right->name() + ":0");
callable_options.add_fetch(y->name() + ":0");
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(callable_options, &handle));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr));
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_FLOAT_EQ(2.0, mat(0, 0));
EXPECT_FLOAT_EQ(4.0, mat(0, 1));
EXPECT_FLOAT_EQ(6.0, mat(1, 0));
EXPECT_FLOAT_EQ(8.0, mat(1, 1));
TF_ASSERT_OK(session->ReleaseCallable(handle));
}
TEST(DirectSessionTest, FetchMultipleTimes) {
Graph g(OpRegistry::Global());
Tensor seven_tensor(DT_INT32, TensorShape());
seven_tensor.flat<int32>()(0) = 7;
Node* seven_node = test::graph::Constant(&g, seven_tensor);
GraphDef def;
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
const std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
auto seven = seven_node->name();
Status s = session->Run(inputs, {seven, seven}, {}, &outputs);
TF_ASSERT_OK(s);
EXPECT_EQ(2, outputs.size());
for (int i = 0; i < outputs.size(); ++i) {
const Tensor& t = outputs[i];
ASSERT_TRUE(t.IsInitialized()) << i;
EXPECT_EQ(7, t.flat<int32>()(0)) << i;
}
}
TEST(DirectSessionTest, MultipleFeedTestSomeSyncRun) {
GraphDef def;
Graph g(OpRegistry::Global());
RunOptions run_options;
run_options.set_inter_op_thread_pool(-1);
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
Status s = session->Run(
run_options, {},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs, nullptr);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(1.0, outputs[0].flat<float>()(0));
ASSERT_EQ(2.0, outputs[1].flat<float>()(0));
s = session->Run(
{}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(2.0, outputs[0].flat<float>()(0));
ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
Tensor value_22(DT_FLOAT, TensorShape({}));
value_22.scalar<float>()() = 22.0;
s = session->Run(
{{first_const->name(), value_11}, {second_const->name(), value_22}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
s = session->Run(
{{second_const->name(), value_22}, {first_const->name(), value_11}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
s = session->Run(
run_options,
{{first_const->name(), value_11}, {first_const->name(), value_22}},
{first_identity->name() + ":0", second_identity->name() + ":0"}, {},
&outputs, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once"));
}
REGISTER_OP("SessionMetadataReader")
.Input("x: int64")
.Output("y: string")
.SetIsStateful()
.Doc(R"doc(SessionMetadataReader returns the session metadata.
x: int64
y: string
)doc");
class SessionMetadataReaderOp : public OpKernel {
public:
explicit SessionMetadataReaderOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor* out_tensor = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output("y", TensorShape({}), &out_tensor));
if (ctx->session_metadata() != nullptr) {
out_tensor->scalar<tstring>()() =
tsl::LegacyUnredactedDebugString(*ctx->session_metadata());
} else {
out_tensor->scalar<tstring>()() = "";
}
}
};
REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_CPU),
SessionMetadataReaderOp);
REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_GPU),
SessionMetadataReaderOp);
FunctionDef SessionMetadataReaderOpFn() {
return FunctionDefHelper::Define(
"SessionMetadataReaderFn",
{"x: int64"},
{"y: string"},
{},
{{{"y"}, "SessionMetadataReader", {"x"}, {}}});
}
TEST(DirectSessionTest, SessionMetadataAbsent) {
Graph g(OpRegistry::Global());
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "SessionMetadataReader", x);
GraphDef def;
g.ToGraphDef(&def);
auto sess = CreateSession();
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
  TF_ASSERT_OK(s);
  EXPECT_EQ("", outputs[0].scalar<tstring>()());
}
TEST(DirectSessionTest, SessionMetadataAbsentViaFunction) {
FunctionDefLibrary library_graph_def;
*library_graph_def.add_function() = SessionMetadataReaderOpFn();
FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def);
Graph g(&flib);
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "SessionMetadataReaderFn", x);
GraphDef def;
g.ToGraphDef(&def);
*def.mutable_library() = library_graph_def;
auto sess = CreateSession();
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
  TF_ASSERT_OK(s);
  EXPECT_EQ("", outputs[0].scalar<tstring>()());
}
TEST(DirectSessionTest, SessionMetadataPresent) {
Graph g(OpRegistry::Global());
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "SessionMetadataReader", x);
GraphDef def;
g.ToGraphDef(&def);
auto session_options = DefaultSessionOptions();
auto* session_metadata =
session_options.config.mutable_experimental()->mutable_session_metadata();
session_metadata->set_name("name");
session_metadata->set_version(1);
auto sess = std::unique_ptr<Session>(NewSession(session_options));
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs,
                     nullptr /* run_metadata */);
  TF_ASSERT_OK(s);
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
outputs[0].scalar<tstring>()(), &read_metadata));
EXPECT_EQ("name", read_metadata.name());
EXPECT_EQ(1, read_metadata.version());
RunMetadata metadata;
  s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs,
                &metadata /* run_metadata */);
  TF_ASSERT_OK(s);
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
outputs[0].scalar<tstring>()(), &read_metadata));
EXPECT_EQ("name", read_metadata.name());
EXPECT_EQ(1, read_metadata.version());
EXPECT_EQ(session_metadata->name(), metadata.session_metadata().name());
EXPECT_EQ(session_metadata->version(), metadata.session_metadata().version());
}
TEST(DirectSessionTest, SessionMetadataPresentViaFunction) {
FunctionDefLibrary library_graph_def;
*library_graph_def.add_function() = SessionMetadataReaderOpFn();
FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def);
Graph g(&flib);
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "SessionMetadataReaderFn", x);
GraphDef def;
g.ToGraphDef(&def);
*def.mutable_library() = library_graph_def;
auto session_options = DefaultSessionOptions();
auto* session_metadata =
session_options.config.mutable_experimental()->mutable_session_metadata();
session_metadata->set_name("name");
session_metadata->set_version(1);
auto sess = std::unique_ptr<Session>(NewSession(session_options));
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs,
                     nullptr /* run_metadata */);
  TF_ASSERT_OK(s);
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
outputs[0].scalar<tstring>()(), &read_metadata));
EXPECT_EQ("name", read_metadata.name());
EXPECT_EQ(1, read_metadata.version());
RunMetadata metadata;
  s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs,
                &metadata /* run_metadata */);
  TF_ASSERT_OK(s);
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
outputs[0].scalar<tstring>()(), &read_metadata));
EXPECT_EQ("name", read_metadata.name());
EXPECT_EQ(1, read_metadata.version());
EXPECT_EQ(session_metadata->name(), metadata.session_metadata().name());
EXPECT_EQ(session_metadata->version(), metadata.session_metadata().version());
}
TEST(DirectSessionTest, SessionMetadataKey) {
auto session_options0 = DefaultSessionOptions();
auto* session_metadata0 = session_options0.config.mutable_experimental()
->mutable_session_metadata();
session_metadata0->set_name("name");
Session* sess0_ptr;
ASSERT_TRUE(NewSession(session_options0, &sess0_ptr).ok());
auto sess0 = absl::WrapUnique(sess0_ptr);
Session* dup_ptr;
EXPECT_TRUE(
errors::IsInvalidArgument(NewSession(session_options0, &dup_ptr)));
auto session_options1 = DefaultSessionOptions();
auto* session_metadata1 = session_options1.config.mutable_experimental()
->mutable_session_metadata();
session_metadata1->set_name("name");
session_metadata1->set_version(1);
Session* sess1_ptr;
EXPECT_TRUE(NewSession(session_options1, &sess1_ptr).ok());
auto sess1 = absl::WrapUnique(sess1_ptr);
sess0 = nullptr;
EXPECT_TRUE(NewSession(session_options0, &dup_ptr).ok());
auto dup = absl::WrapUnique(dup_ptr);
auto sess_without_metadata0 = CreateSession();
EXPECT_NE(sess_without_metadata0, nullptr);
auto sess_without_metadata1 = CreateSession();
EXPECT_NE(sess_without_metadata1, nullptr);
}
TEST(DirectSessionTest, SessionMetadataInvalid) {
const auto valid_session_options = DefaultSessionOptions();
Session* sess_ptr;
ASSERT_TRUE(NewSession(valid_session_options, &sess_ptr).ok());
auto sess = absl::WrapUnique(sess_ptr);
auto invalid_session_options = valid_session_options;
auto* invalid_metadata =
invalid_session_options.config.mutable_experimental()
->mutable_session_metadata();
invalid_metadata->set_name("name");
invalid_metadata->set_version(-1);
Session* error_sess_ptr;
EXPECT_TRUE(errors::IsInvalidArgument(
NewSession(invalid_session_options, &error_sess_ptr)));
}
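// Test-only op that returns a hash of the thread ID that ran Compute().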
REGISTER_OP("ThreadID").Input("x: int64").Output("y: int64").Doc(R"doc(
ThreadID returns the thread ID that called compute.
x: int64
y: int64
)doc");
class ThreadIDOp : public OpKernel {
public:
explicit ThreadIDOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor* out_tensor = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output("y", TensorShape({}), &out_tensor));
std::hash<std::thread::id> hasher;
out_tensor->scalar<int64_t>()() =
static_cast<int64_t>(hasher(std::this_thread::get_id()));
}
};
REGISTER_KERNEL_BUILDER(Name("ThreadID").Device(DEVICE_CPU), ThreadIDOp);
TEST(DirectSessionTest, SessionSyncRun) {
Graph g(OpRegistry::Global());
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "ThreadID", x);
GraphDef def;
g.ToGraphDef(&def);
auto sess = CreateSession();
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
  TF_ASSERT_OK(s);
std::hash<std::thread::id> hasher;
EXPECT_EQ(static_cast<int64_t>(hasher(std::this_thread::get_id())),
static_cast<int64_t>(outputs[0].scalar<int64_t>()()));
}
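// An "expensive" no-op whose kernel fails if it sees more than one
// ExecutorState::Process() frame on the stack, i.e. if the executor
// recursed while running a deep graph synchronously.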
REGISTER_OP("ExpensiveNoop").SetIsStateful();
class ExpensiveNoopOp : public OpKernel {
public:
using OpKernel::OpKernel;
bool IsExpensive() override { return true; }
void Compute(OpKernelContext* ctx) override {
    // Count how many ExecutorState::Process() frames are on the stack.
    const string& stack_trace = tensorflow::CurrentStackTrace();
    const string process_method = "ExecutorState::Process()";
    size_t pos = 0;
    int frame_count = 0;
    while ((pos = stack_trace.find(process_method, pos)) != string::npos) {
++frame_count;
++pos;
}
OP_REQUIRES(ctx, frame_count <= 1,
errors::Internal(
"Recursive call to ExecutorState::Process() detected."));
}
};
REGISTER_KERNEL_BUILDER(Name("ExpensiveNoop").Device(DEVICE_CPU),
ExpensiveNoopOp);
TEST(DirectSessionTest, SessionSyncRun_DeepGraph) {
Graph g(OpRegistry::Global());
std::vector<Node*> nodes;
nodes.reserve(1024);
auto make_expensive_noop = [&g](absl::Span<Node* const> control_deps) {
Node* ret;
auto builder = NodeBuilder(g.NewName("N"), "ExpensiveNoop");
for (Node* control_dep : control_deps) {
builder = builder.ControlInput(control_dep);
}
TF_CHECK_OK(builder.Finalize(&g, &ret));
return ret;
};
Node* base = make_expensive_noop({});
Node* child_1 = make_expensive_noop({base});
Node* child_2 = make_expensive_noop({base});
GraphDef def;
g.ToGraphDef(&def);
auto sess = CreateSession();
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
run_opts.set_inter_op_thread_pool(-1);
EXPECT_TRUE(sess->Run(run_opts, {}, {}, {child_1->name(), child_2->name()},
&outputs, nullptr)
.ok());
}
TEST(DirectSessionTest, SyncSession) {
Graph g(OpRegistry::Global());
Tensor vx(DT_INT64, TensorShape({}));
vx.scalar<int64_t>()() = 17;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "ThreadID", x);
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
options.config.set_inter_op_parallelism_threads(-1);
std::unique_ptr<Session> sess(NewSession(options));
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
RunOptions run_opts;
  auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
  TF_ASSERT_OK(s);
std::hash<std::thread::id> hasher;
EXPECT_EQ(static_cast<int64_t>(hasher(std::this_thread::get_id())),
static_cast<int64_t>(outputs[0].scalar<int64_t>()()));
}
REGISTER_OP("Darth").Input("x: float").Output("y: float").Doc(R"doc(
Darth promises one return value.
x: float
y: float
)doc");
class DarthOp : public OpKernel {
public:
explicit DarthOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {}
};
REGISTER_KERNEL_BUILDER(Name("Darth").Device(DEVICE_CPU), DarthOp);
TEST(DirectSessionTest, DarthKernel) {
Graph g(OpRegistry::Global());
Tensor vx(DT_FLOAT, TensorShape({}));
vx.scalar<float>()() = 1.0;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "Darth", x);
GraphDef def;
g.ToGraphDef(&def);
auto sess = CreateSession();
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
auto s = sess->Run({}, {y->name() + ":0"}, {}, &outputs);
EXPECT_TRUE(errors::IsInternal(s));
}
TEST(DirectSessionTest, PlacePrunedGraph) {
{
Graph g(OpRegistry::Global());
Tensor vx(DT_FLOAT, TensorShape({}));
vx.scalar<float>()() = 1.0;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "Darth", x);
y->set_assigned_device_name("/job:localhost/replica:0/task:0/device:GPU:0");
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
std::unique_ptr<Session> sess(NewSession(options));
auto s = sess->Create(def);
EXPECT_TRUE(errors::IsInvalidArgument(s));
}
{
Graph g(OpRegistry::Global());
Tensor vx(DT_FLOAT, TensorShape({}));
vx.scalar<float>()() = 1.0;
Node* x = test::graph::Constant(&g, vx);
Node* y = test::graph::Unary(&g, "Darth", x);
y->set_assigned_device_name("/job:localhost/replica:0/task:0/device:GPU:0");
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
options.config.mutable_graph_options()->set_place_pruned_graph(true);
std::unique_ptr<Session> sess(NewSession(options));
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
auto s = sess->Run({}, {x->name() + ":0"}, {}, &outputs);
TF_EXPECT_OK(s);
}
}
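// Partial run: set up one handle, feed first_const and fetch first_identity,
// then feed second_const and fetch the remaining outputs of the same handle.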
TEST(DirectSessionTest, PartialRunTest) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
Node* third = test::graph::Add(&g, first_identity, second_identity);
Node* third_identity = test::graph::Identity(&g, third);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
string handle;
Status s = session->PRunSetup(
{first_const->name(), second_const->name()},
{first_identity->name() + ":0", second_identity->name() + ":0",
third_identity->name() + ":0"},
{}, &handle);
TF_ASSERT_OK(s);
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
Tensor value_22(DT_FLOAT, TensorShape({}));
value_22.scalar<float>()() = 22.0;
s = session->PRun(handle, {{first_const->name(), value_11}},
{first_identity->name() + ":0"}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
s = session->PRun(
handle, {{second_const->name(), value_22}},
{second_identity->name() + ":0", third_identity->name() + ":0"},
&outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(22.0, outputs[0].flat<float>()(0));
ASSERT_EQ(11.0 + 22.0, outputs[1].flat<float>()(0));
}
TEST(DirectSessionTest, PartialRunMissingFeed) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
Node* third = test::graph::Add(&g, first_identity, second_identity);
Node* third_identity = test::graph::Identity(&g, third);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
string handle;
Status s = session->PRunSetup({first_const->name(), second_const->name()},
{third_identity->name() + ":0"}, {}, &handle);
TF_ASSERT_OK(s);
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
s = session->PRun(handle, {{first_const->name(), value_11}},
{third_identity->name() + ":0"}, &outputs);
ASSERT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "can't be computed from the feeds"));
}
TEST(DirectSessionTest, PartialRunMultiOutputFeed) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor bool_value(DT_BOOL, TensorShape({}));
bool_value.scalar<bool>()() = true;
Node* bool_const = test::graph::Constant(&g, bool_value);
Node* switch_node = test::graph::Switch(&g, bool_const, bool_const);
Node* fourth_identity = test::graph::Identity(&g, switch_node, 1);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
string handle;
Status s = session->PRunSetup({switch_node->name() + ":1"},
{fourth_identity->name() + ":0"}, {}, &handle);
TF_ASSERT_OK(s);
s = session->PRun(handle, {}, {fourth_identity->name() + ":0"}, &outputs);
ASSERT_TRUE(errors::IsInvalidArgument(s));
EXPECT_TRUE(
absl::StrContains(s.message(), "can't be computed from the feeds"));
s = session->PRun(handle, {{switch_node->name() + ":1", bool_value}},
{fourth_identity->name() + ":0"}, &outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(true, outputs[0].flat<bool>()(0));
}
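// Stores a tensor in the session's state via GetSessionHandleV2, reads it
// back through GetSessionTensor, and finally deletes it.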
TEST(DirectSessionTest, RunHandleTest) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor value0(DT_FLOAT, TensorShape({}));
value0.scalar<float>()() = 1.0;
Node* const0 = test::graph::Constant(&g, value0);
Node* identity0 = test::graph::Identity(&g, const0);
Tensor value1(DT_FLOAT, TensorShape({}));
value1.scalar<float>()() = 2.0;
Node* const1 = test::graph::Constant(&g, value1);
Node* node3 = test::graph::Add(&g, identity0, const1);
Node* node4 = test::graph::Unary(&g, "GetSessionHandleV2", node3);
Tensor value2(DT_STRING, TensorShape({}));
Node* const2 = test::graph::Constant(&g, value2);
Node* node5 = test::graph::GetSessionTensor(&g, const2);
Node* node6 = test::graph::Add(&g, node5, const1);
Node* node7 = test::graph::Unary(&g, "DeleteSessionTensor", const2);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
Status s = session->Run({}, {node4->name() + ":0"}, {}, &outputs);
ASSERT_TRUE(s.ok());
ASSERT_EQ(1, outputs.size());
const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()();
Tensor string_handle(DT_STRING, {});
string_handle.flat<tstring>().setConstant(resource_handle.name());
std::vector<Tensor> outputs1;
s = session->Run({{const2->name(), string_handle}}, {node6->name() + ":0"},
{}, &outputs1);
ASSERT_TRUE(s.ok());
ASSERT_EQ(1, outputs1.size());
ASSERT_EQ(5.0, outputs1[0].flat<float>()(0));
std::vector<Tensor> outputs2;
s = session->Run({{const2->name(), string_handle}}, {}, {node7->name()},
&outputs2);
ASSERT_TRUE(s.ok());
}
TEST(DirectSessionTest, RunHandleTest_Callable) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor value0(DT_FLOAT, TensorShape({}));
value0.scalar<float>()() = 1.0;
Node* const0 = test::graph::Constant(&g, value0);
Node* identity0 = test::graph::Identity(&g, const0);
Tensor value1(DT_FLOAT, TensorShape({}));
value1.scalar<float>()() = 2.0;
Node* const1 = test::graph::Constant(&g, value1);
Node* node3 = test::graph::Add(&g, identity0, const1);
Node* node4 = test::graph::Unary(&g, "GetSessionHandleV2", node3);
Tensor value2(DT_STRING, TensorShape({}));
Node* const2 = test::graph::Constant(&g, value2);
Node* node5 = test::graph::GetSessionTensor(&g, const2);
Node* node6 = test::graph::Add(&g, node5, const1);
Node* node7 = test::graph::Unary(&g, "DeleteSessionTensor", const2);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
Status s = session->Run({}, {node4->name() + ":0"}, {}, &outputs);
ASSERT_TRUE(s.ok());
ASSERT_EQ(1, outputs.size());
const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()();
Tensor string_handle(DT_STRING, {});
string_handle.flat<tstring>().setConstant(resource_handle.name());
std::vector<Tensor> outputs1;
s = session->Run({{const2->name(), string_handle}}, {node6->name() + ":0"},
{}, &outputs1);
ASSERT_TRUE(s.ok());
ASSERT_EQ(1, outputs1.size());
ASSERT_EQ(5.0, outputs1[0].flat<float>()(0));
std::vector<Tensor> outputs2;
s = session->Run({{const2->name(), string_handle}}, {}, {node7->name()},
&outputs2);
ASSERT_TRUE(s.ok());
}
TEST(DirectSessionTest, CreateGraphFailsWhenAssigningAFedVar) {
Graph graph(OpRegistry::Global());
Node* a = test::graph::Var(&graph, DT_FLOAT, {});
Node* b = test::graph::Constant(&graph, {});
Tensor zero(DT_FLOAT, {});
test::FillValues<float>(&zero, {0});
Node* assign = test::graph::Assign(&graph, a, b);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
std::vector<Tensor> outputs;
Status s = session->Run({{a->name(), zero}}, {assign->name()}, {}, &outputs);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST(DirectSessionTest, TimeoutSession) {
GraphDef graph;
protobuf::TextFormat::ParseFromString(R"pb(
node {
name: 'fifo_queue'
op: 'FIFOQueue'
device: '/device:CPU:0'
attr {
key: 'capacity'
value { i: 10 }
}
attr {
key: 'component_types'
value { list { type: DT_FLOAT } }
}
attr {
key: 'container'
value { s: '' }
}
attr {
key: 'shapes'
value { list {} }
}
attr {
key: 'shared_name'
value { s: '' }
}
}
node {
name: 'fifo_queue_Dequeue'
op: 'QueueDequeue'
input: 'fifo_queue'
device: '/device:CPU:0'
attr {
key: 'component_types'
value { list { type: DT_FLOAT } }
}
attr {
key: 'timeout_ms'
value { i: -1 }
}
}
versions { producer: 9 }
)pb",
&graph);
{
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
options.config.set_operation_timeout_in_ms(100);
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
Status s = session->Run({}, {}, {"fifo_queue_Dequeue"}, nullptr);
ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code());
TF_ASSERT_OK(session->Close());
}
{
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
RunOptions run_options;
run_options.set_timeout_in_ms(20);
Status s2 = session->Run(run_options, {}, {}, {"fifo_queue_Dequeue"},
nullptr, nullptr);
ASSERT_EQ(error::DEADLINE_EXCEEDED, s2.code());
TF_ASSERT_OK(session->Close());
}
}
class CancellationMgrPollingOp : public OpKernel {
public:
explicit CancellationMgrPollingOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
CancellationManager* cm = ctx->cancellation_manager();
while (!cm->IsCancelled()) {
ctx->env()->SleepForMicroseconds(1000);
}
notification.Notify();
}
static Notification notification;
};
Notification CancellationMgrPollingOp::notification;
REGISTER_KERNEL_BUILDER(Name("CancellationMgrPollingOp").Device(DEVICE_CPU),
CancellationMgrPollingOp);
REGISTER_OP("CancellationMgrPollingOp").Doc("");
TEST(DirectSessionTest, TestTimeoutCleanShutdown) {
GraphDef graph;
protobuf::TextFormat::ParseFromString(R"pb(
node {
name: 'cm_polling'
op: 'CancellationMgrPollingOp'
device: '/device:CPU:0'
}
versions { producer: 9 }
)pb",
&graph);
SessionOptions options;
options.config.set_operation_timeout_in_ms(100);
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph));
Status s = session->Run({}, {}, {"cm_polling"}, nullptr);
ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code());
ASSERT_TRUE(CancellationMgrPollingOp::notification.HasBeenNotified());
TF_ASSERT_OK(session->Close());
}
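// Shared body for the inter-op thread pool tests: a blocking op occupies the
// single-threaded small pool while runs scheduled on the large pool and on
// the caller's thread (kSyncPool) must still complete.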
static void TestSessionInterOpThreadsImpl(bool use_function_lib,
bool use_global_pools) {
using test::function::blocking_op_state;
using test::function::BlockingOpState;
FunctionDefLibrary library_graph_def;
if (use_function_lib) {
*library_graph_def.add_function() = test::function::BlockingOpFn();
}
FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def);
Graph g(&flib);
Tensor t(DT_FLOAT, TensorShape({}));
t.scalar<float>()() = {1.2f};
Node* x = test::graph::Constant(&g, t);
Node* y;
if (use_function_lib) {
y = test::graph::Unary(&g, "BlockingOpFn", x);
} else {
y = test::graph::Unary(&g, "BlockingOp", x);
}
GraphDef def;
g.ToGraphDef(&def);
*def.mutable_library() = library_graph_def;
SessionOptions options;
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
options.config.mutable_graph_options()
->mutable_rewrite_options()
->set_constant_folding(RewriterConfig::OFF);
(*options.config.mutable_device_count())["CPU"] = 2;
(*options.config.mutable_device_count())["GPU"] = 0;
auto* p = options.config.add_session_inter_op_thread_pool();
if (use_global_pools) p->set_global_name("large pool");
p = options.config.add_session_inter_op_thread_pool();
if (use_global_pools) p->set_global_name("small pool");
p->set_num_threads(1);
const int kSyncPool = -1;
const int kLargePool = 0;
const int kSmallPool = 1;
std::vector<std::unique_ptr<Session>> sessions;
if (!use_global_pools) {
sessions.emplace_back(NewSession(options));
TF_ASSERT_OK(sessions.back()->Create(def));
}
mutex sessions_mu;
std::atomic<int32> num_done(0);
auto add_session_run_call =
[use_global_pools, &def, &options, &sessions, &sessions_mu, &num_done](
thread::ThreadPool* tp, Node* node, int inter_op_pool) {
auto fn = [use_global_pools, &def, &options, &sessions, &sessions_mu,
inter_op_pool, node, &num_done]() {
RunOptions run_options;
run_options.set_inter_op_thread_pool(inter_op_pool);
std::vector<Tensor> outputs;
Session* session;
if (use_global_pools) {
std::unique_ptr<Session> s(NewSession(options));
TF_ASSERT_OK(s->Create(def));
session = s.get();
mutex_lock l(sessions_mu);
sessions.emplace_back(std::move(s));
} else {
session = sessions[0].get();
}
        Status s = session->Run(run_options, {} /* inputs */,
                                {node->name() + ":0"} /* output_names */,
                                {} /* target_nodes */, &outputs,
                                nullptr /* run_metadata */);
TF_CHECK_OK(s);
ASSERT_EQ(1, outputs.size());
auto flat = outputs[0].flat<float>();
EXPECT_FLOAT_EQ(1.2, flat(0));
num_done.fetch_add(1);
};
if (tp != nullptr) {
tp->Schedule(fn);
} else {
fn();
}
};
thread::ThreadPool* tp1 = new thread::ThreadPool(Env::Default(), "tp1", 1);
blocking_op_state = new BlockingOpState();
add_session_run_call(tp1, y, kLargePool);
blocking_op_state->AwaitState(1);
blocking_op_state->MoveToState(1, 2);
blocking_op_state->AwaitState(3);
blocking_op_state->MoveToState(3, 0);
delete tp1;
num_done = 0;
tp1 = new thread::ThreadPool(Env::Default(), "tp1", 5);
add_session_run_call(tp1, y, kSmallPool);
blocking_op_state->AwaitState(1);
const int kBlockedThreads = 3;
for (int i = 0; i < kBlockedThreads; ++i) {
add_session_run_call(tp1, x, kSmallPool);
}
thread::ThreadPool* tp2 = new thread::ThreadPool(Env::Default(), "tp2", 3);
const int kUnblockedThreads = 4;
for (int i = 0; i < kUnblockedThreads; ++i) {
add_session_run_call(tp2, x, kLargePool);
}
delete tp2;
EXPECT_EQ(kUnblockedThreads, num_done.load());
add_session_run_call(nullptr, x, kSyncPool);
blocking_op_state->MoveToState(1, 2);
delete tp1;
EXPECT_EQ(kUnblockedThreads + kBlockedThreads + 1 + 1, num_done.load());
delete blocking_op_state;
blocking_op_state = nullptr;
}
TEST(DirectSessionTest, TestSessionInterOpThreads) {
  TestSessionInterOpThreadsImpl(false /* use_function_lib */,
                                false /* use_global_pools */);
}
TEST(DirectSessionTest, TestSessionInterOpThreadsWithFunctions) {
  TestSessionInterOpThreadsImpl(true /* use_function_lib */,
                                false /* use_global_pools */);
}
TEST(DirectSessionTest, TestSessionInterOpGlobalPools) {
  TestSessionInterOpThreadsImpl(false /* use_function_lib */,
                                true /* use_global_pools */);
}
TEST(DirectSessionTest, TestSessionInterOpGlobalPoolsWithFunctions) {
  TestSessionInterOpThreadsImpl(true /* use_function_lib */,
                                true /* use_global_pools */);
}
TEST(DirectSessionTest, TestSessionInterOpThreadsInvalidOptions) {
Graph g(OpRegistry::Global());
Tensor t(DT_FLOAT, TensorShape({}));
t.scalar<float>()() = {1.2f};
Node* x = test::graph::Constant(&g, t);
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
(*options.config.mutable_device_count())["CPU"] = 2;
options.config.add_session_inter_op_thread_pool();
{
std::unique_ptr<Session> session(NewSession(options));
TF_ASSERT_OK(session->Create(def));
for (int pool_num = -2; pool_num <= 1; pool_num += 3) {
RunOptions run_options;
run_options.set_inter_op_thread_pool(pool_num);
std::vector<Tensor> outputs;
      Status s = session->Run(run_options, {} /* inputs */,
                              {x->name() + ":0"} /* output_names */,
                              {} /* target_nodes */, &outputs,
                              nullptr /* run_metadata */);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(),
strings::StrCat("Invalid inter_op_thread_pool: ", pool_num)));
}
}
std::vector<std::unique_ptr<Session>> sessions;
auto* pool_config = options.config.mutable_session_inter_op_thread_pool(0);
pool_config->set_num_threads(0);
pool_config->set_global_name("foo");
sessions.emplace_back(NewSession(options));
TF_ASSERT_OK(sessions.back()->Create(def));
sessions.emplace_back(NewSession(options));
TF_ASSERT_OK(sessions.back()->Create(def));
for (int pass = 0; pass < 2; ++pass) {
for (int i = 1; i < 128; ++i) {
pool_config->set_num_threads(i);
sessions.emplace_back(NewSession(options));
auto status = sessions.back()->Create(def);
ASSERT_FALSE(status.ok()) << status;
}
sessions.clear();
}
}
TEST(DirectSessionTest, TestDirectSessionRunClose) {
Graph g(OpRegistry::Global());
Tensor t(DT_FLOAT, TensorShape({}));
t.scalar<float>()() = {1.2f};
Node* var_val = test::graph::Constant(&g, t);
Node* var = test::graph::Var(&g, DT_FLOAT, {});
Node* var_assign = test::graph::Assign(&g, var, var_val);
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
  TF_ASSERT_OK(session->Run({} /* inputs */, {} /* output_names */,
                            {var_assign->name()} /* target_nodes */, nullptr));
std::vector<Tensor> outputs;
  TF_ASSERT_OK(session->Run({} /* inputs */,
                            {var->name() + ":0"} /* output_names */, {},
                            &outputs));
EXPECT_EQ(t.scalar<float>()(), outputs[0].scalar<float>()());
outputs.clear();
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(
MakeCallableOptions({}, {}, {var_assign->name()}), &handle));
TF_ASSERT_OK(session->Close());
  Status s = session->Run({} /* inputs */, {} /* output_names */,
                          {var_assign->name()} /* target_nodes */, nullptr);
EXPECT_EQ(s.code(), error::CANCELLED);
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed."));
s = session->RunCallable(handle, {}, {}, nullptr);
EXPECT_EQ(s.code(), error::CANCELLED);
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed."));
}
TEST(DirectSessionTest, TestDirectSessionPRunClose) {
GraphDef def;
Graph g(OpRegistry::Global());
Tensor first_value(DT_FLOAT, TensorShape({}));
first_value.scalar<float>()() = 1.0;
Node* first_const = test::graph::Constant(&g, first_value);
Node* first_identity = test::graph::Identity(&g, first_const);
Tensor second_value(DT_FLOAT, TensorShape({}));
second_value.scalar<float>()() = 2.0;
Node* second_const = test::graph::Constant(&g, second_value);
Node* second_identity = test::graph::Identity(&g, second_const);
Node* third = test::graph::Add(&g, first_identity, second_identity);
Node* third_identity = test::graph::Identity(&g, third);
g.ToGraphDef(&def);
auto session = CreateSession();
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
string handle;
Status s = session->PRunSetup(
{first_const->name(), second_const->name()},
{first_identity->name() + ":0", second_identity->name() + ":0",
third_identity->name() + ":0"},
{}, &handle);
TF_ASSERT_OK(s);
Tensor value_11(DT_FLOAT, TensorShape({}));
value_11.scalar<float>()() = 11.0;
Tensor value_22(DT_FLOAT, TensorShape({}));
value_22.scalar<float>()() = 22.0;
TF_ASSERT_OK(session->Close());
s = session->PRun(handle, {{first_const->name(), value_11}},
{first_identity->name() + ":0"}, &outputs);
EXPECT_EQ(s.code(), error::CANCELLED);
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed."));
}
TEST(DirectSessionTest, TestDirectSessionReset) {
Graph g(OpRegistry::Global());
Tensor t(DT_FLOAT, TensorShape({}));
t.scalar<float>()() = {1.2f};
Node* var_val = test::graph::Constant(&g, t);
Node* var = test::graph::Var(&g, DT_FLOAT, {});
Node* var_assign = test::graph::Assign(&g, var, var_val);
GraphDef def;
g.ToGraphDef(&def);
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(def));
  TF_ASSERT_OK(session->Run({} /* inputs */, {} /* output_names */,
                            {var_assign->name()} /* target_nodes */, nullptr));
std::vector<Tensor> outputs;
  TF_ASSERT_OK(session->Run({} /* inputs */,
                            {var->name() + ":0"} /* output_names */, {},
                            &outputs));
EXPECT_EQ(t.scalar<float>()(), outputs[0].scalar<float>()());
outputs.clear();
TF_EXPECT_OK(Reset(options, {}));
  Status s = session->Run({} /* inputs */, {} /* output_names */,
                          {var_assign->name()} /* target_nodes */, nullptr);
EXPECT_EQ(s.code(), error::CANCELLED);
EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed."));
}
TEST(DirectSessionTest, LocalDeviceManager) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
const DeviceMgr* mgr = nullptr;
TF_ASSERT_OK(session->LocalDeviceManager(&mgr));
ASSERT_TRUE(mgr != nullptr);
EXPECT_GT(mgr->ListDevices().size(), 0);
}
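// Minimal Device stub backing the fake APU/ZPU factories registered below.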
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
Status Sync() override {
return absl::UnimplementedError("FakeDevice::Sync()");
}
};
template <char FirstLetter>
class FakeFactory : public DeviceFactory {
public:
Status ListPhysicalDevices(std::vector<string>* devices) override {
return absl::OkStatus();
}
Status CreateDevices(const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) override {
std::string name = absl::StrFormat("%cPU", FirstLetter);
DeviceAttributes attr;
attr.set_name(
absl::StrFormat("/job:localhost/replica:0/task:0/device:%s:0", name));
attr.set_device_type(DeviceType(name).type());
devices->emplace_back(std::make_unique<FakeDevice>(attr));
return absl::OkStatus();
}
};
REGISTER_LOCAL_DEVICE_FACTORY("APU", FakeFactory<'A'>);
REGISTER_LOCAL_DEVICE_FACTORY("ZPU", FakeFactory<'Z'>);
TEST(DirectSessionTest, FeedsAndFetchesGoToCpu) {
auto session = CreateSession();
const DeviceMgr* mgr = nullptr;
TF_ASSERT_OK(session->LocalDeviceManager(&mgr));
ASSERT_TRUE(mgr != nullptr);
EXPECT_GT(mgr->ListDevices().size(), 2);
GraphDef def;
Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
a_tensor.flat<float>().setRandom();
Node* a = test::graph::Constant(&graph, a_tensor);
Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
x_tensor.flat<float>().setRandom();
Node* x = test::graph::Constant(&graph, x_tensor);
Node* y = test::graph::Matmul(&graph, a, x, false, false);
graph.ToGraphDef(&def);
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {y->name() + ":0"}, {}, &outputs));
}
GraphDef CreateGraphForYEqualsXSquared() {
GraphDef graph_def;
const char* text_proto = R"EOF(
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
}
node {
name: "y"
op: "Square"
input: "x"
attr { key: "T" value { type: DT_FLOAT } }
}
versions {
producer: 26
}
)EOF";
QCHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph_def));
return graph_def;
}
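// Returns true iff the tensor's backing buffer lives in GPU (CUDA or ROCm)
// device memory; always false in CPU-only builds.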
bool IsCUDATensor(const Tensor& t) {
#ifdef GOOGLE_CUDA
cudaPointerAttributes attributes;
cudaError_t err =
cudaPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == cudaErrorInvalidValue) return false;
CHECK_EQ(cudaSuccess, err) << cudaGetErrorString(err);
return (attributes.type == cudaMemoryTypeDevice);
#elif TENSORFLOW_USE_ROCM
hipPointerAttribute_t attributes;
hipError_t err = hipPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == hipErrorInvalidValue) return false;
CHECK_EQ(hipSuccess, err) << hipGetErrorString(err);
return (attributes.memoryType == hipMemoryTypeDevice);
#else
return false;
#endif
}
string GPUDeviceName(Session* session) {
std::vector<DeviceAttributes> devices;
TF_CHECK_OK(session->ListDevices(&devices));
for (const DeviceAttributes& d : devices) {
if (d.device_type() == "GPU" || d.device_type() == "gpu") {
return d.name();
}
}
return "";
}
TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory) {
std::unique_ptr<Session> session(NewSession(SessionOptions()));
const string gpu_device_name = GPUDeviceName(session.get());
if (gpu_device_name.empty()) {
LOG(INFO) << "Skipping test since no GPU is available";
return;
}
TF_ASSERT_OK(session->Create(CreateGraphForYEqualsXSquared()));
CallableOptions opts;
opts.add_feed("x:0");
opts.add_fetch("y:0");
Tensor gpu_tensor;
{
Session::CallableHandle feed_cpu_fetch_gpu;
opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
opts.set_fetch_skip_sync(true);
TF_ASSERT_OK(session->MakeCallable(opts, &feed_cpu_fetch_gpu));
Tensor input(DT_FLOAT, {});
input.scalar<float>()() = 2.0f;
std::vector<Tensor> outputs;
TF_ASSERT_OK(
session->RunCallable(feed_cpu_fetch_gpu, {input}, &outputs, nullptr));
TF_ASSERT_OK(session->ReleaseCallable(feed_cpu_fetch_gpu));
ASSERT_EQ(1, outputs.size());
gpu_tensor = outputs[0];
ASSERT_TRUE(IsCUDATensor(gpu_tensor));
}
{
Session::CallableHandle feed_gpu_fetch_cpu;
opts.clear_fetch_devices();
opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
TF_ASSERT_OK(session->MakeCallable(opts, &feed_gpu_fetch_cpu));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(feed_gpu_fetch_cpu, {gpu_tensor},
&outputs, nullptr));
TF_ASSERT_OK(session->ReleaseCallable(feed_gpu_fetch_cpu));
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(16.0, outputs[0].scalar<float>()());
}
}
GraphDef CreateIdentityGraphDef(DataType dtype) {
GraphDef def;
AttrValue dtype_attr;
dtype_attr.set_type(dtype);
AttrValue shape_attr;
shape_attr.mutable_shape()->set_unknown_rank(true);
auto* placeholder = def.add_node();
placeholder->set_name("x");
placeholder->set_op("Placeholder");
placeholder->mutable_attr()->insert({"dtype", dtype_attr});
placeholder->mutable_attr()->insert({"shape", shape_attr});
auto* identity = def.add_node();
identity->set_name("y");
identity->set_op("Identity");
identity->add_input("x");
identity->mutable_attr()->insert({"T", dtype_attr});
return def;
}
void TestFeedAndFetchTensorsInDeviceMemory(
const SessionOptions& session_options, DataType dtype) {
std::unique_ptr<Session> session(NewSession(session_options));
const string gpu_device_name = GPUDeviceName(session.get());
if (gpu_device_name.empty()) {
LOG(INFO) << "Skipping test since no GPU is available";
return;
}
TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype)))
<< DataType_Name(dtype);
CallableOptions opts;
opts.add_feed("x:0");
opts.add_fetch("y:0");
Tensor gpu_tensor;
Tensor host_tensor(dtype, {3});
{
opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
opts.set_fetch_skip_sync(true);
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype);
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {host_tensor}, &outputs, nullptr))
<< DataType_Name(dtype);
TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype);
ASSERT_EQ(1, outputs.size()) << DataType_Name(dtype);
gpu_tensor = outputs[0];
ASSERT_TRUE(IsCUDATensor(gpu_tensor)) << DataType_Name(dtype);
}
{
opts.clear_fetch_devices();
opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
Session::CallableHandle handle;
TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype);
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->RunCallable(handle, {gpu_tensor}, &outputs, nullptr))
<< DataType_Name(dtype);
TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype);
ASSERT_EQ(1, outputs.size());
const StringPiece actual_data = outputs[0].tensor_data();
const StringPiece expected_data = host_tensor.tensor_data();
EXPECT_EQ(expected_data.size(), actual_data.size()) << DataType_Name(dtype);
EXPECT_EQ(0, memcmp(expected_data.data(), actual_data.data(),
std::min(expected_data.size(), actual_data.size())))
<< DataType_Name(dtype);
}
}
void TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable(
const SessionOptions& session_options, DataType dtype) {
std::unique_ptr<Session> session(NewSession(session_options));
const string gpu_device_name = GPUDeviceName(session.get());
if (gpu_device_name.empty()) {
LOG(INFO) << "Skipping test since no GPU is available";
return;
}
TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype)))
<< DataType_Name(dtype);
CallableOptions opts;
opts.add_feed("x:0");
opts.add_fetch("y:0");
{
opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name});
opts.set_fetch_skip_sync(true);
Session::CallableHandle handle;
Status status = session->MakeCallable(opts, &handle);
EXPECT_FALSE(status.ok()) << DataType_Name(dtype);
EXPECT_TRUE(absl::StrContains(
status.message(),
strings::StrCat(
"Cannot feed or fetch tensor 'y:0' from device ", gpu_device_name,
" as feeding/fetching from GPU devices is not yet supported for ",
DataTypeString(dtype), " tensors")))
<< DataType_Name(dtype) << ", Status: " << status;
}
{
opts.clear_feed_devices();
opts.mutable_feed_devices()->insert({"x:0", gpu_device_name});
Session::CallableHandle handle;
Status status = session->MakeCallable(opts, &handle);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(
status.message(),
strings::StrCat(
"Cannot feed or fetch tensor 'x:0' from device ", gpu_device_name,
" as feeding/fetching from GPU devices is not yet supported for ",
DataTypeString(dtype), " tensors")))
<< DataType_Name(dtype) << ", Status: " << status;
}
}
void TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(
const SessionOptions& opts) {
for (int i = DataType_MIN; i <= DataType_MAX; ++i) {
if (!DataType_IsValid(i)) continue;
const DataType dtype = static_cast<DataType>(i);
switch (dtype) {
case DT_INVALID:
break;
case DT_BFLOAT16:
case DT_BOOL:
case DT_COMPLEX128:
case DT_COMPLEX64:
case DT_DOUBLE:
case DT_FLOAT:
case DT_HALF:
case DT_INT16:
case DT_INT64:
case DT_INT8:
case DT_UINT16:
case DT_UINT8:
case DT_INT4:
case DT_UINT4:
TestFeedAndFetchTensorsInDeviceMemory(opts, dtype);
break;
default:
if (!IsRefType(dtype)) {
TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable(opts, dtype);
}
break;
}
}
}
TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory_AllDataTypes) {
SessionOptions opts;
opts.config.set_allow_soft_placement(false);
TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts);
}
TEST(DirectSessionTest,
FeedAndFetchTensorsInDeviceMemory_AllDataTypes_SoftPlacement) {
SessionOptions opts;
opts.config.set_allow_soft_placement(true);
TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts);
}
void FeedFetchBenchmarkHelper(::testing::benchmark::State& state, int num_feeds,
bool use_make_callable, int inter_op_threads,
bool use_single_threaded_executor) {
Tensor value(DT_FLOAT, TensorShape());
value.flat<float>()(0) = 37.0;
std::vector<std::pair<string, Tensor>> inputs;
inputs.reserve(num_feeds);
std::vector<string> outputs;
Graph g(OpRegistry::Global());
for (int i = 0; i < num_feeds; ++i) {
Node* placeholder;
TF_CHECK_OK(NodeBuilder(g.NewName("Placeholder"), "Placeholder")
.Attr("shape", TensorShape())
.Attr("dtype", DT_FLOAT)
.Device("/cpu:0")
.Finalize(&g, &placeholder));
Node* identity;
TF_CHECK_OK(NodeBuilder(g.NewName("Identity"), "Identity")
.Input(placeholder)
.Attr("T", DT_FLOAT)
.Device("/cpu:0")
.Finalize(&g, &identity));
inputs.push_back({placeholder->name() + ":0", value});
outputs.push_back(identity->name() + ":0");
}
GraphDef gd;
g.ToGraphDef(&gd);
SessionOptions opts;
opts.config.set_inter_op_parallelism_threads(inter_op_threads);
if (use_single_threaded_executor) {
opts.config.mutable_experimental()->set_executor_type(
"SINGLE_THREADED_EXECUTOR");
}
std::unique_ptr<Session> session(NewSession(opts));
TF_CHECK_OK(session->Create(gd));
if (use_make_callable) {
Session::CallableHandle handle;
CallableOptions callable_options;
std::vector<Tensor> input_tensors;
for (const auto& input : inputs) {
callable_options.add_feed(input.first);
input_tensors.push_back(input.second);
}
for (const string& output : outputs) {
callable_options.add_fetch(output);
}
TF_CHECK_OK(session->MakeCallable(callable_options, &handle));
for (auto s : state) {
std::vector<Tensor> output_values;
TF_CHECK_OK(
session->RunCallable(handle, input_tensors, &output_values, nullptr));
}
} else {
{
std::vector<Tensor> output_values;
TF_CHECK_OK(session->Run(inputs, outputs, {}, &output_values));
}
for (auto s : state) {
std::vector<Tensor> output_values;
TF_CHECK_OK(session->Run(inputs, outputs, {}, &output_values));
}
}
}
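// Benchmark entry points: plain Run, callable, callable with a negative
// inter-op thread count, and callable with the single-threaded executor.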
void BM_FeedFetch(::testing::benchmark::State& state) {
const int num_feeds = state.range(0);
FeedFetchBenchmarkHelper(state, num_feeds, false,
0,
false);
}
void BM_FeedFetchCallable(::testing::benchmark::State& state) {
const int num_feeds = state.range(0);
FeedFetchBenchmarkHelper(state, num_feeds, true,
0,
false);
}
void BM_FeedFetchCallableSingleThread(::testing::benchmark::State& state) {
const int num_feeds = state.range(0);
FeedFetchBenchmarkHelper(state, num_feeds, true,
-1,
false);
}
void BM_FeedFetchCallableSingleThreadExecutor(
::testing::benchmark::State& state) {
const int num_feeds = state.range(0);
FeedFetchBenchmarkHelper(state, num_feeds, true,
-1,
true);
}
BENCHMARK(BM_FeedFetch)->Arg(1)->Arg(2)->Arg(5)->Arg(10);
BENCHMARK(BM_FeedFetchCallable)->Arg(1)->Arg(2)->Arg(5)->Arg(10);
BENCHMARK(BM_FeedFetchCallableSingleThread)->Arg(1)->Arg(2)->Arg(5)->Arg(10);
BENCHMARK(BM_FeedFetchCallableSingleThreadExecutor)
->Arg(1)
->Arg(2)
->Arg(5)
->Arg(10);
}
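// Exercises collective ops inside function calls across two CPU devices and
// exposes the collective graph key that the session computed.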
class DirectSessionCollectiveTest : public ::testing::Test {
public:
Status RunGraphWithCollectiveFunctions(bool add_unused_function,
int64_t* collective_graph_key) {
GraphDef g = CreateGraph(add_unused_function);
const Tensor t1 =
test::AsTensor<float>({0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1});
const Tensor t2 =
test::AsTensor<float>({0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3});
auto session = CreateSession();
TF_RETURN_IF_ERROR(session->Create(g));
std::vector<Tensor> outputs;
TF_RETURN_IF_ERROR(
session->Run({{"input0:0", t1}, {"input1:0", t2}}, {},
{"collective_call0:0", "collective_call1:0"}, &outputs));
DirectSession* direct_session = static_cast<DirectSession*>(session.get());
{
mutex_lock l(direct_session->collective_graph_key_lock_);
*collective_graph_key = direct_session->collective_graph_key_;
}
return absl::OkStatus();
}
private:
FunctionDef CollectiveFunction(const string& function_name,
int instance_key) {
return FunctionDefHelper::Define(
function_name,
{"arg:float"},
{"reduce:float"},
{},
{{
{"reduce"},
"CollectiveReduce",
{"arg"},
{{"group_size", 2},
{"group_key", 1},
{"instance_key", instance_key},
{"subdiv_offsets", absl::Span<const int32>({0})},
{"merge_op", "Add"},
{"final_op", "Div"},
{"T", DT_FLOAT}},
}});
}
NodeDef Input(int id) {
AttrValue dtype_attr;
SetAttrValue(DT_FLOAT, &dtype_attr);
NodeDef input;
input.set_name(strings::StrCat("input", id));
input.set_op("Placeholder");
input.mutable_attr()->insert({"dtype", dtype_attr});
return input;
}
NodeDef CollectiveCall(const string& op, const string& input, int cpu_id) {
NodeDef collective_call;
collective_call.set_name(strings::StrCat("collective_call", cpu_id));
collective_call.set_op(op);
collective_call.add_input(input);
collective_call.set_device(
strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", cpu_id));
return collective_call;
}
GraphDef CreateGraph(bool add_unused_function) {
GraphDef g;
FunctionDef collective_function =
CollectiveFunction("CollectiveFunction1", 1);
FunctionDefLibrary* lib = g.mutable_library();
*lib->add_function() = collective_function;
if (add_unused_function) {
FunctionDef unused_function =
CollectiveFunction("CollectiveFunction2", 2);
*lib->add_function() = unused_function;
}
*g.add_node() = Input(0);
*g.add_node() = Input(1);
*g.add_node() = CollectiveCall("CollectiveFunction1", "input0", 0);
*g.add_node() = CollectiveCall("CollectiveFunction1", "input1", 1);
return g;
}
};
TEST_F(DirectSessionCollectiveTest,
TestCollectiveGraphKeyUsesOnlyCalledFunctions) {
int64_t key1;
TF_ASSERT_OK(RunGraphWithCollectiveFunctions(false, &key1));
int64_t key2;
TF_ASSERT_OK(RunGraphWithCollectiveFunctions(true, &key2));
ASSERT_EQ(key1, key2);
}
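// Test kernel that counts how many of its outputs are actually required by
// the fetches and writes that count to every required output.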
class StatefulOutputRequiredOp : public OpKernel {
public:
explicit StatefulOutputRequiredOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor count_outputs_required_t(int64_t{0});
int64_t& count_outputs_required =
count_outputs_required_t.scalar<int64_t>()();
for (int i = 0; i < num_outputs(); ++i) {
if (ctx->output_required(i)) ++count_outputs_required;
}
for (int i = 0; i < num_outputs(); ++i) {
if (ctx->output_required(i)) ctx->set_output(i, count_outputs_required_t);
}
}
};
REGISTER_KERNEL_BUILDER(Name("StatefulOutputRequired").Device(DEVICE_CPU),
StatefulOutputRequiredOp);
REGISTER_OP("StatefulOutputRequired")
.Output("results : num_outs * int64")
.Attr("num_outs : int = 5")
.SetIsStateful();
TEST(DirectSessionTest, TestStatefulOutputRequiredOp) {
GraphDef graph;
protobuf::TextFormat::ParseFromString(
R"pb(
node { name: 'n' op: 'StatefulOutputRequired' device: '/device:CPU:0' }
versions { producer: 9 }
)pb",
&graph);
std::unique_ptr<Session> session(NewSession(SessionOptions()));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(std::move(graph)));
for (int num_outputs_required = 1; num_outputs_required <= 5;
++num_outputs_required) {
std::vector<string> fetch_tensor_names;
fetch_tensor_names.reserve(num_outputs_required);
for (int output_idx = 0; output_idx < num_outputs_required; ++output_idx) {
fetch_tensor_names.push_back(strings::StrCat("n:", output_idx));
}
std::vector<Tensor> fetch_tensors;
TF_ASSERT_OK(session->Run({}, fetch_tensor_names, {}, &fetch_tensors));
ASSERT_EQ(num_outputs_required, fetch_tensors.size());
for (const Tensor& t : fetch_tensors) {
ASSERT_EQ(num_outputs_required, t.scalar<int64_t>()());
}
}
TF_ASSERT_OK(session->Close());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/direct_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/direct_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71fb6a7c-61d9-4cc6-9997-be6a2af9367a | cpp | tensorflow/tensorflow | inline_function_utils | tensorflow/core/common_runtime/inline_function_utils.cc | tensorflow/core/common_runtime/inline_function_utils_test.cc | #include "tensorflow/core/common_runtime/inline_function_utils.h"
#include <deque>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/optimizer_cse.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
constexpr const char* const
LowerFunctionalOpsConstants::kLowerUsingSwitchMergeAttr;
constexpr const char* const
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
namespace {
static constexpr const char* const kArgOp = FunctionLibraryDefinition::kArgOp;
static constexpr const char* const kDeviceArgOp =
FunctionLibraryDefinition::kDeviceArgOp;
static constexpr const char* const kRetOp = FunctionLibraryDefinition::kRetOp;
static constexpr const char* const kDeviceRetOp =
FunctionLibraryDefinition::kDeviceRetOp;
static constexpr const char* const kGradientOp =
FunctionLibraryDefinition::kGradientOp;
static constexpr const char* const kNodeLabel = "Func";
static constexpr const char* const kFuncAttr =
FunctionLibraryDefinition::kFuncAttr;
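// An Endpoint names one output slot (node, output index); used as a hash-map
// key while rewiring edges during inlining.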
struct Endpoint {
Node* node;
int index;
string name() const {
if (index == 0) {
return node->name();
} else {
return strings::StrCat(node->name(), ":", index);
}
}
DataType dtype() const { return node->output_type(index); }
};
struct EndpointHash {
uint64 operator()(const Endpoint& x) const {
return Hash64(reinterpret_cast<const char*>(&x.node), sizeof(Node*),
x.index);
}
};
struct EndpointEq {
bool operator()(const Endpoint& x, const Endpoint& y) const {
return (x.node == y.node) && (x.index == y.index);
}
};
static Node* AddNoOp(StringPiece name, Graph* g) {
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("NoOp");
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
return ret;
}
static Node* AddIdentity(StringPiece name, Graph* g, Endpoint input) {
DCHECK_LT(0, input.dtype());
NodeDef ndef;
ndef.set_name(g->NewName(absl::StrCat(kNodeLabel, "/", name)));
ndef.set_op("Identity");
ndef.add_input(input.name());
AddNodeAttr("T", BaseType(input.dtype()), &ndef);
Status s;
Node* ret = g->AddNode(ndef, &s);
TF_CHECK_OK(s);
g->AddEdge(input.node, input.index, ret, 0);
return ret;
}
std::vector<string> InputDevices(const Node& caller) {
std::vector<string> input_devices(caller.in_edges().size());
std::vector<string> input_tensors(caller.in_edges().size());
for (const Edge* edge : caller.in_edges()) {
if (edge->IsControlEdge()) continue;
const string& input_device = edge->src()->has_assigned_device_name()
? edge->src()->assigned_device_name()
: edge->src()->requested_device();
input_devices[edge->dst_input()] = input_device;
input_tensors[edge->dst_input()] =
absl::StrCat(edge->src()->name(), ":", edge->src_output());
}
if (VLOG_IS_ON(4)) {
VLOG(4) << "Function instantiation input devices:";
for (int i = 0; i < input_devices.size(); ++i) {
if (input_tensors[i].empty()) continue;
VLOG(4) << " [index " << i << "]"
<< " device: " << input_devices[i]
<< " (input: " << input_tensors[i] << ")";
}
}
return input_devices;
}
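// Placement strategies for nodes created during inlining: the default placer
// pins input identities to the matching caller input devices, the
// single-device placer puts everything on the caller's device, and the
// multi-device placer merges unset device fields with the caller's parsed
// device name.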
class DefaultFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit DefaultFunctionBodyPlacer(const Node& caller)
: input_devices_(InputDevices(caller)) {}
absl::optional<string> InputNodeDevice(int input_index) const override {
return input_devices_[input_index];
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return absl::nullopt;
}
bool ColocateInputOutputIdentities() const override { return false; }
absl::optional<string> ControlNodeDevice() const override {
return absl::nullopt;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
return absl::nullopt;
}
private:
const std::vector<string> input_devices_;
};
class SingleDeviceFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit SingleDeviceFunctionBodyPlacer(const Node& caller)
: caller_device_(caller.def().device()) {}
absl::optional<string> InputNodeDevice(int input_index) const override {
return caller_device_;
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return caller_device_;
}
bool ColocateInputOutputIdentities() const override { return false; }
absl::optional<string> ControlNodeDevice() const override {
return caller_device_;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
return caller_device_;
}
private:
const string caller_device_;
};
class MultiDeviceFunctionBodyPlacer : public InlinedFunctionBodyPlacer {
public:
explicit MultiDeviceFunctionBodyPlacer(const Node& caller)
: caller_device_(caller.def().device()),
input_devices_(InputDevices(caller)) {
has_parsed_caller_device_ =
DeviceNameUtils::ParseFullName(caller_device_, &caller_parsed_device_);
}
absl::optional<string> InputNodeDevice(int input_index) const override {
return input_devices_[input_index];
}
absl::optional<string> OutputNodeDevice(int output_index) const override {
return absl::nullopt;
}
bool ColocateInputOutputIdentities() const override { return true; }
absl::optional<string> ControlNodeDevice() const override {
return caller_device_;
}
absl::optional<string> BodyNodeDevice(const NodeDef& ndef) const override {
if (ndef.device().empty()) return caller_device_;
if (!has_parsed_caller_device_) return ndef.device();
DeviceNameUtils::ParsedName ndef_parsed_device;
if (!DeviceNameUtils::ParseFullName(ndef.device(), &ndef_parsed_device))
return ndef.device();
DeviceNameUtils::MergeUnsetDevNames(&ndef_parsed_device,
caller_parsed_device_);
return DeviceNameUtils::ParsedNameToString(ndef_parsed_device);
}
private:
string caller_device_;
bool has_parsed_caller_device_;
DeviceNameUtils::ParsedName caller_parsed_device_;
std::vector<string> input_devices_;
};
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::DefaultPlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create default placer for inlined function body.";
return std::make_unique<DefaultFunctionBodyPlacer>(caller);
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::SingleDevicePlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create single device placer for inlined function body.";
return std::make_unique<SingleDeviceFunctionBodyPlacer>(caller);
}
std::unique_ptr<InlinedFunctionBodyPlacer>
InlinedFunctionBodyPlacer::MultiDevicePlacer(const Graph& graph,
const Node& caller) {
VLOG(3) << "Create multi device placer for inlined function body.";
return std::make_unique<MultiDeviceFunctionBodyPlacer>(caller);
}
namespace {
Status ValidateNoInline(const FunctionBody* fbody) {
const auto attr = AttrSlice(&fbody->record->fdef().attr());
bool noinline = false;
if (TryGetNodeAttr(attr, kNoInlineAttr, &noinline) && noinline) {
return errors::InvalidArgument(
"Can't inline function marked with '_noinline'");
}
return absl::OkStatus();
}
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
void PropagateDebugInfoToNode(const string& func,
const std::vector<const Node*>& nodes,
NodeDef* target) {
if (nodes.empty() || target->has_experimental_debug_info()) {
return;
}
for (const Node* node : nodes) {
const auto& node_def = node->def();
if (node_def.has_experimental_debug_info()) {
target->mutable_experimental_debug_info()->MergeFrom(
node_def.experimental_debug_info());
} else {
target->mutable_experimental_debug_info()->add_original_node_names(
node_def.name());
target->mutable_experimental_debug_info()->add_original_func_names(func);
}
}
}
}
string InlineFunctionBodyOptions::DebugString() const {
const auto true_false = [](bool b) { return b ? "true" : "false"; };
const auto keep_caller_node_str = [this]() -> string {
switch (keep_caller_node) {
case KeepCallerNode::kDoNotKeep:
return "DoNotKeep";
case KeepCallerNode::kFetchable:
return "Fetchable";
case KeepCallerNode::kTargetable:
return "Targetable";
}
};
return absl::StrCat(
"disable_inlining=", true_false(disable_inlining),
", ignore_noinline=", true_false(ignore_noinline),
", inline_impl_selection_group_functions=",
true_false(inline_impl_selection_group_functions),
", keep_caller_node=", keep_caller_node_str(), ", output_control_src=",
output_control_src == OutputControlSrc::kDataOutputs ? "DataOutputs"
: "ControlOutputs",
", inlined_function_body_placer=", inlined_function_body_placer.name,
", uniquify_frame_names=", true_false(uniquify_frame_names));
}
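// Checks that the caller node's input/output counts and dtypes match the
// function body signature, and that inlining is not disabled by the options
// or by the '_noinline' attribute.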
Status ValidateInlining(const Node* node, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options) {
const auto num_node_inputs = static_cast<size_t>(node->num_inputs());
const auto num_node_outputs = static_cast<size_t>(node->num_outputs());
if (num_node_inputs != fbody->arg_types.size() ||
num_node_inputs != fbody->arg_nodes.size()) {
return errors::InvalidArgument(
"Node inputs do not match function arguments: inputs=", num_node_inputs,
" arg_types=", fbody->arg_types.size(),
" arg_nodes=", fbody->arg_nodes.size());
}
if (num_node_outputs != fbody->ret_types.size() ||
num_node_outputs != fbody->ret_nodes.size()) {
return errors::InvalidArgument(
"Node outputs do not match function returns: outputs=",
num_node_outputs, " ret_types=", fbody->ret_types.size(),
" ret_nodes=", fbody->ret_nodes.size());
}
for (int i = 0; i < node->num_inputs(); ++i) {
if (node->input_type(i) != fbody->arg_types[i]) {
return errors::InvalidArgument(
"Node input type doesn't match function argument type: ",
node->input_type(i), " != ", fbody->arg_types[i], " @ index=", i);
}
}
for (int i = 0; i < node->num_outputs(); ++i) {
if (node->output_type(i) != fbody->ret_types[i]) {
return errors::InvalidArgument(
"Node output type doesn't match function return type: ",
node->output_type(i), " != ", fbody->ret_types[i], " @ index=", i);
}
}
if (options.disable_inlining) {
return errors::InvalidArgument(
"Function inlining explicitly disabled by 'options.disable_inlining'");
}
if (!options.inline_impl_selection_group_functions) {
bool is_impl_selection_group_function =
fbody->record->fdef().attr().find("api_implements") !=
fbody->record->fdef().attr().end();
if (is_impl_selection_group_function) {
return errors::InvalidArgument(
"Inlining of implementation selection group function ",
fbody->record->fdef().signature().name(),
" is disabled by options.inline_impl_selection_group_functions");
}
}
if (!options.ignore_noinline) {
TF_RETURN_IF_ERROR(ValidateNoInline(fbody));
}
return absl::OkStatus();
}
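// Replaces `caller` with a copy of the function body: body nodes are cloned
// under a "<caller name>/" prefix, input/output Identity nodes bridge the
// caller's data edges, and NoOp control nodes preserve incoming and outgoing
// control dependencies.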
Status InlineFunctionBody(const FunctionLibraryDefinition& flib_def, Graph* g,
Node* caller, const FunctionBody* fbody,
const InlineFunctionBodyOptions& options) {
VLOG(3) << "Inline function call: " << SummarizeNode(*caller) << " ["
<< options.DebugString() << "]";
VLOG(4) << "Inlining function: "
<< fbody->record->fdef().DebugString();
VLOG(4) << "Current graphdef: " << g->ToGraphDefDebug().DebugString();
VLOG(4) << "Caller: " << caller->DebugString();
Status validation = ValidateInlining(caller, fbody, options);
if (!validation.ok()) {
return errors::Internal("Inlining mismatch: ", validation.message());
}
const std::unique_ptr<InlinedFunctionBodyPlacer> placer =
options.inlined_function_body_placer.get(*g, *caller);
static constexpr bool kDoNotCheckDuplicates = true;
const auto no_op = [&](StringPiece name) -> Node* {
Node* node = AddNoOp(absl::StrCat(caller->name(), "/", name), g);
const absl::optional<string> device = placer->ControlNodeDevice();
if (device.has_value()) node->set_requested_device(*device);
return node;
};
const auto input_identity = [&](StringPiece name, Endpoint input,
int index) -> Node* {
Node* node = AddIdentity(absl::StrCat(caller->name(), "/", name), g, input);
const absl::optional<string> device = placer->InputNodeDevice(index);
if (device.has_value()) node->set_requested_device(*device);
bool colocate_identity = placer->ColocateInputOutputIdentities();
if (colocate_identity) {
node->AddAttr(kColocationAttrName,
std::vector<string>{absl::StrCat(kColocationGroupPrefix,
input.node->name())});
}
return node;
};
const auto output_identity = [&](StringPiece name, Endpoint input,
int index) -> Node* {
Node* node = AddIdentity(absl::StrCat(caller->name(), "/", name), g, input);
const absl::optional<string> device = placer->OutputNodeDevice(index);
if (device.has_value()) node->set_requested_device(*device);
bool colocate_identity = placer->ColocateInputOutputIdentities();
if (colocate_identity) {
node->AddAttr(kColocationAttrName,
std::vector<string>{absl::StrCat(kColocationGroupPrefix,
input.node->name())});
}
return node;
};
auto arg_name = [&](auto& args, size_t i) -> absl::string_view {
if (i < args.size()) {
return args[i].name();
} else {
return "<unknown>";
}
};
std::vector<Endpoint> inputs(caller->num_inputs());
Node* input_control_node = nullptr;
for (const Edge* e : caller->in_edges()) {
if (e->IsControlEdge()) {
if (input_control_node == nullptr) {
input_control_node = no_op("input_control_node");
}
g->AddControlEdge(e->src(), input_control_node, kDoNotCheckDuplicates);
} else {
inputs[e->dst_input()] = {e->src(), e->src_output()};
}
}
if (input_control_node != nullptr) {
VLOG(3) << "Created input control node: " << input_control_node->name();
}
std::vector<Node*> input_nodes;
std::map<absl::string_view, absl::string_view> input_node_name_map;
for (std::size_t i = 0; i < fbody->arg_nodes.size(); ++i) {
if (inputs[i].node == nullptr)
return errors::Internal("Null node found for input ", i);
Node* n = input_identity("input", inputs[i], i);
input_node_name_map[arg_name(fbody->record->fdef().signature().input_arg(),
i)] = n->name();
input_nodes.push_back(n);
}
std::unordered_set<string> fn_nodes;
for (Node* n : fbody->graph->op_nodes()) {
fn_nodes.insert(n->name());
}
std::vector<Node*> node_map(fbody->graph->num_node_ids());
for (Node* n : fbody->graph->op_nodes()) {
NodeDef ndef = n->def();
const absl::optional<string> device = placer->BodyNodeDevice(ndef);
if (device.has_value()) ndef.set_device(*device);
PropagateDebugInfoToNode(fbody->record->fdef().signature().name(), {n},
&ndef);
const string prefix = strings::StrCat(caller->name(), "/");
TF_RETURN_IF_ERROR(AddPrefixAndSuffixToNode(prefix, "", &ndef,
options.uniquify_frame_names));
TF_RETURN_IF_ERROR(
MaybeUpdateColocationConstraintsWithMap(input_node_name_map, &ndef));
TF_RETURN_IF_ERROR(
MaybeAddPrefixToColocationConstraints(fn_nodes, prefix, &ndef));
Status added_node;
Node* clone = g->AddNode(std::move(ndef), &added_node);
TF_CHECK_OK(added_node);
node_map[n->id()] = clone;
clone->SetStackTrace(n->GetStackTrace());
if (input_control_node) {
const auto is_input_edge = [](const Edge* e) -> bool {
return !e->src()->IsSource();
};
const auto is_control_edge = [](const Edge* e) -> bool {
return !e->src()->IsSource() && e->IsControlEdge();
};
const bool forward_execution_frame =
(absl::c_none_of(n->in_edges(), is_input_edge) ||
(n->IsFunctionCall() &&
absl::c_none_of(n->in_edges(), is_control_edge))) &&
!n->IsArg();
if (forward_execution_frame) {
VLOG(4) << "Add control edge from input control node to: "
<< clone->name();
g->AddControlEdge(input_control_node, clone, kDoNotCheckDuplicates);
}
}
}
for (const Edge* e : fbody->graph->edges()) {
if (e->src()->IsSource() || e->src()->IsSink() || e->dst()->IsSource() ||
e->dst()->IsSink()) {
continue;
}
Node* src_copy = node_map[e->src()->id()];
Node* dst_copy = node_map[e->dst()->id()];
g->AddEdge(src_copy, e->src_output(), dst_copy, e->dst_input());
}
VLOG(4) << "Add input Identity nodes for each function argument:";
for (std::size_t i = 0; i < fbody->arg_nodes.size(); ++i) {
Node* arg = node_map[fbody->arg_nodes[i]->id()];
Node* n = input_nodes[i];
VLOG(4) << " [index " << i << "] "
<< arg_name(fbody->record->fdef().signature().input_arg(), i)
<< " as " << n->name() << " (input: " << inputs[i].name()
<< ", requested_device: " << n->requested_device() << ")";
if (input_control_node) {
g->AddControlEdge(input_control_node, n, kDoNotCheckDuplicates);
}
for (const Edge* e : arg->out_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(n, e->dst(), kDoNotCheckDuplicates);
} else {
g->AddEdge(n, 0, e->dst(), e->dst_input());
}
}
node_map[fbody->arg_nodes[i]->id()] = n;
g->RemoveNode(arg);
}
VLOG(4) << "Add output Identity nodes for each function output argument:";
std::vector<Node*> outputs(caller->num_outputs());
for (std::size_t i = 0; i < fbody->ret_nodes.size(); ++i) {
Node* ret = node_map[fbody->ret_nodes[i]->id()];
Endpoint data;
for (const Edge* e : ret->in_edges()) {
if (!e->IsControlEdge()) {
data = {e->src(), e->src_output()};
break;
}
}
CHECK(data.node != nullptr);
Node* n = output_identity("output", data, i);
outputs[i] = n;
VLOG(4) << " [index " << i << "] "
<< arg_name(fbody->record->fdef().signature().output_arg(), i)
<< " as " << n->name() << " (ret: " << data.node->name() << ":"
<< data.index << ", requested_device: " << n->requested_device()
<< ")";
for (const Edge* e : ret->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), n, kDoNotCheckDuplicates);
}
}
g->RemoveNode(ret);
}
Node* output_control_node = nullptr;
const bool has_control_outputs = absl::c_any_of(
caller->out_edges(), [](const Edge* e) { return e->IsControlEdge(); });
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
const bool keep_caller_node =
options.keep_caller_node == KeepCallerNode::kFetchable ||
options.keep_caller_node == KeepCallerNode::kTargetable;
if (has_control_outputs || keep_caller_node) {
output_control_node = no_op("output_control_node");
VLOG(4) << "Add output control node: " << output_control_node->name();
if (options.output_control_src == OutputControlSrc::kDataOutputs) {
for (Node* n : outputs) {
VLOG(4) << " [data output] add control edge from: " << n->name();
g->AddControlEdge(n, output_control_node, kDoNotCheckDuplicates);
}
} else {
for (Node* fbody_node : fbody->control_ret_nodes) {
Node* n = node_map[fbody_node->id()];
VLOG(4) << " [control output] add control edge from: " << n->name();
g->AddControlEdge(n, output_control_node, kDoNotCheckDuplicates);
}
}
}
if (output_control_node && output_control_node->in_edges().empty()) {
if (input_control_node) {
VLOG(4) << "Add a control edge between input and output control nodes: "
<< input_control_node->name() << " to "
<< output_control_node->name();
g->AddControlEdge(input_control_node, output_control_node,
kDoNotCheckDuplicates);
} else {
VLOG(4) << "Function inlining potentially dropped execution frame "
"information from outgoing control edges.";
}
}
for (const Edge* e : caller->out_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(output_control_node, e->dst(), kDoNotCheckDuplicates);
} else {
g->AddEdge(outputs[e->src_output()], 0, e->dst(), e->dst_input());
}
}
if (keep_caller_node) {
std::vector<NodeBuilder::NodeOut> output_tensors;
absl::c_transform(outputs, std::back_inserter(output_tensors),
[](Node* n) { return NodeBuilder::NodeOut(n, 0); });
Node* caller_substitute_node;
if (options.keep_caller_node == KeepCallerNode::kTargetable ||
output_tensors.empty()) {
TF_CHECK_OK(NodeBuilder(caller->name(), "NoOp")
.Device(caller->requested_device())
.ControlInput(output_control_node)
.Finalize(g, &caller_substitute_node));
} else if (options.keep_caller_node == KeepCallerNode::kFetchable) {
TF_CHECK_OK(NodeBuilder(caller->name(), "IdentityN")
.Device(caller->requested_device())
.Input(output_tensors)
.ControlInput(output_control_node)
.Finalize(g, &caller_substitute_node));
}
}
VLOG(3) << "Successfully inlined function call node: " << caller->name();
g->RemoveNode(caller);
VLOG(4) << "Final graph: " << g->ToGraphDefDebug().DebugString();
return absl::OkStatus();
}
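// Inlines every function call node in `graph` that is not marked with
// '_noinline'; returns true if at least one call was successfully inlined.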
bool ExpandInlineFunctions(FunctionLibraryRuntime* lib, Graph* graph,
const ExpandInlineFunctionsOptions& options) {
std::vector<std::pair<Node*, const FunctionBody*>> candidates;
const FunctionLibraryDefinition* fld = lib->GetFunctionLibraryDefinition();
for (Node* node : graph->nodes()) {
if (!IsFunctionCall(*lib->GetFunctionLibraryDefinition(), *node)) {
continue;
}
bool noinline;
if (fld->GetAttr(*node, kNoInlineAttr, &noinline).ok() && noinline) {
VLOG(3) << "noinline: " << SummarizeNode(*node);
continue;
}
FunctionLibraryRuntime::Handle handle;
Status s = InstantiateFunctionCall(node->def(), lib, &handle);
if (!s.ok()) {
LOG(ERROR) << "Failed to instantiate a function: " << s.message();
continue;
}
const FunctionBody* fbody = lib->GetFunctionBody(handle);
CHECK_NOTNULL(fbody);
candidates.emplace_back(node, fbody);
}
bool inlined_any = false;
for (const auto& p : candidates) {
Status inlined = InlineFunctionBody(*fld, graph, p.first, p.second,
p.first->IsPartitionedCall()
? options.multi_device_options
: options.native_options);
if (inlined.ok()) {
inlined_any = true;
} else {
VLOG(1) << "Failed to inline function call: node=" << p.first->name()
<< " error=" << inlined.message();
}
}
return inlined_any;
}
} | #include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
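// Verifies that a '_class' colocation constraint inside the function body is
// rewritten to reference the inlined input Identity node instead of the
// original argument name.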
TEST(InlineFunctionBody, ColocationConstraintPropagation) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
FunctionDef fdef = FunctionDefHelper::Define(
"f",
{"x: float", "y: float"},
{"z: float"},
{},
{
{{"z"},
"AddV2",
{"x", "y"},
{{"T", DT_FLOAT}, {"_class", std::vector<string>({"loc:@x"})}}},
});
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
auto g = std::make_unique<Graph>(OpRegistry::Global());
GraphConstructorOptions opts;
const Tensor kZero = test::AsScalar<int64_t>(0);
const Tensor kOne = test::AsScalar<int64_t>(1);
GraphDef gdef = GDef(
{
NDef("inp0", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kZero}}),
NDef("inp1", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}),
NDef("call", "StatefulPartitionedCall", {"inp0", "inp1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("f", {})}}),
NDef("out0", "_Retval", {"call:0"}, {{"T", DT_FLOAT}}),
},
{});
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, gdef, g.get()));
Node* caller = nullptr;
for (Node* node : g->nodes()) {
if (node->name() == "call") {
caller = node;
}
}
std::unique_ptr<FunctionBody> fbody;
TF_ASSERT_OK(FunctionDefToBodyHelper(fdef, {}, &flib_def, &fbody));
InlineFunctionBodyOptions inline_options;
TF_ASSERT_OK(InlineFunctionBody(flib_def, g.get(), caller, fbody.get(),
inline_options));
GraphDef expected_gdef = GDef(
{
NDef("inp0", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kZero}}),
NDef("inp1", "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}),
NDef("out0", "_Retval", {"Func/call/output/_2"}, {{"T", DT_FLOAT}}),
NDef("Func/call/input/_0", "Identity", {"inp0"}, {{"T", DT_FLOAT}}),
NDef("Func/call/input/_1", "Identity", {"inp1"}, {{"T", DT_FLOAT}}),
NDef("call/z", "AddV2", {"Func/call/input/_0", "Func/call/input/_1"},
{{"T", DT_FLOAT},
{"_class", std::vector<string>({"loc:@Func/call/input/_0"})}}),
NDef("Func/call/output/_2", "Identity", {"call/z"},
{{"T", DT_FLOAT}}),
},
{});
GraphDef output_gdef;
g->ToGraphDef(&output_gdef);
TF_EXPECT_GRAPH_EQ(expected_gdef, output_gdef);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/inline_function_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/inline_function_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |