ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
b0117d33-c5af-4127-8481-159be8a2c317 | cpp | tensorflow/tensorflow | device_mgr | tensorflow/core/common_runtime/device_mgr.cc | tensorflow/core/common_runtime/device_mgr_test.cc | #include "tensorflow/core/common_runtime/device_mgr.h"
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
DeviceMgr::~DeviceMgr() {}
} | #include "tensorflow/core/common_runtime/device_mgr.h"
#include <memory>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
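// Builds a minimal fake Device that carries only its DeviceAttributes --
// just enough state for exercising DeviceMgr bookkeeping in these tests.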
static Device* CreateDevice(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
TEST(StaticDeviceMgr, NoCPUDevice) {
std::unique_ptr<Device> d0(CreateDevice("GPU", "/device:GPU:0"));
std::unique_ptr<Device> d1(CreateDevice("GPU", "/device:GPU:1"));
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
devices.emplace_back(std::move(d1));
StaticDeviceMgr lm(std::move(devices));
EXPECT_EQ(lm.HostCPU(), nullptr);
}
TEST(StaticDeviceMgr, SomeCPUDevice) {
std::unique_ptr<Device> d0(CreateDevice("GPU", "/device:GPU:0"));
std::unique_ptr<Device> d1(CreateDevice("GPU", "/device:GPU:1"));
std::unique_ptr<Device> d2(CreateDevice("CPU", "/device:CPU:0"));
Device* d2_ptr = d2.get();
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
devices.emplace_back(std::move(d1));
devices.emplace_back(std::move(d2));
StaticDeviceMgr lm(std::move(devices));
EXPECT_EQ(lm.HostCPU(), d2_ptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb4af92b-029e-4f0c-9777-99652edb4d8c | cpp | tensorflow/tensorflow | collective_rma_local | tensorflow/core/common_runtime/collective_rma_local.cc | tensorflow/core/common_runtime/collective_rma_local_test.cc | #include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
namespace tensorflow {
void CollectiveRemoteAccessLocal::StartAbort(const Status& s) {
buf_rendezvous_.StartAbort(s);
}
void CollectiveRemoteAccessLocal::RecvFromPeer(
const string& peer_device, const string& peer_task, bool peer_is_local,
const string& key, Device* to_device, DeviceContext* to_device_ctx,
const AllocatorAttributes& to_alloc_attr, Tensor* to_tensor,
const DeviceLocality& client_locality, int dev_to_dev_stream_index,
CancellationManager* cancellation_manager, const StatusCallback& done) {
VLOG(1) << "RecvFromPeer " << this << " from " << peer_device << " key "
<< key;
if (!peer_is_local) {
done(
errors::Internal("CollectiveRemoteAccessLocal::RecvFromPeer "
"called with peer_is_local=false"));
return;
}
Device* from_device;
Status status = dev_mgr_->LookupDevice(peer_device, &from_device);
if (!status.ok()) {
done(status);
return;
}
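// Consumer side of the buffer rendezvous: once the producer provides its
// buffer (the "hook"), copy it into to_tensor and then release the hook.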
auto consumer_callback = [to_tensor, to_device_ctx, to_device, to_alloc_attr,
dev_to_dev_stream_index,
done](const Status& status,
BufRendezvous::Hook* hook) {
Status s = status;
if (s.ok()) {
if (hook == nullptr) {
s = errors::Internal("Invalid null hook in ConsumeBuf callback");
}
} else {
if (hook != nullptr) {
LOG(ERROR) << "Got hook " << hook << " with status " << s
<< " from ConsumeBuf";
}
}
if (s.ok()) {
int64_t recv_bytes = to_tensor->TotalBytes();
CHECK_EQ(recv_bytes, hook->prod_value->TotalBytes());
MemCpyAsync(/*src_dev_ctx=*/hook->prod_ctx,
/*dst_dev_ctx=*/to_device_ctx,
/*src_dev=*/hook->prod_dev,
/*dst_dev=*/to_device,
/*src_attr=*/hook->prod_attr,
/*dst_attr=*/to_alloc_attr,
/*src=*/hook->prod_value,
/*dst=*/to_tensor,
dev_to_dev_stream_index,
[hook, done](const Status& memcpy_status) {
done(memcpy_status);
BufRendezvous::DoneWithHook(hook);
});
} else {
done(s);
if (hook != nullptr) {
BufRendezvous::DoneWithHook(hook);
}
}
};
buf_rendezvous_.ConsumeBuf(key, from_device->name(),
from_device->attributes().incarnation(),
consumer_callback, cancellation_manager);
}
void CollectiveRemoteAccessLocal::PostToPeer(
const string& peer_device, const string& peer_task, const string& key,
Device* from_device, DeviceContext* from_device_ctx,
const AllocatorAttributes& from_alloc_attr, const Tensor* from_tensor,
const DeviceLocality& client_locality,
CancellationManager* cancellation_manager, const StatusCallback& done) {
VLOG(1) << "PostToPeer " << this << " key " << key
<< " step_id_=" << step_id_;
buf_rendezvous_.ProvideBuf(key, from_device, from_device_ctx, from_tensor,
from_alloc_attr, done, cancellation_manager);
}
void CollectiveRemoteAccessLocal::CheckPeerHealth(const string& peer_task,
int64_t timeout_in_ms,
const StatusCallback& done) {
done(errors::Internal(
"CheckPeerHealth is not supposed to be called for local collectives"));
}
void CollectiveRemoteAccessLocal::MemCpyAsync(
DeviceContext* src_dev_ctx, DeviceContext* dst_dev_ctx, Device* src_dev,
Device* dst_dev, const AllocatorAttributes& src_attr,
const AllocatorAttributes& dst_attr, const Tensor* src, Tensor* dst,
int dev_to_dev_stream_index, const StatusCallback& done) {
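// A tensor counts as CPU-resident if its allocator attributes say it lives
// in host memory, regardless of which device it is nominally attached to.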
const DeviceType src_device_type(
src_attr.on_host() ? DEVICE_CPU : src_dev->attributes().device_type());
const DeviceType dst_device_type(
dst_attr.on_host() ? DEVICE_CPU : dst_dev->attributes().device_type());
const bool non_cpu_src = src_device_type != DeviceType(DEVICE_CPU);
const bool non_cpu_dst = dst_device_type != DeviceType(DEVICE_CPU);
if (src_dev_ctx == nullptr && src_device_type == DEVICE_GPU) {
const DeviceBase::AcceleratorDeviceInfo* dev_info =
src_dev->tensorflow_accelerator_device_info();
CHECK(dev_info);
src_dev_ctx = dev_info->default_context;
}
if (dst_dev_ctx == nullptr && dst_device_type == DEVICE_GPU) {
const DeviceBase::AcceleratorDeviceInfo* dev_info =
dst_dev->tensorflow_accelerator_device_info();
CHECK(dev_info);
dst_dev_ctx = dev_info->default_context;
}
if (non_cpu_src) CHECK(src_dev_ctx);
if (non_cpu_dst) CHECK(dst_dev_ctx);
if (non_cpu_src || non_cpu_dst) {
CopyTensor::ViaDMA(/*edge_name=*/"", src_dev_ctx, dst_dev_ctx, src_dev,
dst_dev, src_attr, dst_attr, src, dst,
dev_to_dev_stream_index, done);
} else {
int64_t bytes = src->TotalBytes();
DCHECK_EQ(dst->TotalBytes(), bytes);
memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes);
done(absl::OkStatus());
}
}
} | #include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "tensorflow/core/common_runtime/collective_param_resolver_local.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
#define NUM_DEVS 3
static const int kStepId = 123;
class CollectiveRemoteAccessLocalTest : public ::testing::Test {
protected:
const string kTaskName = "/job:localhost/replica:0/task:0";
CollectiveRemoteAccessLocalTest() {
work_queue_ = std::make_shared<UnboundedWorkQueue>(Env::Default(), "test");
ConfigProto cp;
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", NUM_DEVS});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, kTaskName, &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
drl_ = std::make_unique<DeviceResolverLocal>(device_mgr_.get());
prl_ = std::make_unique<CollectiveParamResolverLocal>(
cp, device_mgr_.get(), drl_.get(), /*nccl_communicator=*/nullptr,
kTaskName);
rma_ = std::make_unique<CollectiveRemoteAccessLocal>(device_mgr_.get(),
drl_.get(), kStepId);
cm_ = std::make_unique<CancellationManager>();
}
~CollectiveRemoteAccessLocalTest() override = default;
std::shared_ptr<UnboundedWorkQueue> work_queue_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<DeviceResolverLocal> drl_;
std::unique_ptr<CollectiveParamResolverLocal> prl_;
std::unique_ptr<CollectiveRemoteAccessLocal> rma_;
std::unique_ptr<CancellationManager> cm_;
};
TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU0) {
Device* cpu0 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0));
Tensor sink_tensor(DT_FLOAT, TensorShape({8}));
Notification recv_note;
Status recv_status;
rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName,
/*peer_is_local=*/true, "key_0", /*to_device=*/cpu0,
/*to_device_ctx=*/nullptr, /*to_alloc_attr=*/attr, &sink_tensor,
dev_locality, /*dev_to_dev_stream_index=*/0, cm_.get(),
[&recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
});
Tensor source_tensor(DT_FLOAT, TensorShape({8}));
for (int i = 0; i < 8; ++i) {
source_tensor.flat<float>()(i) = i / 2;
}
EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));
Notification send_note;
Status send_status;
rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0",
/*from_device=*/cpu0, /*from_device_ctx=*/nullptr,
/*from_alloc_attr=*/attr, &source_tensor, dev_locality,
cm_.get(), [&send_note, &send_status](const Status& s) {
send_status = s;
send_note.Notify();
});
recv_note.WaitForNotification();
send_note.WaitForNotification();
TF_EXPECT_OK(recv_status);
TF_EXPECT_OK(send_status);
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2);
}
EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));
}
TEST_F(CollectiveRemoteAccessLocalTest, PostRecvCPU1_2) {
Device* cpu2 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:2", &cpu2));
Tensor sink_tensor(DT_FLOAT, TensorShape({8}));
Notification recv_note;
Status recv_status;
rma_->RecvFromPeer(kTaskName + "/device:CPU:1", kTaskName,
/*peer_is_local=*/true, "key_0", /*to_device=*/cpu2,
/*to_device_ctx=*/nullptr, /*to_alloc_attr=*/attr, &sink_tensor,
dev_locality, /*dev_to_dev_stream_index=*/0, cm_.get(),
[&recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
});
Tensor source_tensor(DT_FLOAT, TensorShape({8}));
for (int i = 0; i < 8; ++i) {
source_tensor.flat<float>()(i) = i / 2;
}
EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));
Device* cpu1 = nullptr;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:1", &cpu1));
Notification send_note;
Status send_status;
rma_->PostToPeer(kTaskName + "/device:CPU:2", kTaskName, "key_0",
/*from_device=*/cpu1, /*from_device_ctx=*/nullptr,
/*from_alloc_attr=*/attr, &source_tensor, dev_locality,
cm_.get(), [&send_note, &send_status](const Status& s) {
send_status = s;
send_note.Notify();
});
recv_note.WaitForNotification();
send_note.WaitForNotification();
TF_EXPECT_OK(recv_status);
TF_EXPECT_OK(send_status);
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(sink_tensor.flat<float>()(i), i / 2);
}
EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));
}
TEST_F(CollectiveRemoteAccessLocalTest, CheckHealth) {
Status status;
Notification done;
rma_->CheckPeerHealth(kTaskName, /*timeout_in_ms=*/0,
[&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
EXPECT_TRUE(errors::IsInternal(status));
}
TEST_F(CollectiveRemoteAccessLocalTest, RecvThenCancel) {
Device* cpu0 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0));
Tensor sink_tensor(DT_FLOAT, TensorShape({8}));
Notification recv_note;
Status recv_status;
rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName,
/*peer_is_local=*/true, "key_0", /*to_device=*/cpu0,
/*to_device_ctx=*/nullptr, /*to_alloc_attr=*/attr, &sink_tensor,
dev_locality, /*dev_to_dev_stream_index=*/0, cm_.get(),
[&recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
});
cm_->StartCancel();
recv_note.WaitForNotification();
EXPECT_TRUE(cm_->IsCancelled());
EXPECT_TRUE(errors::IsCancelled(recv_status));
}
TEST_F(CollectiveRemoteAccessLocalTest, CancelThenRecv) {
Device* cpu0 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0));
Tensor sink_tensor(DT_FLOAT, TensorShape({8}));
Notification recv_note;
Status recv_status;
cm_->StartCancel();
rma_->RecvFromPeer(kTaskName + "/device:CPU:0", kTaskName,
/*peer_is_local=*/true, "key_0", /*to_device=*/cpu0,
/*to_device_ctx=*/nullptr, /*to_alloc_attr=*/attr, &sink_tensor,
dev_locality, /*dev_to_dev_stream_index=*/0, cm_.get(),
[&recv_note, &recv_status](const Status& s) {
recv_status = s;
recv_note.Notify();
});
recv_note.WaitForNotification();
EXPECT_TRUE(cm_->IsCancelled());
EXPECT_TRUE(errors::IsCancelled(recv_status));
}
TEST_F(CollectiveRemoteAccessLocalTest, PostThenCancel) {
Device* cpu0 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0));
Tensor source_tensor(DT_FLOAT, TensorShape({8}));
Notification send_note;
Status send_status;
rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0",
/*from_device=*/cpu0, /*from_device_ctx=*/nullptr,
/*from_alloc_attr=*/attr, &source_tensor, dev_locality,
cm_.get(), [&send_note, &send_status](const Status& s) {
send_status = s;
send_note.Notify();
});
cm_->StartCancel();
send_note.WaitForNotification();
EXPECT_TRUE(cm_->IsCancelled());
EXPECT_TRUE(errors::IsCancelled(send_status));
}
TEST_F(CollectiveRemoteAccessLocalTest, CancelThenPost) {
Device* cpu0 = nullptr;
AllocatorAttributes attr;
DeviceLocality dev_locality;
TF_ASSERT_OK(device_mgr_->LookupDevice(kTaskName + "/device:CPU:0", &cpu0));
Tensor source_tensor(DT_FLOAT, TensorShape({8}));
Notification send_note;
Status send_status;
cm_->StartCancel();
rma_->PostToPeer(kTaskName + "/device:CPU:0", kTaskName, "key_0",
/*from_device=*/cpu0, /*from_device_ctx=*/nullptr,
/*from_alloc_attr=*/attr, &source_tensor, dev_locality,
cm_.get(), [&send_note, &send_status](const Status& s) {
send_status = s;
send_note.Notify();
});
send_note.WaitForNotification();
EXPECT_TRUE(cm_->IsCancelled());
EXPECT_TRUE(errors::IsCancelled(send_status));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_rma_local.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/collective_rma_local_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34e66ece-9ff8-4b9c-9545-dcb0094f4b53 | cpp | tensorflow/tensorflow | partitioning_utils | tensorflow/core/common_runtime/partitioning_utils.cc | tensorflow/core/common_runtime/partitioning_utils_test.cc | #include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
namespace tensorflow {
namespace {
Status PartitionFunctionGraph(
const DeviceSet& device_set, Graph* graph,
std::unordered_map<string, GraphDef>* partitions,
std::function<string(const Node*)> node_to_loc,
std::function<string(const Edge*)> get_tensor_name_attr) {
PartitionOptions partition_options;
if (node_to_loc != nullptr) {
partition_options.node_to_loc = node_to_loc;
} else {
partition_options.node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
}
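// Partitioning inserts new Send/Recv node pairs; a simple counter gives
// them unique names.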
int64_t edge_name_counter = 0;
partition_options.new_name = [&edge_name_counter](const string& prefix) {
return strings::StrCat(prefix, "/_", ++edge_name_counter);
};
partition_options.get_incarnation =
[&device_set](const string& name) -> int64 {
const Device* d = device_set.FindDeviceByName(name);
if (d == nullptr) {
return PartitionOptions::kIllegalIncarnation;
} else {
return d->attributes().incarnation();
}
};
partition_options.control_flow_added = false;
partition_options.get_tensor_name_attr = get_tensor_name_attr;
partition_options.can_make_destructive_changes = true;
return Partition(partition_options, graph, partitions);
}
struct SendRecvPair {
Node* send_node = nullptr;
Node* recv_node = nullptr;
};
constexpr char kTensorNameAttr[] = "tensor_name";
Status MakeSendRecvDependencyExplicit(Graph* graph) {
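// Group the _Send/_Recv nodes by their "tensor_name" attribute so that each
// matching pair can be connected with an explicit control edge below.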
absl::flat_hash_map<std::string, SendRecvPair> send_recv_pairs;
for (Node* node : graph->op_nodes()) {
if (node->IsSend() || node->IsRecv()) {
auto tensor_name_it = node->def().attr().find(kTensorNameAttr);
if (tensor_name_it == node->def().attr().end()) {
return errors::Internal(
"'", kTensorNameAttr,
"' attribute is not found from node: ", node->DebugString());
}
if (node->IsSend()) {
send_recv_pairs[tensor_name_it->second.s()].send_node = node;
} else {
send_recv_pairs[tensor_name_it->second.s()].recv_node = node;
}
}
}
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
if (send_recv_pair.send_node == nullptr ||
send_recv_pair.recv_node == nullptr) {
return errors::Internal(
"No matching Send/Recv nodes found for tensor_name = ", tensor_name);
}
graph->AddControlEdge(send_recv_pair.send_node, send_recv_pair.recv_node);
}
return absl::OkStatus();
}
}
Status PartitionFunctionGraph(
const DeviceSet& device_set, std::unique_ptr<Graph> graph,
std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs,
std::function<string(const Edge*)> get_tensor_name_attr) {
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(
PartitionFunctionGraph(device_set, graph.get(), &partitions,
/*node_to_loc=*/nullptr, get_tensor_name_attr));
const OpRegistryInterface* default_registry =
graph->flib_def().default_registry();
graph.reset();
for (auto& partition : partitions) {
const string& device = partition.first;
GraphDef& graph_def = partition.second;
auto subgraph = std::make_unique<Graph>(default_registry);
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(opts, std::move(graph_def), subgraph.get()));
subgraphs->emplace(device, std::move(subgraph));
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const DeviceSet& device_set, std::unique_ptr<Graph> graph) {
auto node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
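// Quick scan: if every op node already lives on a single device, there is
// nothing to partition and no transfer ops are needed.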
bool has_multiple_devices = false;
absl::optional<std::string> location;
for (const Node* node : graph->op_nodes()) {
if (location) {
if (*location != node_to_loc(node)) {
has_multiple_devices = true;
break;
}
} else {
location = node_to_loc(node);
}
}
if (!has_multiple_devices) {
return graph;
}
auto new_graph = std::make_unique<Graph>(graph->flib_def());
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(PartitionFunctionGraph(device_set, graph.get(),
&partitions, node_to_loc,
/*get_tensor_name_attr=*/nullptr));
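// Stitch the per-device partitions (now containing the inserted Send/Recv
// pairs) back into a single merged GraphDef.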
GraphDef merged_graph_def;
if (!partitions.empty()) {
auto iter = partitions.begin();
merged_graph_def = std::move(iter->second);
while (++iter != partitions.end()) {
merged_graph_def.MergeFrom(iter->second);
}
}
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(merged_graph_def),
new_graph.get()));
TF_RETURN_IF_ERROR(MakeSendRecvDependencyExplicit(new_graph.get()));
return std::move(new_graph);
}
Status UpdateArgAndRetvalMetadata(
Graph* graph, std::vector<FunctionArgIndex>* arg_indices,
std::vector<int>* ret_indices,
std::vector<AllocatorAttributes>* arg_alloc_attrs,
std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device) {
std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes;
std::vector<std::pair<Node*, int>> ret_nodes;
const AttrValue* attr_value;
for (Node* node : graph->op_nodes()) {
if (node->IsArg()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
int sub_index = -1;
if (node->attrs().Find("sub_index", &attr_value).ok()) {
sub_index = static_cast<int>(attr_value->i());
}
arg_nodes.emplace_back(node, FunctionArgIndex(index, sub_index));
} else if (node->IsRetval()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ret_nodes.emplace_back(node, index);
}
}
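// Sort args by (index, sub_index) and retvals by index so the original
// signature order is preserved, then renumber the nodes densely from zero.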
auto arg_comparator = [](std::pair<Node*, FunctionArgIndex> a,
std::pair<Node*, FunctionArgIndex> b) {
return std::tie(a.second.index, a.second.sub_index) <
std::tie(b.second.index, b.second.sub_index);
};
std::sort(arg_nodes.begin(), arg_nodes.end(), arg_comparator);
auto ret_comparator = [](std::pair<Node*, int> a, std::pair<Node*, int> b) {
return a.second < b.second;
};
std::sort(ret_nodes.begin(), ret_nodes.end(), ret_comparator);
arg_indices->reserve(arg_nodes.size());
for (const auto& pair : arg_nodes) arg_indices->push_back(pair.second);
ret_indices->reserve(ret_nodes.size());
for (const auto& pair : ret_nodes) ret_indices->push_back(pair.second);
for (int i = 0; i < arg_nodes.size(); ++i) {
Node* arg = arg_nodes[i].first;
arg->AddAttr("index", i);
}
if (arg_alloc_attrs != nullptr) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForArgs(
arg_nodes, ints_on_device, *arg_alloc_attrs));
}
for (int i = 0; i < ret_nodes.size(); ++i) {
Node* ret = ret_nodes[i].first;
ret->AddAttr("index", i);
}
if (ret_alloc_attrs) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForRets(
ret_nodes, ints_on_device, *ret_alloc_attrs));
}
return absl::OkStatus();
}
string FunctionNameGenerator::GetName() {
while (true) {
const string candidate = strings::StrCat(name_, "_", counter_++);
if (flib_def_->Find(candidate) == nullptr) {
return candidate;
}
}
}
} | #include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
using ::testing::SizeIs;
class PartitioningUtilsTest : public ::testing::Test {
public:
void SetUp() override {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 2});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&devices));
device0_ = devices[0].get();
device1_ = devices[1].get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
for (auto d : device_mgr_->ListDevices()) {
device_set_.AddDevice(d);
}
}
void SwapGraph(Graph* graph, bool assign_device = false) {
Scope s = Scope::NewRootScope();
if (assign_device) {
s = s.WithDevice(device0_->name());
}
auto x = ops::_Arg(s.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s.WithOpName("id_x"), x);
auto id_y = ops::Identity(s.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
if (assign_device) {
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
}
void TwoDeviceSwapGraph(Graph* graph) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
Scope s2 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:1");
auto x = ops::_Arg(s1.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s2.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto id_y = ops::Identity(s2.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s2.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s1.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
void SubGraph(Graph* subgraph, DataType dtype,
absl::Span<const int> arg_indices,
absl::Span<const int> ret_indices) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
CHECK_EQ(arg_indices.size(), ret_indices.size());
for (size_t i = 0; i < arg_indices.size(); ++i) {
auto x = ops::_Arg(s1.WithOpName("x"), dtype, arg_indices[i]);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto dx_retval =
ops::_Retval(s1.WithOpName("retval1"), id_x, ret_indices[i]);
}
TF_ASSERT_OK(s.ToGraph(subgraph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(subgraph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
DeviceSet device_set_;
};
TEST_F(PartitioningUtilsTest, GraphWithoutAssignedDevicesFails) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << status.ToString();
}
TEST_F(PartitioningUtilsTest, OneDevice) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get(), true);
int num_nodes = graph->num_op_nodes();
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(1, subgraphs.size());
const auto& pair = *subgraphs.begin();
ASSERT_EQ("/job:a/replica:0/task:0/device:CPU:0", pair.first);
ASSERT_EQ(num_nodes, pair.second->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, TwoDevices) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
TwoDeviceSwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(2, subgraphs.size());
const auto& part1 = subgraphs["/job:a/replica:0/task:0/device:CPU:0"];
ASSERT_EQ(3, part1->num_op_nodes());
const auto& part2 = subgraphs["/job:a/replica:0/task:0/device:CPU:1"];
ASSERT_EQ(3, part2->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithOneDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice(device0_->name());
auto x = ops::_Arg(scope.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 3);
send_count = recv_count = 0;
for (const auto* op : new_graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
EXPECT_EQ(send_count, 0);
EXPECT_EQ(recv_count, 0);
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithTwoDevices) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope();
Scope scope1 = scope.WithDevice(device0_->name());
Scope scope2 = scope.WithDevice(device1_->name());
auto x = ops::_Arg(scope1.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope2.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope1.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 7);
send_count = recv_count = 0;
auto get_tensor_name_attr = [](const Node* node) -> std::string {
auto tensor_name_it = node->def().attr().find("tensor_name");
return tensor_name_it->second.s();
};
absl::flat_hash_map<std::string, std::pair<Node*, Node*>> send_recv_pairs;
for (auto* op : new_graph->op_nodes()) {
if (op->IsSend()) {
++send_count;
send_recv_pairs[get_tensor_name_attr(op)].first = op;
} else if (op->IsRecv()) {
++recv_count;
send_recv_pairs[get_tensor_name_attr(op)].second = op;
}
}
EXPECT_EQ(send_count, 2);
EXPECT_EQ(recv_count, 2);
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
ASSERT_TRUE(send_recv_pair.first != nullptr &&
send_recv_pair.second != nullptr);
std::vector<const Edge*> out_edges(
send_recv_pair.first->out_edges().begin(),
send_recv_pair.first->out_edges().end());
ASSERT_THAT(out_edges, SizeIs(2));
for (const Edge* out_edge : out_edges) {
if (out_edge->dst() != new_graph->sink_node()) {
EXPECT_TRUE(out_edge->IsControlEdge());
EXPECT_EQ(out_edge->dst(), send_recv_pair.second);
}
}
}
}
void CheckRetIndices(const std::vector<int>& expected,
const std::vector<int>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i]) << " at index " << i;
}
}
void CheckArgIndices(const std::vector<FunctionArgIndex>& expected,
const std::vector<FunctionArgIndex>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i].index, actual[i].index) << " at index " << i;
ASSERT_EQ(expected[i].sub_index, actual[i].sub_index) << " at index " << i;
}
}
void CheckAlloc(const std::vector<bool>& expected,
const std::vector<AllocatorAttributes>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i].on_host()) << " at index " << i;
}
}
void CheckIndex(const Node& node, int expected_index) {
const AttrValue* attr_value;
TF_ASSERT_OK(node.attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ASSERT_EQ(expected_index, index);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{3, -1}}, arg_indices);
CheckRetIndices({5}, ret_indices);
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
std::unordered_map<string, Node*> nodes = graph->BuildNodeNameIndex();
ASSERT_EQ(1, nodes.count("x"));
CheckIndex(*nodes["x"], 0);
ASSERT_EQ(1, nodes.count("retval1"));
CheckIndex(*nodes["retval1"], 0);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsNotOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Int32FulltypePass int32_fulltype;
TF_ASSERT_OK(
int32_fulltype.ProcessGraph(graph.get(), /*ints_on_device=*/false));
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({true}, arg_alloc_attrs);
CheckAlloc({true}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, /*ints_on_device=*/true);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets_Order) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {9, 7, 5, 3, 1}, {2, 4, 6, 8, 10});
const std::map<int, int> sub_indices = {
{7, 2}, {3, 1}, {1, 0}, {5, 2}, {9, 0}};
const AttrValue* attr_value;
for (Node* n : graph->op_nodes()) {
if (n->IsArg()) {
TF_ASSERT_OK(n->attrs().Find("index", &attr_value));
n->AddAttr("sub_index",
sub_indices.at(static_cast<int>(attr_value->i())));
}
}
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
&ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{1, 0}, {3, 1}, {5, 2}, {7, 2}, {9, 0}}, arg_indices);
CheckRetIndices({2, 4, 6, 8, 10}, ret_indices);
CheckAlloc({false, false, false, false, false}, arg_alloc_attrs);
CheckAlloc({false, false, false, false, false}, ret_alloc_attrs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa658703-36ea-4391-9a72-299b017e9eb8 | cpp | tensorflow/tensorflow | placer | tensorflow/core/common_runtime/placer.cc | tensorflow/core/common_runtime/placer_test.cc | #include "tensorflow/core/common_runtime/placer.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/colocation_graph.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/util/port.h"
namespace tensorflow {
namespace {
struct NameCounts {
mutex counts_mutex;
std::unordered_map<string, int> counts;
};
string MakeUniqueFilename(string name) {
static NameCounts& instance = *new NameCounts;
for (int i = 0; i < name.size(); ++i) {
char ch = name[i];
if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?') {
name[i] = '_';
}
}
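// Append a per-name counter so repeated dumps of the same graph name do not
// overwrite one another.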
int count;
{
mutex_lock lock(instance.counts_mutex);
count = instance.counts[name]++;
}
string filename = name;
if (count > 0) {
absl::StrAppend(&filename, "_", count);
}
absl::StrAppend(&filename, ".txt");
return filename;
}
Status GetFileName(string base_name, string* fname) {
const char* dir = getenv("TF_DUMP_GRAPH_PREFIX");
if (!dir) {
return absl::InternalError(
absl::StrCat("Failed to get the directory for ", base_name,
" because dump location is not specified through "
"TF_DUMP_GRAPH_PREFIX environment variable"));
}
std::string result = dir;
if (absl::EqualsIgnoreCase(result, "sponge") &&
!io::GetTestUndeclaredOutputsDir(&result)) {
return absl::InternalError(
"TF_DUMP_GRAPH_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS is not set");
}
base_name = MakeUniqueFilename(base_name);
*fname = absl::StrCat(result, "/", base_name);
return absl::OkStatus();
}
void DumpColocationGraph(const string& base_name,
const ColocationGraph& colocation_graph) {
string fname;
Status status = GetFileName(base_name, &fname);
if (status.ok()) {
status = WriteStringToFile(Env::Default(), fname,
colocation_graph.DebugString());
if (status.ok()) {
LOG(INFO) << "Wrote ColocationGraph to " << fname;
}
}
if (!status.ok()) {
LOG(ERROR) << "Failed to write final colocation graph to file " << fname
<< " with " << status.ToString();
}
}
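// A "generator" node produces data from nothing: no inputs and a single
// non-reference output.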
bool IsGeneratorNode(const Node* node) {
return node->num_inputs() == 0 && node->num_outputs() == 1 &&
!IsRefType(node->output_type(0));
}
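// Returns true for an unconstrained Identity node whose single input and
// single output agree on a requested device, so the Identity can simply be
// co-located with them.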
bool MatchIdentityOperation(const Node* node) {
if (!node) {
return false;
}
if (!node->IsIdentity()) {
return false;
}
if (node->has_assigned_device_name()) {
return false;
}
if (!node->requested_device().empty()) {
return false;
}
if (node->in_edges().size() != 1) {
return false;
}
if (node->out_edges().size() != 1) {
return false;
}
const Node* input = *node->in_nodes().begin();
const Node* output = *node->out_nodes().begin();
return input->requested_device() == output->requested_device();
}
void LogDeviceAssignment(const Node* node, bool log_device_placement) {
if (log_device_placement) {
printf("%s: (%s): %s\n", node->name().c_str(), node->type_string().c_str(),
node->assigned_device_name().c_str());
LOG(INFO) << node->name() << ": "
<< "(" << node->type_string()
<< "): " << node->assigned_device_name();
}
if (VLOG_IS_ON(1)) {
if (VLOG_IS_ON(4)) {
VLOG(4) << "\nNode:\n"
<< node->def().DebugString()
<< "placed on: " << node->assigned_device_name();
} else {
VLOG(1) << node->name() << "(" << node->type_string()
<< ") placed on: " << node->assigned_device_name();
}
}
}
Status AssignAndLog(int assigned_device, Node* node,
ColocationGraph* colocation_graph,
bool log_device_placement) {
node->set_assigned_device_name_index(assigned_device);
TF_RETURN_IF_ERROR(colocation_graph->LimitToAssignedDevice(*node));
LogDeviceAssignment(node, log_device_placement);
return absl::OkStatus();
}
}
Placer::Placer(Graph* graph, const string& function_name,
const FunctionLibraryDefinition* flib_def,
const DeviceSet* devices, const Device* default_local_device,
bool allow_soft_placement, bool log_device_placement)
: graph_(graph),
function_name_(function_name),
flib_def_(flib_def),
devices_(devices),
default_local_device_(default_local_device),
allow_soft_placement_(allow_soft_placement),
log_device_placement_(log_device_placement) {}
Placer::Placer(Graph* graph, const string& function_name,
const FunctionLibraryDefinition* flib_def,
const DeviceSet* devices, const Device* default_local_device)
: Placer(graph, function_name, flib_def, devices, default_local_device,
true, false) {}
Placer::Placer(Graph* graph, const string& function_name,
const FunctionLibraryDefinition* flib_def,
const DeviceSet* devices)
: Placer(graph, function_name, flib_def, devices, nullptr, true, false) {}
Placer::~Placer() {}
Status Placer::Run() {
GraphOptimizationPassOptions options;
return Run(options);
}
Status Placer::Run(const GraphOptimizationPassOptions& options) {
if (devices_->devices().empty()) {
return errors::FailedPrecondition("No devices are registered");
}
if (VLOG_IS_ON(3)) {
DumpGraphToFile(
strings::StrCat(options.debug_filename_prefix, "placer_input"), *graph_,
nullptr);
}
if (VLOG_IS_ON(5)) {
for (const Node* node : graph_->op_nodes()) {
VLOG(5) << " " << node->name() << ": requested: '"
<< node->requested_device() << "' assigned: '"
<< node->assigned_device_name() << "'";
}
}
FunctionStack stack(function_name_);
ColocationGraph colocation_graph(graph_, stack, flib_def_, devices_,
default_local_device_, allow_soft_placement_,
log_device_placement_);
TF_RETURN_IF_ERROR(colocation_graph.Initialize());
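// Generator nodes are deferred to a second pass so they can be placed on
// the same device as their consumers (see IsGeneratorNode above).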
std::vector<Node*> second_pass;
for (Node* node : graph_->op_nodes()) {
if (node->has_assigned_device_name()) {
TF_RETURN_IF_ERROR(colocation_graph.LimitToAssignedDevice(*node));
LogDeviceAssignment(node, log_device_placement_);
continue;
}
if (IsGeneratorNode(node)) {
second_pass.push_back(node);
continue;
}
const std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
return AttachDef(
errors::InvalidArgument("Cannot assign a device for operation ",
node->name(), ": ", status.message()),
*node);
}
int assigned_device = -1;
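// Heuristic: place metadata ops (e.g. Shape) and trivial Identity ops on
// the same device as their input to avoid unnecessary copies.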
if (IsMetadata(node) || MatchIdentityOperation(node)) {
const Node* input = (*node->in_edges().begin())->src();
if (CanAssignToDevice(input->assigned_device_name(), *devices)) {
assigned_device = input->assigned_device_name_index();
}
}
if (assigned_device == -1) {
assigned_device = graph_->InternDeviceName((*devices)[0]->name());
}
TF_RETURN_IF_ERROR(AssignAndLog(assigned_device, node, &colocation_graph,
log_device_placement_));
}
for (Node* node : second_pass) {
const std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
return AttachDef(
errors::InvalidArgument("Cannot assign a device for operation ",
node->name(), ": ", status.message()),
*node);
}
int assigned_device = -1;
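// Place a generator on its consumers' device when they all agree, so the
// freshly produced value does not require an immediate cross-device copy.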
if (IsGeneratorNode(node) && !node->out_edges().empty()) {
const Node* output = (*node->out_edges().begin())->dst();
int output_device_name = output->assigned_device_name_index();
const bool consumers_on_same_device = std::all_of(
node->out_edges().begin(), node->out_edges().end(),
[output_device_name](const Edge* e) {
return e->dst()->assigned_device_name_index() == output_device_name;
});
if (consumers_on_same_device &&
CanAssignToDevice(output->assigned_device_name(), *devices)) {
assigned_device = output_device_name;
}
}
if (assigned_device == -1) {
assigned_device = graph_->InternDeviceName((*devices)[0]->name());
}
TF_RETURN_IF_ERROR(AssignAndLog(assigned_device, node, &colocation_graph,
log_device_placement_));
}
if (VLOG_IS_ON(3)) {
DumpGraphToFile(
strings::StrCat(options.debug_filename_prefix, "placer_output"),
*graph_, nullptr);
DumpColocationGraph(
strings::StrCat(options.debug_filename_prefix, "colocation_graph"),
colocation_graph);
}
return absl::OkStatus();
}
bool Placer::CanAssignToDevice(const string& candidate_device_name,
const std::vector<Device*>& devices) const {
if (!candidate_device_name.empty()) {
const Device* other_device =
devices_->FindDeviceByName(candidate_device_name);
if (std::find(devices.begin(), devices.end(), other_device) !=
devices.end()) {
return true;
}
}
return false;
}
} | #include "tensorflow/core/common_runtime/placer.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
constexpr char kCPU[] = "/device:FakeCPU:0";
constexpr char kGPU[] = "/device:FakeGPU:0";
constexpr char kFullCPU[] = "/job:a/replica:0/task:0/device:FakeCPU:0";
constexpr char kFullGPU[] = "/job:a/replica:0/task:0/device:FakeGPU:0";
namespace {
class DummyOp : public OpKernel {
public:
explicit DummyOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {}
};
class FakeDevice : public Device {
private:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
public:
Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
static std::unique_ptr<Device> MakeDevice(const string& name,
const string& device_type) {
DeviceAttributes device_attributes;
device_attributes.set_name(name);
device_attributes.set_device_type(device_type);
return std::unique_ptr<Device>(new FakeDevice(device_attributes));
}
static std::unique_ptr<Device> MakeCPU(const string& name) {
return MakeDevice(name, "FakeCPU");
}
static std::unique_ptr<Device> MakeGPU(const string& name) {
return MakeDevice(name, "FakeGPU");
}
};
class DummyFactory : public DeviceFactory {
public:
Status ListPhysicalDevices(std::vector<string>* devices) override {
return absl::OkStatus();
}
Status CreateDevices(const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) override {
return absl::OkStatus();
}
};
REGISTER_LOCAL_DEVICE_FACTORY("FakeCPU", DummyFactory);
REGISTER_LOCAL_DEVICE_FACTORY("FakeGPU", DummyFactory, 51);
REGISTER_OP("TestVariable").Output("o: Ref(float)");
REGISTER_KERNEL_BUILDER(Name("TestVariable").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestVariable").Device("FakeGPU"), DummyOp);
REGISTER_OP("VariableCPU").Output("o: Ref(float)");
REGISTER_KERNEL_BUILDER(Name("VariableCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("VariableGPU").Output("o: Ref(float)");
REGISTER_KERNEL_BUILDER(Name("VariableGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("VariableNoKernels").Output("o: Ref(float)");
REGISTER_OP("TestAdd").Input("a: float").Input("b: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("TestAdd").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestAdd").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestRelu").Input("i: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("TestRelu").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestRelu").Device("FakeGPU"), DummyOp);
REGISTER_OP("ReluCPU").Input("i: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("ReluCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("ReluGPU").Input("i: float").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("ReluGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestAssign").Input("i: Ref(float)").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("TestAssign").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestAssign").Device("FakeGPU"), DummyOp);
REGISTER_OP("AssignCPU").Input("i: Ref(float)").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("AssignCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("AssignGPU").Input("i: Ref(float)").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("AssignGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_KERNEL_BUILDER(Name("TestInput").Device("FakeCPU"), DummyOp);
REGISTER_OP("TestCPUGPUOutput").Output("a: float");
REGISTER_KERNEL_BUILDER(Name("TestCPUGPUOutput").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestCPUGPUOutput").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestGPUOutput").Output("a: float");
REGISTER_KERNEL_BUILDER(Name("TestGPUOutput").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestDevice").Output("a: float").Output("b: float");
REGISTER_KERNEL_BUILDER(Name("TestDevice").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestDeviceEnforce").Input("a: Ref(float)").Output("b: float");
REGISTER_KERNEL_BUILDER(Name("TestDeviceEnforce").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestDeviceEnforce").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Shape").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Shape").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestDatasetOp").Input("a: float").Output("b: float");
REGISTER_KERNEL_BUILDER(Name("TestDatasetOp").Device("FakeCPU").Priority(2),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestDatasetOp").Device("FakeGPU").Priority(1),
DummyOp);
REGISTER_OP("TestXlaOp").Input("a: float").Output("b: float");
REGISTER_KERNEL_BUILDER(Name("TestXlaOp").Device("XLA_CPU").Priority(2),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestXlaOp").Device("FakeCPU").Priority(1),
DummyOp);
REGISTER_OP("TestUncopiableTypeGeneratorCPU")
.Output("d: variant")
.SetTypeConstructor(full_type::UnaryGeneric(TFT_DATASET));
REGISTER_KERNEL_BUILDER(
Name("TestUncopiableTypeGeneratorCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("TestTypedConsumer").Input("i: variant");
REGISTER_KERNEL_BUILDER(Name("TestTypedConsumer").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestTypedConsumer").Device("FakeGPU"), DummyOp);
REGISTER_OP("ConvertToListOfCooTensorsV2").Input("i: int32");
class PlacerTest : public ::testing::Test {
protected:
PlacerTest() : PlacerTest(10) {}
explicit PlacerTest(int num_devices) {
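// Registers num_devices FakeCPU and num_devices FakeGPU devices (GPU ids
// assigned in reverse order), plus one XLA_CPU and one COMPOSITE device.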
for (int i = 0; i < num_devices; ++i) {
local_devices_.emplace_back(FakeDevice::MakeCPU(
strings::StrCat("/job:a/replica:0/task:0/device:FakeCPU:", i)));
devices_.AddDevice(local_devices_.back().get());
local_devices_.emplace_back(FakeDevice::MakeGPU(strings::StrCat(
"/job:a/replica:0/task:0/device:FakeGPU:", num_devices - 1 - i)));
devices_.AddDevice(local_devices_.back().get());
}
local_devices_.emplace_back(FakeDevice::MakeDevice(
"/job:a/replica:0/task:0/device:XLA_CPU:0", "XLA_CPU"));
devices_.AddDevice(local_devices_.back().get());
local_devices_.emplace_back(FakeDevice::MakeDevice(
"/job:a/replica:0/task:0/device:COMPOSITE:0", "COMPOSITE"));
devices_.AddDevice(local_devices_.back().get());
}
Status BuildGraph(const GraphDefBuilder& builder, Graph* out_graph) {
TF_RETURN_IF_ERROR(GraphDefBuilderToGraph(builder, out_graph));
RebuildNodeNameMap(*out_graph);
return absl::OkStatus();
}
Status BuildGraph(const GraphDef& graph_def, Graph* out_graph) {
GraphConstructorOptions opts;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, graph_def, out_graph));
RebuildNodeNameMap(*out_graph);
return absl::OkStatus();
}
Status Place(Graph* graph, DeviceSet* devices, Device* default_local_device,
bool allow_soft_placement, bool log_device_placement) {
Placer placer(graph, "", &graph->flib_def(), devices, default_local_device,
allow_soft_placement, log_device_placement);
return placer.Run();
}
Status CallOptPassesAndPlace(Graph* graph, DeviceSet* devices,
bool allow_soft_placement,
bool log_device_placement) {
SessionOptions session_options;
GraphOptions* graph_opts = session_options.config.mutable_graph_options();
OptimizerOptions* optimizer_opts = graph_opts->mutable_optimizer_options();
optimizer_opts->set_opt_level(OptimizerOptions::L0);
optimizer_opts->set_global_jit_level(OptimizerOptions::OFF);
RewriterConfig* rewriter_config = graph_opts->mutable_rewrite_options();
rewriter_config->set_disable_meta_optimizer(true);
GraphOptimizationPassOptions optimization_options;
std::unique_ptr<Graph> graph_ptr(graph);
optimization_options.graph = &graph_ptr;
FunctionLibraryDefinition flib_def(graph->flib_def());
optimization_options.flib_def = &flib_def;
optimization_options.device_set = &devices_;
optimization_options.session_options = &session_options;
optimization_options.debug_filename_prefix = "placer_test_";
Status s = OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::PRE_PLACEMENT, optimization_options);
if (!s.ok()) {
graph_ptr.release();
return s;
}
graph = graph_ptr.release();
RebuildNodeNameMap(*graph);
Placer placer(graph, "", &graph->flib_def(), devices, nullptr,
allow_soft_placement, log_device_placement);
return placer.Run(optimization_options);
}
Status Place(Graph* graph, DeviceSet* devices) {
return Place(graph, devices, nullptr, true, false);
}
Status Place(Graph* graph, bool allow_soft_placement,
bool log_device_placement) {
return Place(graph, &devices_, nullptr, allow_soft_placement,
log_device_placement);
}
Status Place(Graph* graph) {
return Place(graph, &devices_, nullptr, true, false);
}
Status CallOptPassesAndPlace(Graph* graph, bool allow_soft_placement,
bool log_device_placement) {
return CallOptPassesAndPlace(graph, &devices_, allow_soft_placement,
log_device_placement);
}
Status CallOptPassesAndPlace(Graph* graph) {
return CallOptPassesAndPlace(graph, &devices_, true, false);
}
Node* GetNodeByName(const Graph& graph, const string& name) {
const auto search = nodes_by_name_.find(name);
CHECK(search != nodes_by_name_.end()) << "Unknown node name: " << name;
return graph.FindNodeId(search->second);
}
protected:
std::vector<std::unique_ptr<Device>> local_devices_;
DeviceSet devices_;
std::unordered_map<string, int> nodes_by_name_;
Status ReferenceTestHelper(const string& variable_op_type,
const string& assign_op_type,
const DeviceType& expected_device_type);
private:
void RebuildNodeNameMap(const Graph& graph) {
nodes_by_name_.clear();
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node->id();
}
}
};
class SoftPlacementPlacerTest : public PlacerTest,
public ::testing::WithParamInterface<bool> {};
INSTANTIATE_TEST_SUITE_P(All, SoftPlacementPlacerTest,
::testing::Values(false, true),
::testing::PrintToStringParamName());
#define EXPECT_COLOCATED(g, name_a, name_b) \
do { \
Graph& g_ = (g); \
EXPECT_EQ(GetNodeByName(g_, (name_a))->assigned_device_name(), \
GetNodeByName(g_, (name_b))->assigned_device_name()); \
} while (0)
#define EXPECT_NOT_COLOCATED(g, name_a, name_b) \
do { \
Graph& g_ = (g); \
EXPECT_NE(GetNodeByName(g_, (name_a))->assigned_device_name(), \
GetNodeByName(g_, (name_b))->assigned_device_name()); \
} while (0)
#define EXPECT_DEVICE_TYPE(g, name, expected_device_type) \
EXPECT_EQ(DeviceType(expected_device_type).type(), \
devices_ \
.FindDeviceByName( \
GetNodeByName((g), (name))->assigned_device_name()) \
->attributes() \
.device_type())
#define EXPECT_SAME_TYPE(g, node1, node2) \
EXPECT_EQ(devices_ \
.FindDeviceByName( \
GetNodeByName((g), (node1))->assigned_device_name()) \
->attributes() \
.device_type(), \
devices_ \
.FindDeviceByName( \
GetNodeByName((g), (node2))->assigned_device_name()) \
->attributes() \
.device_type())
#define EXPECT_DEVICE_CONTAINS(g, name, device_substr) \
EXPECT_TRUE(absl::StrContains( \
GetNodeByName((g), (name))->assigned_device_name(), device_substr))
TEST_F(PlacerTest, TestNoConstraints) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::UnaryOp("TestRelu", ops::NodeOut(input, 0), b.opts().WithName("n1"));
ops::UnaryOp("TestRelu", ops::NodeOut(input, 1), b.opts().WithName("n2"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "n1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "n2", "FakeGPU");
}
TEST_F(PlacerTest, TestNoConstraintsWithPrioritizedKernels) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::UnaryOp("TestDatasetOp", ops::NodeOut(input, 0),
b.opts().WithName("n1"));
ops::UnaryOp("TestDatasetOp", ops::NodeOut(input, 1),
b.opts().WithName("n2"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "n1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "n2", "FakeCPU");
}
TEST_F(PlacerTest, TestXlaOpPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::UnaryOp("TestXlaOp", ops::NodeOut(input, 0), b.opts().WithName("n1"));
ops::UnaryOp("TestXlaOp", ops::NodeOut(input, 1), b.opts().WithName("n2"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "n2")->set_assigned_device_name(
"/job:a/replica:0/task:0/device:XLA_CPU:0");
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "n1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "n2", "XLA_CPU");
}
TEST_F(PlacerTest, TestGPUInputIntoPrioritizedKernel) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestGPUOutput", b.opts().WithName("in"));
ops::UnaryOp("TestDatasetOp", ops::NodeOut(input, 0),
b.opts().WithName("n1"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "n1", "FakeCPU");
}
TEST_F(PlacerTest, TestGPUInputColocatedWithPrioritizedKernel) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestGPUOutput", b.opts().WithName("in"));
ops::UnaryOp("TestDatasetOp", ops::NodeOut(input, 0),
b.opts().WithName("n1").WithAttr("_class", {"loc:@in"}));
ops::UnaryOp("TestDatasetOp", ops::NodeOut(input, 0),
b.opts().WithName("n2"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "n1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "n2", "FakeCPU");
}
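// Ops for the priority tests below. The "SP" kernels give FakeCPU the higher
// priority, the "RP" kernels give FakeGPU the higher priority, the "NP"
// kernels register no priority, and the CPU/GPU variants have a kernel for
// only one device type.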
REGISTER_OP("CreateDatasetCPU").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("CreateDatasetCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("CreateDatasetGPU").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("CreateDatasetGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("CreateDatasetSP").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("CreateDatasetSP").Device("FakeCPU").Priority(2),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("CreateDatasetSP").Device("FakeGPU").Priority(1),
DummyOp);
REGISTER_OP("CreateDatasetRP").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("CreateDatasetRP").Device("FakeCPU").Priority(1),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("CreateDatasetRP").Device("FakeGPU").Priority(2),
DummyOp);
REGISTER_OP("CreateDatasetNP").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("CreateDatasetNP").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("CreateDatasetNP").Device("FakeGPU"), DummyOp);
REGISTER_OP("IteratorNP").Input("i: resource").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("IteratorNP").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("IteratorNP").Device("FakeGPU"), DummyOp);
REGISTER_OP("IteratorSP").Input("i: resource").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("IteratorSP").Device("FakeCPU").Priority(2),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("IteratorSP").Device("FakeGPU").Priority(1),
DummyOp);
REGISTER_OP("IteratorRP").Input("i: resource").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("IteratorRP").Device("FakeCPU").Priority(1),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("IteratorRP").Device("FakeGPU").Priority(2),
DummyOp);
REGISTER_OP("IteratorGPU").Input("i: resource").Output("o: float");
REGISTER_KERNEL_BUILDER(Name("IteratorGPU").Device("FakeGPU"), DummyOp);
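// The tests below check how kernel priorities interact with the resource
// edge between a dataset op and its iterator, which forces the pair onto a
// single device.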
TEST_F(PlacerTest, TestDSWithPriority) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetSP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorNP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeCPU");
}
TEST_F(PlacerTest, TestDSWithGPUPriority) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetRP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorNP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
TEST_F(PlacerTest, TestITWithPriority) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetNP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorSP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeCPU");
}
TEST_F(PlacerTest, TestITWithGPUPriority) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetNP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorRP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
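// The iterator has only a FakeGPU kernel, so it pulls the CPU-prioritized
// dataset op onto FakeGPU via the resource edge.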
TEST_F(PlacerTest, TestITGPU) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetSP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorGPU", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
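// The dataset op has only a FakeCPU kernel, so it pins the GPU-prioritized
// iterator to FakeCPU via the resource edge.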
TEST_F(PlacerTest, TestSimpleIteratorOnlyGPU) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetCPU", b.opts().WithName("ds"));
ops::UnaryOp("IteratorRP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeCPU");
}
TEST_F(PlacerTest, TestAgreeingPriorities) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetSP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorSP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeCPU");
}
TEST_F(PlacerTest, TestAgreeingRegularPriorities) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetRP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorRP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
TEST_F(PlacerTest, TestConflictingPriorities) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetSP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorRP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
TEST_F(PlacerTest, TestConflictingPrioritiesReversed) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds = ops::SourceOp("CreateDatasetRP", b.opts().WithName("ds"));
ops::UnaryOp("IteratorSP", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
TEST_F(PlacerTest, TestDeviceTypeConstraints) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
ops::BinaryOp("AssignCPU", var_cpu, input, b.opts().WithName("assign_cpu"));
Node* var_gpu = ops::SourceOp("VariableGPU", b.opts().WithName("var_gpu"));
ops::BinaryOp("AssignGPU", var_gpu, input, b.opts().WithName("assign_gpu"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "var_cpu", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "assign_cpu", "FakeCPU");
EXPECT_COLOCATED(g, "var_cpu", "assign_cpu");
EXPECT_DEVICE_TYPE(g, "var_gpu", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "assign_gpu", "FakeGPU");
EXPECT_COLOCATED(g, "var_gpu", "assign_gpu");
}
TEST_F(PlacerTest, TestMetadataColocatedWithInput) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
ops::UnaryOp("Shape", var_cpu, b.opts().WithName("shape_op"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "var_cpu", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "shape_op", "FakeCPU");
EXPECT_COLOCATED(g, "var_cpu", "shape_op");
}
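// A generator node (no inputs) with a single consumer should be placed on
// the consumer's device.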
TEST_F(PlacerTest, TestHeuristicGeneratorFollowsSingleConsumer) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
Node* input = ops::SourceOp("TestCPUGPUOutput", b.opts().WithName("in"));
ops::BinaryOp("TestAssign", var_cpu, input, b.opts().WithName("assign"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "var_cpu", "in");
EXPECT_COLOCATED(g, "assign", "in");
}
TEST_F(PlacerTest, TestIgnoreGeneratorHeuristicIfWrongDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
Node* input = ops::SourceOp("TestGPUOutput", b.opts().WithName("in"));
ops::BinaryOp("TestAssign", var_cpu, input, b.opts().WithName("assign"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "var_cpu", "FakeCPU");
EXPECT_COLOCATED(g, "var_cpu", "assign");
}
TEST_F(PlacerTest, TestIgnoreGeneratorHeuristicIfWrongPartialDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
Node* input =
ops::SourceOp("TestCPUGPUOutput",
b.opts().WithName("in").WithDevice("/device:FakeCPU:1"));
ops::BinaryOp("TestAssign", var_cpu, input, b.opts().WithName("assign"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_CONTAINS(g, "in", "/device:FakeCPU:1");
EXPECT_DEVICE_TYPE(g, "var_cpu", "FakeCPU");
EXPECT_COLOCATED(g, "var_cpu", "assign");
EXPECT_DEVICE_CONTAINS(g, "var_cpu", "/device:FakeCPU:0");
}
TEST_F(PlacerTest, TestPartialSpec) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:a"));
ops::SourceOp("TestVariable",
b.opts().WithName("var").WithDevice("/job:a"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_CONTAINS(g, "in", "/job:a");
EXPECT_DEVICE_TYPE(g, "var", "FakeGPU");
EXPECT_DEVICE_CONTAINS(g, "var", "/job:a");
}
TEST_F(PlacerTest, TestAssignedDevicePreserved) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name(
"/job:a/replica:0/task:0/device:FakeCPU:7");
TF_EXPECT_OK(Place(&g));
EXPECT_EQ("/job:a/replica:0/task:0/device:FakeCPU:7",
GetNodeByName(g, "in")->assigned_device_name());
}
TEST_F(PlacerTest, TestPartialSpecGpuToCpu) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput",
b.opts().WithName("in").WithDevice("/device:FakeGPU:0"));
ops::SourceOp("TestVariable",
b.opts().WithName("var").WithDevice("/device:FakeGPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g, true, false));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_CONTAINS(g, "in", "/device:FakeCPU");
EXPECT_DEVICE_TYPE(g, "var", "FakeGPU");
EXPECT_DEVICE_CONTAINS(g, "var", "/device:FakeGPU:0");
}
TEST_F(PlacerTest, TestResourceMove) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* ds =
ops::SourceOp("CreateDatasetSP",
b.opts().WithName("ds").WithDevice("/device:FakeCPU:0"));
ops::UnaryOp("IteratorGPU", ops::NodeOut(ds, 0), b.opts().WithName("it"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "ds", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "it", "FakeGPU");
}
TEST_F(PlacerTest, TestAssignedGpuDeviceToCpuDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name(
"/job:a/replica:0/task:0/device:FakeGPU:0");
Status s = Place(&g);
EXPECT_EQ(error::INTERNAL, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Assigned device '/job:a/replica:0/task:0/device:FakeGPU:0' "
"does not have registered OpKernel support for TestInput"))
<< s.ToString();
}
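// Builds ten (variable, assign) pairs of the given op types, places the
// graph, and verifies that each pair lands together on the expected device
// type.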
Status PlacerTest::ReferenceTestHelper(const string& variable_op_type,
const string& assign_op_type,
const DeviceType& expected_device_type) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
for (int i = 0; i < 10; ++i) {
Node* var = ops::SourceOp(variable_op_type,
b.opts().WithName(strings::StrCat("var_", i)));
ops::BinaryOp(assign_op_type, var, input,
b.opts().WithName(strings::StrCat("assign_", i)));
}
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_RETURN_IF_ERROR(Place(&g));
for (int i = 0; i < 10; ++i) {
EXPECT_COLOCATED(g, strings::StrCat("var_", i),
strings::StrCat("assign_", i));
EXPECT_DEVICE_TYPE(g, strings::StrCat("var_", i), expected_device_type);
EXPECT_DEVICE_TYPE(g, strings::StrCat("assign_", i), expected_device_type);
}
return absl::OkStatus();
}
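// Exercises the feasible and infeasible combinations of variable and assign
// kernels joined by a reference edge.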
TEST_F(PlacerTest, TestReferenceConnection) {
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "TestAssign", "FakeGPU"));
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignCPU", "FakeCPU"));
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignGPU", "FakeGPU"));
TF_EXPECT_OK(ReferenceTestHelper("VariableCPU", "TestAssign", "FakeCPU"));
TF_EXPECT_OK(ReferenceTestHelper("VariableCPU", "AssignCPU", "FakeCPU"));
{
Status s = ReferenceTestHelper("VariableCPU", "AssignGPU", "FakeCPU");
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(), "no device type supports both of those nodes"));
}
TF_EXPECT_OK(ReferenceTestHelper("VariableGPU", "TestAssign", "FakeGPU"));
{
Status s = ReferenceTestHelper("VariableGPU", "AssignCPU", "FakeCPU");
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(), "no device type supports both of those nodes"));
}
TF_EXPECT_OK(ReferenceTestHelper("VariableGPU", "AssignGPU", "FakeGPU"));
}
REGISTER_OP("TestHandleVariable").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("TestHandleVariable").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestHandleVariable").Device("FakeGPU"), DummyOp);
REGISTER_OP("HandleVariableCPU").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("HandleVariableCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("HandleVariableGPU").Output("o: resource");
REGISTER_KERNEL_BUILDER(Name("HandleVariableGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestHandleAssign").Input("i: resource").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("TestHandleAssign").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestHandleAssign").Device("FakeGPU"), DummyOp);
REGISTER_OP("HandleAssignCPU").Input("i: resource").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("HandleAssignCPU").Device("FakeCPU"), DummyOp);
REGISTER_OP("HandleAssignGPU").Input("i: resource").Input("v: float");
REGISTER_KERNEL_BUILDER(Name("HandleAssignGPU").Device("FakeGPU"), DummyOp);
REGISTER_OP("TestTwoHandlesIn").Input("i: resource").Input("j: resource");
REGISTER_KERNEL_BUILDER(Name("TestTwoHandlesIn").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("TestTwoHandlesIn").Device("FakeGPU"), DummyOp);
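// Resource handles constrain placement like reference edges: the producer
// and consumer of a handle must be colocated on a mutually supported device.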
TEST_F(PlacerTest, TestResourceHandle) {
auto handle_test = [this](const string& var_op_name,
const string& use_op_name, DeviceType device) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var = ops::SourceOp(var_op_name, b.opts().WithName("var"));
ops::BinaryOp(use_op_name, var, input, b.opts().WithName("assign"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_RETURN_IF_ERROR(Place(&g));
EXPECT_COLOCATED(g, "var", "assign");
EXPECT_DEVICE_TYPE(g, "var", device);
EXPECT_DEVICE_TYPE(g, "assign", device);
return absl::OkStatus();
};
TF_EXPECT_OK(
handle_test("TestHandleVariable", "TestHandleAssign", "FakeGPU"));
TF_EXPECT_OK(handle_test("TestHandleVariable", "HandleAssignCPU", "FakeCPU"));
TF_EXPECT_OK(handle_test("TestHandleVariable", "HandleAssignGPU", "FakeGPU"));
TF_EXPECT_OK(handle_test("HandleVariableCPU", "TestHandleAssign", "FakeCPU"));
TF_EXPECT_OK(handle_test("HandleVariableCPU", "HandleAssignCPU", "FakeCPU"));
TF_EXPECT_OK(handle_test("HandleVariableGPU", "HandleAssignGPU", "FakeGPU"));
TF_EXPECT_OK(handle_test("HandleVariableGPU", "TestHandleAssign", "FakeGPU"));
EXPECT_FALSE(
handle_test("HandleVariableGPU", "HandleAssignCPU", "FakeCPU").ok());
EXPECT_FALSE(
handle_test("HandleVariableCPU", "HandleAssignGPU", "FakeCPU").ok());
}
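// Feeding two resource handles with conflicting device constraints into one
// op must fail, with or without soft placement.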
TEST_F(PlacerTest, TestResourceHandlesOnDifferentDevicesFails) {
auto handle_test = [this](bool allow_soft_placement, bool set_assigned) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_cpu =
ops::SourceOp("TestHandleVariable", b.opts().WithName("var_cpu"));
Node* var_gpu =
ops::SourceOp("TestHandleVariable", b.opts().WithName("var_gpu"));
ops::BinaryOp("TestTwoHandlesIn", var_cpu, var_gpu,
b.opts().WithName("two_handles_in"));
TF_EXPECT_OK(BuildGraph(b, &g));
if (set_assigned) {
GetNodeByName(g, "var_cpu")
->set_assigned_device_name(
"/job:a/replica:0/task:0/device:FakeCPU:0");
GetNodeByName(g, "var_gpu")
->set_assigned_device_name(
"/job:a/replica:0/task:0/device:FakeGPU:0");
} else {
GetNodeByName(g, "var_cpu")
->set_requested_device("/job:a/replica:0/task:0/device:FakeCPU:0");
GetNodeByName(g, "var_gpu")
->set_requested_device("/job:a/replica:0/task:0/device:FakeGPU:0");
}
}
Status s = Place(&g, allow_soft_placement, true);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
if (set_assigned) {
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot place the graph because a reference or resource edge "
"connects "
"colocation groups with incompatible assigned devices: "
"/job:a/replica:0/task:0/device:FakeGPU:0 vs "
"/job:a/replica:0/task:0/device:FakeCPU:0"))
<< s.ToString();
} else {
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot place the graph because a reference or resource edge "
"connects "
"colocation groups with incompatible resource devices: "
"/job:a/replica:0/task:0/device:FakeGPU:0 vs "
"/job:a/replica:0/task:0/device:FakeCPU:0"))
<< s.ToString();
}
return absl::OkStatus();
};
TF_EXPECT_OK(handle_test(false, false));
TF_EXPECT_OK(handle_test(false, true));
TF_EXPECT_OK(handle_test(true, false));
TF_EXPECT_OK(handle_test(true, true));
}
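// A requested device that can never be satisfied (here, because a reference
// edge pins the node elsewhere) is ignored rather than reported as an error.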
TEST_F(PlacerTest, TestReferenceConnectionIgnoreInfeasible) {
Status s;
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp(
"TestDevice",
b.opts().WithName("in").WithDevice("/job:a/task:0/device:FakeGPU:0"));
Node* var =
ops::SourceOp("TestVariable", b.opts().WithName("var_0").WithDevice(
"/job:a/task:0/device:FakeGPU:0"));
ops::BinaryOp("TestAssign", var, input,
b.opts().WithName("assign").WithDevice(
"/job:a/task:0/device:FakeCPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
s = Place(&g, false, false);
TF_EXPECT_OK(s);
EXPECT_DEVICE_TYPE(g, "var_0", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "assign", "FakeGPU");
}
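// When both ends of a reference edge carry partial device specifications,
// the more specific one wins.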
TEST_F(PlacerTest, TestReferenceConnectionMoreSpecificDestinationSourceWins) {
Status s;
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("TestCPUGPUOutput",
b.opts().WithName("in").WithDevice("/job:a/task:0"));
Node* var = ops::SourceOp(
"TestVariable", b.opts().WithName("var_0").WithDevice("/job:a/task:0"));
ops::BinaryOp("TestAssign", var, input,
b.opts().WithName("assign").WithDevice(
"/job:a/task:0/device:FakeCPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
s = Place(&g, false, false);
TF_EXPECT_OK(s);
EXPECT_DEVICE_TYPE(g, "var_0", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "assign", "FakeCPU");
}
TEST_F(PlacerTest, TestReferenceConnectionNoSourceDevice) {
Status s;
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp(
"TestDevice",
b.opts().WithName("in").WithDevice("/job:a/task:0/device:FakeGPU:0"));
Node* var = ops::SourceOp("TestVariable", b.opts().WithName("var_0"));
ops::BinaryOp("TestAssign", var, input,
b.opts().WithName("assign").WithDevice(
"/job:a/task:0/device:FakeCPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
s = Place(&g, false, false);
TF_EXPECT_OK(s);
EXPECT_DEVICE_TYPE(g, "var_0", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "assign", "FakeCPU");
}
TEST_F(PlacerTest, TestResourceHandleOnCompositeDevice) {
auto build_graph = [this](Graph* g) -> Status {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var = ops::SourceOp("HandleVariableCPU", b.opts().WithName("var"));
ops::BinaryOp("TestHandleAssign", var, input, b.opts().WithName("assign"));
TF_RETURN_IF_ERROR(BuildGraph(b, g));
GetNodeByName(*g, "var")->set_assigned_device_name(
"/job:a/replica:0/task:0/device:COMPOSITE:0");
return absl::OkStatus();
};
{
Graph g(OpRegistry::Global());
TF_ASSERT_OK(build_graph(&g));
TF_ASSERT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "var", "COMPOSITE");
EXPECT_DEVICE_TYPE(g, "assign", "COMPOSITE");
}
{
Graph g(OpRegistry::Global());
TF_ASSERT_OK(build_graph(&g));
GetNodeByName(g, "assign")
->set_assigned_device_name("/job:a/replica:0/task:0/device:FakeCPU:0");
TF_ASSERT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "var", "COMPOSITE");
EXPECT_DEVICE_TYPE(g, "assign", "FakeCPU");
}
}
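// Nodes can request colocation with another node through the "_class" attr
// ("loc:@<name>"); the tests below cover single, multiple, and chained
// colocation groups.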
TEST_F(PlacerTest, TestColocationGroup) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* colocated_with_input = ops::UnaryOp(
"TestRelu", input,
b.opts().WithName("colocated_1").WithAttr("_class", {"loc:@in"}));
Node* not_colocated_with_input =
ops::UnaryOp("TestRelu", input, b.opts().WithName("foo"));
CHECK(colocated_with_input);
CHECK(not_colocated_with_input);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "in", "colocated_1");
EXPECT_NOT_COLOCATED(g, "in", "foo");
}
TEST_F(PlacerTest, TestMultipleColocationGroups) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* colocated_with_input = ops::UnaryOp(
"TestRelu", input,
b.opts().WithName("colocated_1").WithAttr("_class", {"loc:@in"}));
Node* colocated_with_input_and_other =
ops::UnaryOp("TestRelu", input,
b.opts().WithName("foo").WithAttr(
"_class", {"loc:@in", "loc:@colocated_1"}));
CHECK(colocated_with_input);
CHECK(colocated_with_input_and_other);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "in", "colocated_1");
EXPECT_COLOCATED(g, "in", "foo");
}
TEST_F(PlacerTest, TestChainColocation) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* colocated_with_input = ops::UnaryOp(
"TestRelu", input,
b.opts().WithName("colocated_1").WithAttr("_class", {"loc:@in"}));
Node* colocated_with_input_and_other = ops::UnaryOp(
"TestRelu", input,
b.opts().WithName("foo").WithAttr("_class", {"loc:@colocated_1"}));
CHECK(colocated_with_input);
CHECK(colocated_with_input_and_other);
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "in", "colocated_1");
EXPECT_COLOCATED(g, "in", "foo");
}
TEST_P(SoftPlacementPlacerTest, TestInvalidMultipleColocationGroups) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* colocated_with_input = ops::UnaryOp(
"ReluCPU", input,
b.opts().WithName("colocated_1").WithAttr("_class", {"loc:@in"}));
Node* colocated_with_input_and_other =
ops::UnaryOp("ReluGPU", input,
b.opts().WithName("foo").WithAttr(
"_class", {"loc:@in", "loc:@colocated_1"}));
CHECK(colocated_with_input);
CHECK(colocated_with_input_and_other);
TF_EXPECT_OK(BuildGraph(b, &g));
}
bool allow_soft_placement = GetParam();
Status s = Place(&g, allow_soft_placement, true);
if (allow_soft_placement) {
EXPECT_EQ(error::OK, s.code()) << s.ToString();
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "colocated_1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "foo", "FakeGPU");
} else {
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot colocate nodes {{colocation_node foo}} and "
"{{colocation_node in}} because no device type supports both of those "
"nodes and the other nodes colocated with them"))
<< s.ToString();
}
}
TEST_F(PlacerTest, TestColocationGroupWithReferenceConnections) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var1 = ops::SourceOp("VariableCPU", b.opts().WithName("var1"));
Node* var2 = ops::SourceOp("VariableCPU", b.opts().WithName("var2"));
Node* var3 = ops::SourceOp(
"VariableCPU",
b.opts().WithName("var3").WithDevice("/device:COMPOSITE:0"));
ops::BinaryOp(
"TestAssign", var1, input,
b.opts().WithName("assign1").WithAttr("_class", {"loc:@var1"}));
ops::BinaryOp(
"TestAssign", var2, input,
b.opts().WithName("assign2").WithAttr("_class", {"loc:@var2"}));
ops::BinaryOp(
"TestAssign", var3, input,
b.opts().WithName("assign3").WithAttr("_class", {"loc:@var3"}));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", "FakeCPU");
EXPECT_COLOCATED(g, "in", "var1");
EXPECT_COLOCATED(g, "in", "var2");
EXPECT_COLOCATED(g, "var1", "assign2");
EXPECT_COLOCATED(g, "var2", "assign1");
EXPECT_DEVICE_TYPE(g, "var3", "COMPOSITE");
EXPECT_COLOCATED(g, "var3", "assign3");
}
TEST_P(SoftPlacementPlacerTest,
TestColocationGroupWithUnsatisfiableReferenceConnections) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var1 = ops::SourceOp("VariableCPU", b.opts().WithName("var1"));
Node* var2 = ops::SourceOp("VariableCPU", b.opts().WithName("var2"));
Node* var3 = ops::SourceOp("VariableGPU", b.opts().WithName("var3"));
ops::BinaryOp(
"TestAssign", var1, input,
b.opts().WithName("assign1").WithAttr("_class", {"loc:@var1"}));
ops::BinaryOp(
"TestAssign", var2, input,
b.opts().WithName("assign2").WithAttr("_class", {"loc:@var2"}));
ops::BinaryOp(
"TestAssign", var3, input,
b.opts().WithName("assign3").WithAttr("_class", {"loc:@var2"}));
TF_EXPECT_OK(BuildGraph(b, &g));
}
bool allow_soft_placement = GetParam();
Status s = Place(&g, allow_soft_placement, true);
if (allow_soft_placement) {
EXPECT_EQ(error::OK, s.code()) << s.ToString();
} else {
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot colocate nodes {{colocation_node assign3}} and "
"{{colocation_node var2}} because no device type supports both of "
"those nodes and the other nodes colocated with them."))
<< s.ToString();
}
}
TEST_F(PlacerTest, TestColocationAndReferenceConnections) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
for (int i = 0; i < 10; ++i) {
Node* var = ops::SourceOp("TestVariable",
b.opts().WithName(strings::StrCat("var_", i)));
ops::BinaryOp("TestAssign", var, input,
b.opts().WithName(strings::StrCat("assign_", i)));
}
for (int i = 10; i < 100; ++i) {
Node* var = ops::SourceOp(
"TestVariable",
b.opts()
.WithName(strings::StrCat("var_", i))
.WithAttr("_class", {strings::StrCat("loc:@var_", i % 6)}));
ops::BinaryOp(
"TestAssign", var, input,
b.opts()
.WithName(strings::StrCat("assign_", i))
.WithAttr("_class", {strings::StrCat("loc:@assign_", i % 3)}));
}
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
for (int i = 0; i < 10; ++i) {
EXPECT_COLOCATED(g, strings::StrCat("var_", i),
strings::StrCat("assign_", i));
}
for (int i = 10; i < 100; ++i) {
EXPECT_COLOCATED(g, strings::StrCat("var_", i),
strings::StrCat("assign_", i));
EXPECT_COLOCATED(g, strings::StrCat("var_", i),
strings::StrCat("var_", i % 6));
EXPECT_COLOCATED(g, strings::StrCat("assign_", i),
strings::StrCat("assign_", i % 3));
}
}
TEST_F(PlacerTest, TestEmptyDeviceSet) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet empty;
Status s = Place(&g, &empty);
EXPECT_TRUE(absl::StrContains(s.message(), "No devices are registered"));
}
TEST_F(PlacerTest, TestHeterogeneousDeviceSetFailure) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* in = ops::SourceOp("TestInput", b.opts().WithName("in"));
Node* var = ops::SourceOp("VariableGPU", b.opts().WithName("var"));
ops::BinaryOp("TestAssign", var, in,
b.opts().WithName("assign").WithDevice("/job:b/task:1"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet heterogeneous;
std::unique_ptr<Device> gpu(
FakeDevice::MakeGPU("/job:b/replica:0/task:0/device:FakeGPU:0"));
heterogeneous.AddDevice(gpu.get());
std::unique_ptr<Device> cpu(
FakeDevice::MakeCPU("/job:b/replica:0/task:1/device:FakeCPU:0"));
heterogeneous.AddDevice(cpu.get());
Status s = Place(&g, &heterogeneous);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"colocated with a group of nodes that required "
"incompatible device"));
EXPECT_TRUE(absl::StrContains(s.message(), "VariableGPU: FakeGPU")) << s;
EXPECT_TRUE(absl::StrContains(s.message(), "TestAssign: FakeGPU FakeCPU"))
<< s;
}
TEST_F(PlacerTest, TestUnknownDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:foo"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(), "/job:foo"));
}
TEST_F(PlacerTest, TestUnknownMergedDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:foo"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(), "/job:foo"));
}
TEST_F(PlacerTest, TestUnknownAssignedDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/job:foo");
Status s = Place(&g);
EXPECT_EQ(error::INTERNAL, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(), "Assigned device '/job:foo' does not match any device"));
}
TEST_F(PlacerTest, TestNoKernelsRegisteredWithNoRequestedDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableNoKernels", b.opts().WithName("var"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"No OpKernel was registered to support Op "
"'VariableNoKernels' used by {{node var}}"));
EXPECT_TRUE(absl::StrContains(s.message(), "<no registered kernels>"));
}
TEST_F(PlacerTest, TestNoKernelsRegisteredWithRequestedDeviceLocal) {
const string cpu_device = "/job:b/replica:0/task:0/device:FakeCPU:0";
const string gpu_device = "/job:b/replica:0/task:0/device:FakeGPU:0";
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableNoKernels", b.opts().WithName("var"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "var")->set_requested_device(gpu_device);
DeviceSet devices;
std::unique_ptr<Device> gpu(FakeDevice::MakeGPU(gpu_device));
devices.AddDevice(gpu.get());
std::unique_ptr<Device> cpu(FakeDevice::MakeCPU(cpu_device));
devices.AddDevice(cpu.get());
Status s = Place(&g, &devices, cpu.get(), false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"No OpKernel was registered to support Op "
"'VariableNoKernels' used by {{node var}}"));
EXPECT_TRUE(absl::StrContains(s.message(), "<no registered kernels>"));
}
TEST_F(PlacerTest, TestNoKernelsRegisteredWithRequestedDeviceRemote) {
const string local_device = "/job:b/replica:0/task:0/device:FakeCPU:0";
const string remote_device = "/job:b/replica:0/task:1/device:FakeGPU:0";
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableNoKernels", b.opts().WithName("var"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "var")->set_requested_device(remote_device);
DeviceSet heterogeneous;
std::unique_ptr<Device> gpu(FakeDevice::MakeGPU(remote_device));
heterogeneous.AddDevice(gpu.get());
std::unique_ptr<Device> cpu(FakeDevice::MakeCPU(local_device));
heterogeneous.AddDevice(cpu.get());
TF_EXPECT_OK(Place(&g, &heterogeneous, cpu.get(), false, false));
EXPECT_DEVICE_CONTAINS(g, "var", remote_device);
}
TEST_F(PlacerTest, TestNoDevicesRegistered) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU", b.opts().WithName("var"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet cpu_only;
std::unique_ptr<Device> cpu(
FakeDevice::MakeCPU("/job:a/replica:0/task:0/device:FakeCPU:0"));
cpu_only.AddDevice(cpu.get());
Status s = Place(&g, &cpu_only);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"No OpKernel was registered to support Op "
"'VariableGPU' used by {{node var}}"));
EXPECT_TRUE(absl::StrContains(s.message(), "device='FakeGPU'"));
}
TEST_F(PlacerTest, TestMalformedDeviceSpecification) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/foo:bar"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"Malformed device specification '/foo:bar'"));
}
TEST_F(PlacerTest, TestMalformedAssignedDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/foo:bar");
Status s = Place(&g);
EXPECT_EQ(error::INTERNAL, s.code());
EXPECT_TRUE(
absl::StrContains(s.message(), "Malformed assigned device '/foo:bar'"));
}
TEST_F(PlacerTest, TestNonUniqueAssignedDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/job:a");
Status s = Place(&g);
EXPECT_EQ(error::INTERNAL, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(), "Assigned device '/job:a' does not match any device"));
}
TEST_F(PlacerTest, TestNonexistentGpuAllowSoftPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestDevice",
b.opts().WithName("in").WithDevice("/device:FakeGPU:11"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g, true, false));
EXPECT_DEVICE_CONTAINS(g, "in", "/device:FakeGPU:0");
}
TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestDevice",
b.opts().WithName("in").WithDevice("/device:FakeGPU:11"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g, false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(), "/device:FakeGPU:11"));
}
TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementFormatTag) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestDevice",
b.opts().WithName("in").WithDevice("/device:FakeGPU:11"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g, false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
LOG(WARNING) << s.message();
EXPECT_TRUE(absl::StrContains(s.message(),
"Cannot assign a device for operation in"));
EXPECT_TRUE(absl::StrContains(s.message(), "{{node in}}"));
}
TEST_F(PlacerTest, TestUnsupportedDeviceNoAllowSoftPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU",
b.opts().WithName("var").WithDevice("/device:FakeCPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g, false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(s.message(), "/device:FakeCPU:0"))
<< s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(), "no supported kernel for FakeCPU devices is available"))
<< s.ToString();
}
TEST_F(PlacerTest, TestNonExistentDevice) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU",
b.opts().WithName("var").WithDevice("/job:foo/replica:17"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g, false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
LOG(WARNING) << s.message();
EXPECT_TRUE(absl::StrContains(
s.message(), "was explicitly assigned to /job:foo/replica:17"));
EXPECT_TRUE(absl::StrContains(s.message(), "but available devices"));
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
TEST_F(PlacerTest, TestUseGpuWithNoCuda) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU",
b.opts().WithName("var").WithDevice("/device:gpu:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g, false, false);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
LOG(WARNING) << s.message();
EXPECT_TRUE(absl::StrContains(
s.message(),
"The requested device appears to be a GPU, but CUDA is not enabled."));
}
#endif
TEST_F(PlacerTest, TestUnsupportedDeviceAllowSoftPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput",
b.opts().WithName("a").WithDevice("/device:FakeGPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g, true, false));
}
TEST_F(PlacerTest, TestDeviceTypeConstraintsAllowSoftPlacement) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var_gpu = ops::SourceOp("VariableGPU", b.opts().WithName("var_gpu"));
ops::UnaryOp(
"TestDeviceEnforce", var_gpu,
b.opts().WithName("force_gpu").WithDevice("/device:FakeCPU:0"));
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
ops::UnaryOp(
"TestDeviceEnforce", var_cpu,
b.opts().WithName("force_cpu").WithDevice("/device:FakeGPU:0"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g, true, false));
EXPECT_DEVICE_TYPE(g, "var_gpu", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "force_gpu", "FakeGPU");
EXPECT_COLOCATED(g, "var_gpu", "force_gpu");
EXPECT_DEVICE_TYPE(g, "var_cpu", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "force_cpu", "FakeCPU");
EXPECT_COLOCATED(g, "var_cpu", "force_cpu");
}
TEST_F(PlacerTest, TestUnsatisfiableConstraintWithReferenceConnections) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var = ops::SourceOp("VariableGPU", b.opts().WithName("var"));
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::BinaryOp("AssignCPU", var, input, b.opts().WithName("assign"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"Cannot colocate nodes {{colocation_node "
"var}} and {{colocation_node assign}}"));
}
TEST_F(PlacerTest, TestGeneratorNodeFollowsConsumerNode) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var1_cpu =
ops::SourceOp("VariableCPU", b.opts().WithName("var1_cpu"));
Node* var2_cpu =
ops::SourceOp("VariableCPU", b.opts().WithName("var2_cpu"));
Node* input = ops::SourceOp("TestCPUGPUOutput", b.opts().WithName("in"));
ops::BinaryOp("TestAssign", var1_cpu, input, b.opts().WithName("assign1"));
ops::BinaryOp("TestAssign", var2_cpu, input, b.opts().WithName("assign2"));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "var1_cpu", "in");
EXPECT_COLOCATED(g, "assign1", "in");
EXPECT_COLOCATED(g, "var2_cpu", "in");
EXPECT_COLOCATED(g, "assign2", "in");
}
TEST_F(PlacerTest, TestGeneratorNodeDoesntFollowNonColocatedConsumers) {
Graph g(OpRegistry::Global());
{
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Node* var1_cpu =
ops::SourceOp("VariableCPU", b.opts().WithName("var1_cpu"));
Node* var2_cpu =
ops::SourceOp("VariableCPU", b.opts().WithName("var2_cpu"));
Node* input = ops::SourceOp("TestCPUGPUOutput", b.opts().WithName("in"));
ops::BinaryOp("TestAssign", var1_cpu, input, b.opts().WithName("assign1"));
ops::BinaryOp("TestAssign", var2_cpu, input, b.opts().WithName("assign2"));
TF_EXPECT_OK(BuildGraph(b, &g));
GetNodeByName(g, "var1_cpu")
->set_assigned_device_name("/job:a/replica:0/task:0/device:FakeCPU:1");
GetNodeByName(g, "var2_cpu")
->set_assigned_device_name("/job:a/replica:0/task:0/device:FakeCPU:2");
}
TF_EXPECT_OK(Place(&g));
EXPECT_COLOCATED(g, "assign1", "var1_cpu");
EXPECT_COLOCATED(g, "assign2", "var2_cpu");
EXPECT_DEVICE_TYPE(g, "in", "FakeGPU");
}
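// Dummy kernels for the standard ops used by the function-call tests below.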
REGISTER_KERNEL_BUILDER(Name("_Arg").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("_Arg").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("_Retval").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("_Retval").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Identity").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Identity").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Const").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Const").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Mul").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Mul").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Add").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("Add").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device("FakeCPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("PartitionedCall").Device("FakeGPU"), DummyOp);
REGISTER_KERNEL_BUILDER(Name("ConvertToListOfCooTensorsV2").Device("FakeCPU"),
DummyOp);
REGISTER_KERNEL_BUILDER(Name("Cast").Device("FakeCPU"), DummyOp);
TEST_P(SoftPlacementPlacerTest,
RequestedDeviceOnResourceGeneratorIsTreatedAsAssigned) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("id1", "Identity", {"a"},
{{"T", DT_RESOURCE},
{"_class", absl::Span<const string>({"loc:@id2"})}}),
NDef("id2", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
bool allow_soft_placement = GetParam();
Status s = Place(&g, allow_soft_placement, true);
if (allow_soft_placement) {
EXPECT_EQ(error::OK, s.code()) << s.ToString();
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "id1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "id2", "FakeCPU");
} else {
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot colocate nodes {{colocation_node id2}} and {{colocation_node "
"id1}}: Cannot merge devices with incompatible types: "
"'/device:FakeCPU:0' and '/device:FakeGPU:0'"))
<< s.ToString();
}
}
TEST_F(PlacerTest, RequestedDeviceCanBeOverridden) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("id_a", "Identity", {"a"}, {{"T", DT_RESOURCE}}, kGPU),
NDef("id_b", "Identity", {"b"}, {{"T", DT_RESOURCE}}, kCPU),
NDef("id1", "Identity", {"id_a"},
{{"T", DT_RESOURCE},
{"_class", absl::Span<const string>({"loc:@id2"})}}),
NDef("id2", "Identity", {"id_b"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(Place(&g));
EXPECT_COLOCATED(g, "a", "b");
EXPECT_COLOCATED(g, "id_a", "id_b");
EXPECT_COLOCATED(g, "id1", "id2");
EXPECT_COLOCATED(g, "a", "id_a");
EXPECT_COLOCATED(g, "a", "id1");
}
TEST_F(PlacerTest, AssignedDeviceOfColocatedNodeIsRespected) {
GraphDef graph = GDef({
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("iter", "IteratorGPU", {"a"}),
});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
GetNodeByName(g, "a")->set_assigned_device_name(kFullCPU);
Status s = Place(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(
absl::StrContains(s.message(),
"{{colocation_node iter}} was colocated with a "
"group of nodes that required incompatible device "
"'/job:a/replica:0/task:0/device:FakeCPU:0'"))
<< s.ToString();
}
TEST_P(SoftPlacementPlacerTest,
AssignedDevicesAreNotOverriddenDueToResourcesAndColocation) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("id_a", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("id_b", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("id1", "Identity", {"id_a"},
{{"T", DT_RESOURCE},
{"_class", absl::Span<const string>({"loc:@id2"})}}),
NDef("id2", "Identity", {"id_b"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
GetNodeByName(g, "id_a")->set_assigned_device_name(kFullGPU);
GetNodeByName(g, "id_b")->set_assigned_device_name(kFullCPU);
bool allow_soft_placement = GetParam();
Status s = Place(&g, allow_soft_placement, false);
if (allow_soft_placement) {
EXPECT_EQ(error::OK, s.code()) << s.ToString();
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "id_a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "id1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "id_b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "id2", "FakeCPU");
} else {
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot colocate nodes {{colocation_node id2}} and {{colocation_node "
"id1}}: Cannot merge devices with incompatible types: "
"'/job:a/replica:0/task:0/device:FakeCPU:0' and "
"'/job:a/replica:0/task:0/device:FakeGPU:0'"))
<< s.ToString();
}
}
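// Fixture for placement of resources flowing through PartitionedCall;
// PlacerTest(1) presumably constructs one device of each fake type.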
class NestedPlacerTest : public PlacerTest {
public:
NestedPlacerTest() : PlacerTest(1) {}
};
TEST_F(NestedPlacerTest, OutputOneResource) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, kGPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("ResourceOutput", {})}}),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_FLOAT}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeGPU");
}
TEST_F(NestedPlacerTest, OutputOneResource_ExtraIdentities) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, kGPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("ai", "Identity", {"a"}, {{"T", DT_FLOAT}}),
NDef("bi", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"ai", "bi"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("ResourceOutput", {})}}),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_FLOAT}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "ai", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "bi", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeGPU");
}
TEST_F(NestedPlacerTest, OutputOneResource_OverrideOutputResourceDevice) {
FunctionDef func = test::function::ResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, kGPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("ResourceOutput", {})}}),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}, kGPU),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_FLOAT}}),
},
{func});
Graph g(OpRegistry::Global());
TF_ASSERT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeGPU");
}
TEST_F(NestedPlacerTest, OutputTwoResources) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_EXPECT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeCPU");
}
TEST_F(NestedPlacerTest, OutputTwoResources_PCOOnCPU) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kCPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_EXPECT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "y", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeCPU");
}
TEST_F(NestedPlacerTest, OutputTwoResources_UnassignedResource) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kCPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeGPU");
}
TEST_F(NestedPlacerTest, OutputTwoResources_UnassignedResource_CPU) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kCPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeGPU");
}
TEST_F(NestedPlacerTest, OutputResourceConsumedByMultipleOps) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r3", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}, kGPU),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "a", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "b", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r3", "FakeGPU");
}
TEST_F(NestedPlacerTest, DuplicateInputResource) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"a", "a"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kGPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}, kCPU),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "a", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeCPU");
}
TEST_F(NestedPlacerTest, DuplicateInputs_OutputResourceConsumedByMultipleOps) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"a", "a"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kGPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}, kCPU),
NDef("r3", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_DEVICE_TYPE(g, "a", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r2", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r3", "FakeCPU");
}
TEST_F(NestedPlacerTest, DuplicateInputResource_Conflict) {
FunctionDef func = test::function::Swap();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"a", "a"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}},
kGPU),
NDef("r1", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}, kGPU),
NDef("r2", "Identity", {"y:1"}, {{"T", DT_RESOURCE}}, kCPU),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_ASSERT_OK(CallOptPassesAndPlace(&g, false, true));
EXPECT_SAME_TYPE(g, "a", "r1");
EXPECT_SAME_TYPE(g, "a", "r2");
}
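// The following tests verify that a resource edge forces colocation: a node
// consuming a resource output stays on the resource's device even when the
// node itself requests (or is assigned) a different device.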
TEST_F(NestedPlacerTest, TestDstDeviceIsIgnoredWhenConstrainedByResourceEdge) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("r1", "_Retval", {"y:0"}, {{"T", DT_RESOURCE}},
kGPU
),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_EXPECT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "a", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
}
TEST_F(
NestedPlacerTest,
TestDstDeviceIsIgnoredWhenConstrainedByResourceEdge_EvenWhenPCOIsPlaced) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}},
kGPU),
NDef("r1", "_Retval", {"y:0"}, {{"T", DT_RESOURCE}},
kGPU
),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
TF_EXPECT_OK(CallOptPassesAndPlace(&g));
EXPECT_DEVICE_TYPE(g, "r1", "FakeCPU");
EXPECT_DEVICE_TYPE(g, "y", "FakeGPU");
}
TEST_F(NestedPlacerTest, ResourceConflictInvolvingPCO) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("add", "Add", {"y:0", "b"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot place the graph because a reference or resource edge connects "
"colocation groups with incompatible resource devices: /device:FakeCPU:0 "
"vs /device:FakeGPU:0"))
<< s.ToString();
}
TEST_F(NestedPlacerTest, ResourceConflictInvolvingTwoPCOs) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("z", "PartitionedCall", {"b"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("add", "Add", {"y:0", "z:0"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot place the graph because a reference or resource edge connects "
"colocation groups with incompatible resource devices: /device:FakeCPU:0 "
"vs /device:FakeGPU:0"))
<< s.ToString();
}
FunctionDef CPUResourceOutput() {
return FDH::Create(
"CPUResourceOutput",
{"x: float"},
{"ds: resource", "x_out: float"},
{},
{
{{"make_ds"}, "CreateDatasetCPU", {}},
},
{{"ds", "make_ds:o:0"}, {"x_out", "x"}});
}
TEST_F(NestedPlacerTest, DeepDeviceConstraintsPropagated) {
FunctionDef func = CPUResourceOutput();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("CPUResourceOutput", {})}}),
NDef("id", "Identity", {"y:0"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
GetNodeByName(g, "id")->set_assigned_device_name(kFullGPU);
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(), "Could not satisfy explicit device specification"))
<< s.ToString();
}
FunctionDef NestedCPUResourceOutput() {
return FDH::Create(
"NestedCPUResourceOutput",
{"x: float"},
{"ds: resource", "x_out: float"},
{},
{
{{"y"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("CPUResourceOutput", {})}}},
},
{{"ds", "y:output:0"}, {"x_out", "y:output:1"}});
}
TEST_F(NestedPlacerTest, NestedDeepDeviceConstraintsPropagated) {
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_FLOAT}},
{"f", FDH::FunctionRef("NestedCPUResourceOutput", {})}}),
NDef("id", "_Retval", {"y:0"}, {{"T", DT_RESOURCE}}),
},
{CPUResourceOutput(), NestedCPUResourceOutput()});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
GetNodeByName(g, "id")->set_assigned_device_name(kFullGPU);
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(), "Could not satisfy explicit device specification"))
<< s.ToString();
}
TEST_F(NestedPlacerTest, TwoFunctionsBackToBack) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}, kCPU),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}, kGPU),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("w", "PartitionedCall", {"y:0"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("z", "PartitionedCall", {"b"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("add", "Add", {"w:0", "z:0"}, {{"T", DT_RESOURCE}}),
},
{func});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Cannot place the graph because a reference or resource edge connects "
"colocation groups with incompatible resource devices: /device:FakeCPU:0 "
"vs /device:FakeGPU:0"))
<< s.ToString();
}
FunctionDef NestedCallFunctionsBackToBack() {
return FDH::Create(
"NestedCallFunctionsBackToBack",
{},
{"output: resource"},
{},
{
{{"cpu_ds"}, "CreateDatasetCPU", {}},
{{"y"},
"PartitionedCall",
{"cpu_ds:o:0"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}},
{{"w"},
"PartitionedCall",
{"y:output:0"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}},
{{"gpu_ds"}, "CreateDatasetGPU", {}},
{{"z"},
"PartitionedCall",
{"gpu_ds:o:0"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}},
{{"add"}, "Add", {"w:output:0", "z:output:0"}, {{"T", DT_RESOURCE}}},
},
{{"output", "add:z:0"}});
}
TEST_F(NestedPlacerTest, NestedTwoFunctionsBackToBack) {
FunctionDef func = NestedCallFunctionsBackToBack();
GraphDef graph = GDef(
{
NDef("y", "PartitionedCall", {},
{{"Tin", {}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("NestedCallFunctionsBackToBack", {})}}),
},
{NestedCallFunctionsBackToBack(), test::function::ResourceIdentity()});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Nodes were connected by a reference or resource connection (requiring "
"them to be on the same device), but the two nodes were assigned two "
"different devices"))
<< s.ToString();
}
FunctionDef RecursiveResourceIdentity() {
return FDH::Create(
"RecursiveResourceIdentity",
{"x: resource"},
{"y: resource"},
{},
{
{{"out"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("RecursiveResourceIdentity", {})}}},
},
{{"y", "out:output:0"}});
}
TEST_F(NestedPlacerTest, DirectRecursion) {
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("RecursiveResourceIdentity", {})}}),
NDef("r1", "_Retval", {"y:0"}, {{"T", DT_RESOURCE}}),
},
{RecursiveResourceIdentity()});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::UNIMPLEMENTED, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Recursive function calls are not supported. Node {{node out}} inside "
"the body of {{function_node RecursiveResourceIdentity}} calls function "
"{{function_node RecursiveResourceIdentity}}"))
<< s.ToString();
}
FunctionDef RecursiveF1() {
return FDH::Create(
"RecursiveF1",
{"x: resource"},
{"y: resource"},
{},
{
{{"out"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("RecursiveF2", {})}}},
},
{{"y", "out:output:0"}});
}
FunctionDef RecursiveF2() {
return FDH::Create(
"RecursiveF2",
{"x: resource"},
{"y: resource"},
{},
{
{{"out"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("RecursiveF1", {})}}},
},
{{"y", "out:output:0"}});
}
TEST_F(NestedPlacerTest, IndirectRecursion) {
GraphDef graph = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("y", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("RecursiveF1", {})}}),
NDef("r1", "_Retval", {"y:0"}, {{"T", DT_RESOURCE}}),
},
{RecursiveF1(), RecursiveF2()});
Graph g(OpRegistry::Global());
TF_EXPECT_OK(BuildGraph(graph, &g));
Status s = CallOptPassesAndPlace(&g);
EXPECT_EQ(error::UNIMPLEMENTED, s.code()) << s.ToString();
EXPECT_TRUE(absl::StrContains(
s.message(),
"Recursive function calls are not supported. Node {{node out}} inside "
"the body of {{function_node RecursiveF2}} calls function "
"{{function_node RecursiveF1}} which is already present in the call "
"stack"))
<< s.ToString();
}
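// The remaining tests exercise Identity placement across tasks: an otherwise
// unconstrained Identity between a same-task producer and consumer is placed
// on that task, placement must not crash when the Identity has no output, and
// the heuristic does not apply when the Identity has multiple consumers.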
TEST_F(PlacerTest, IdentityMatchesInputAndOutputPlacement) {
const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0";
const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0";
GraphDef graph = GDef({
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}),
NDef("cast", "Cast", {"identity2:0"},
{{"SrcT", DT_FLOAT}, {"DstT", DT_INT32}}, task1_device),
NDef("COO", "ConvertToListOfCooTensorsV2", {"cast:0"}, {{"T", DT_INT32}},
task1_device),
});
Graph g(OpRegistry::Global());
DeviceSet multiple_tasks;
std::unique_ptr<Device> task0_cpu(FakeDevice::MakeCPU(task0_device));
multiple_tasks.AddDevice(task0_cpu.get());
std::unique_ptr<Device> task1_cpu(FakeDevice::MakeCPU(task1_device));
multiple_tasks.AddDevice(task1_cpu.get());
TF_ASSERT_OK(BuildGraph(graph, &g));
absl::Status s = Place(&g, &multiple_tasks);
TF_ASSERT_OK(s);
Node* identity2 = GetNodeByName(g, "identity2");
  EXPECT_EQ(identity2->assigned_device_name(), task1_device);
}
TEST_F(PlacerTest, IdentityWithoutOutputDoesntCrash) {
const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0";
const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0";
GraphDef graph = GDef({
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}),
});
Graph g(OpRegistry::Global());
DeviceSet multiple_tasks;
std::unique_ptr<Device> task0_cpu(FakeDevice::MakeCPU(task0_device));
multiple_tasks.AddDevice(task0_cpu.get());
std::unique_ptr<Device> task1_cpu(FakeDevice::MakeCPU(task1_device));
multiple_tasks.AddDevice(task1_cpu.get());
TF_ASSERT_OK(BuildGraph(graph, &g));
Node* identity2 = GetNodeByName(g, "identity2");
const Edge* out_edge = *identity2->out_edges().begin();
g.RemoveEdge(out_edge);
absl::Status s = Place(&g, &multiple_tasks);
TF_ASSERT_OK(s);
}
TEST_F(PlacerTest, IdentityDoesntMatchWithMultipleOutput) {
const std::string task0_device = "/job:b/replica:0/task:0/device:FakeCPU:0";
const std::string task1_device = "/job:b/replica:0/task:1/device:FakeCPU:0";
GraphDef graph = GDef({
NDef("a", "_Arg", {}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity1", "Identity", {"a"}, {{"T", DT_FLOAT}}, task1_device),
NDef("identity2", "Identity", {"identity1:0"}, {{"T", DT_FLOAT}}),
NDef("cast", "Cast", {"identity2:0"},
{{"SrcT", DT_FLOAT}, {"DstT", DT_INT32}}, task1_device),
NDef("COO", "ConvertToListOfCooTensorsV2", {"cast:0"}, {{"T", DT_INT32}},
task1_device),
NDef("identity3", "Identity", {"identity2:0"}, {{"T", DT_FLOAT}}),
});
Graph g(OpRegistry::Global());
DeviceSet multiple_tasks;
std::unique_ptr<Device> task0_cpu(FakeDevice::MakeCPU(task0_device));
multiple_tasks.AddDevice(task0_cpu.get());
std::unique_ptr<Device> task1_cpu(FakeDevice::MakeCPU(task1_device));
multiple_tasks.AddDevice(task1_cpu.get());
TF_ASSERT_OK(BuildGraph(graph, &g));
absl::Status s = Place(&g, &multiple_tasks);
TF_ASSERT_OK(s);
Node* identity2 = GetNodeByName(g, "identity2");
  EXPECT_EQ(identity2->assigned_device_name(), task0_device);
}
}  // namespace
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/placer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/placer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6c1c0925-8ddf-4895-8858-8f20be4ec17b | cpp | tensorflow/tensorflow | graph_view | tensorflow/core/grappler/utils/graph_view.cc | tensorflow/core/grappler/utils/graph_view_test.cc | #include "tensorflow/core/grappler/utils/graph_view.h"
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view_internal.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace utils {
FaninView::FaninView(NodeView* node_view, int index)
: NodeIndexAndPortIndex(node_view->graph_view_, node_view->node_index_,
index) {}
FanoutView::FanoutView(NodeView* node_view, int index)
: NodeIndexAndPortIndex(node_view->graph_view_, node_view->node_index_,
index) {}
const NodeDef* NodeView::node() const {
return &graph_view_->graph()->node(node_index_);
}
bool NodeView::HasFanin(const FanoutView& fanin) const {
if (fanin.index() < Graph::kControlSlot || graph_view_ != fanin.graph_view_) {
return false;
}
return fanins_set_.contains(
{&graph_view_->graph_->node(fanin.node_index_), fanin.index()});
}
bool NodeView::HasFanout(const FaninView& fanout) const {
if (fanout.index() < Graph::kControlSlot ||
graph_view_ != fanout.graph_view_) {
return false;
}
NodeView* view = fanout.node_view();
if (view == nullptr) {
return false;
} else if (fanout.index() == Graph::kControlSlot) {
return view->fanins_set_.contains({this->node(), Graph::kControlSlot});
} else if (fanout.index() >= static_cast<int>(view->regular_fanins_.size())) {
return false;
}
return view->regular_fanins_[fanout.index()].node_index_ == node_index_;
}
inline const FanoutView& NodeView::GetMissingFanin() const {
return graph_view_->missing_fanin_;
}
inline const std::vector<FaninView>& NodeView::GetMissingFanout() const {
return graph_view_->missing_fanout_;
}
namespace {
const char kGraphViewError[] = "GraphView::GraphView error: ";
}  // namespace
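// Builds an immutable view over `graph`. Construction fails with
// InvalidArgument if the graph has duplicate node names, a self-cycle fanin,
// a regular fanin after a controlling fanin, or a fanin naming a missing
// node; on failure the view is reset to an empty state.
//
// Typical usage (illustrative sketch; assumes `graph_def` is a valid
// GraphDef):
//   Status status;
//   GraphView graph_view(&graph_def, &status);
//   TF_RETURN_IF_ERROR(status);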
GraphView::GraphView(const GraphDef* graph, Status* status)
: GraphViewInternal(graph) {
const int num_nodes = graph->node_size();
node_index_by_name_.reserve(num_nodes);
nodes_.reserve(num_nodes);
for (const NodeDef& node : graph->node()) {
if (!AddUniqueNodeInternal(&node)) {
*status = errors::InvalidArgument(
kGraphViewError, "graph has multiple nodes with the name '",
node.name(), "'.");
Reset();
return;
}
}
Status s;
for (NodeView& node_view : nodes_) {
s = CheckAndAddFaninsInternal(&node_view);
if (!s.ok()) {
*status = s;
Reset();
return;
}
}
*status = absl::OkStatus();
}
bool GraphView::AddUniqueNodeInternal(const NodeDef* node) {
const int node_index = node_index_by_name_.size();
auto it = node_index_by_name_.emplace(node->name(), node_index);
if (it.second) {
nodes_.emplace_back(this, node_index);
return true;
}
return false;
}
Status GraphView::CheckAndAddFaninsInternal(NodeView* node_view) {
bool has_observed_control = false;
const NodeDef* node = node_view->node();
const string& node_name = node->name();
const int node_index = node_view->node_index_;
node_view->fanins_set_.reserve(node->input_size());
for (const string& input : node->input()) {
TensorId fanin_id = ParseTensorName(input);
if (fanin_id.node() == node_name) {
return errors::InvalidArgument(kGraphViewError, "node '", node_name,
"' has self cycle fanin '", input, "'.");
}
bool is_control = IsTensorIdControl(fanin_id);
if (!is_control && has_observed_control) {
return errors::InvalidArgument(kGraphViewError, "node '", node_name,
"' has regular fanin '", input,
"' after controlling fanins.");
}
auto it = node_index_by_name_.find(fanin_id.node());
if (it == node_index_by_name_.end()) {
return errors::InvalidArgument(kGraphViewError, "node '", node_name,
"' has missing fanin '", input, "'.");
}
const int fanin_node_index = it->second;
NodeView& fanin_node_view = nodes_[fanin_node_index];
if (is_control) {
fanin_node_view.controlled_fanouts_.emplace_back(this, node_index,
Graph::kControlSlot);
node_view->controlling_fanins_.emplace_back(this, fanin_node_index,
Graph::kControlSlot);
node_view->fanins_set_.emplace(fanin_node_view.node(),
Graph::kControlSlot);
has_observed_control = true;
} else {
int fanin_node_view_regular_fanouts_by_port_size =
fanin_node_view.regular_fanouts_by_port_.size();
if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) {
fanin_node_view.regular_fanouts_by_port_.resize(fanin_id.index() + 1);
}
fanin_node_view.regular_fanouts_by_port_[fanin_id.index()].emplace_back(
this, node_index, node_view->regular_fanins_.size());
++fanin_node_view.num_regular_fanouts_;
node_view->regular_fanins_.emplace_back(this, fanin_node_index,
fanin_id.index());
node_view->fanins_set_.emplace(fanin_node_view.node(), fanin_id.index());
}
}
return absl::OkStatus();
}
MutableFaninView::MutableFaninView(MutableNodeView* node_view, int index)
: NodeIndexAndPortIndex(node_view->graph_view_, node_view->node_index_,
index) {}
MutableFanoutView::MutableFanoutView(MutableNodeView* node_view, int index)
: NodeIndexAndPortIndex(node_view->graph_view_, node_view->node_index_,
index) {}
NodeDef* MutableNodeView::node() const {
return graph_view_->graph()->mutable_node(node_index_);
}
bool MutableNodeView::HasFanin(const MutableFanoutView& fanin) const {
if (fanin.index() < Graph::kControlSlot || graph_view_ != fanin.graph_view_) {
return false;
}
return fanins_count_.contains(
{&graph_view_->graph_->node(fanin.node_index_), fanin.index()});
}
bool MutableNodeView::HasFanout(const MutableFaninView& fanout) const {
if (fanout.index() < Graph::kControlSlot ||
graph_view_ != fanout.graph_view_) {
return false;
}
MutableNodeView* view = fanout.node_view();
if (view == nullptr) {
return false;
} else if (fanout.index() == Graph::kControlSlot) {
return view->fanins_count_.contains({this->node(), Graph::kControlSlot});
} else if (fanout.index() >= static_cast<int>(view->regular_fanins_.size())) {
return false;
}
return view->regular_fanins_[fanout.index()].node_index_ == node_index_;
}
const MutableFanoutView& MutableNodeView::GetMissingFanin() const {
return graph_view_->missing_fanin_;
}
const std::vector<MutableFaninView>& MutableNodeView::GetMissingFanout() const {
return graph_view_->missing_fanout_;
}
namespace {
const char kMutationAddNodeError[] = "Mutation::AddNode error: ";
bool IsTensorIdRegular(const TensorId& tensor_id) {
return tensor_id.index() >= 0;
}
}  // namespace
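// A Mutation stages edits (new nodes, removals, renames, op/device/attr and
// fanin updates) against a MutableGraphView; the underlying GraphDef is only
// modified when Apply() is called. AddNode enforces the same fanin invariants
// as graph construction and splits the node's inputs into staged regular and
// controlling fanins.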
Mutation::Mutation(MutableGraphView* graph_view) : graph_view_(graph_view) {}
MutationNewNode Mutation::AddNode(NodeDef&& node, Status* status) {
bool has_observed_control = false;
const string& node_name = node.name();
std::vector<SafeTensorId> regular_fanins;
absl::flat_hash_set<string> controlling_fanins;
const int num_fanins = node.input_size();
for (int i = 0; i < num_fanins; ++i) {
const string& input = node.input(i);
TensorId fanin_id = ParseTensorName(input);
if (fanin_id.node() == node_name) {
*status =
errors::InvalidArgument(kMutationAddNodeError, "node '", node_name,
"' has self cycle fanin '", input, "'.");
return MutationNewNode(this, mutation_counter_, internal::kMissingIndex);
}
bool is_control = IsTensorIdControl(fanin_id);
if (is_control) {
has_observed_control = true;
controlling_fanins.emplace(fanin_id.node());
} else if (has_observed_control) {
*status = errors::InvalidArgument(kMutationAddNodeError, "node '",
node_name, "' has regular fanin '",
input, "' after controlling fanins.");
return MutationNewNode(this, mutation_counter_, internal::kMissingIndex);
} else {
regular_fanins.emplace_back(fanin_id);
}
}
node.mutable_input()->Clear();
new_nodes_.emplace_back(graph_view_, std::move(node));
MutationNewNodeHolder& mutation_node = new_nodes_.back();
mutation_node.regular_fanins = std::move(regular_fanins);
mutation_node.num_regular_fanins = mutation_node.regular_fanins.size();
mutation_node.controlling_fanins = std::move(controlling_fanins);
*status = absl::OkStatus();
return MutationNewNode(this, mutation_counter_, new_nodes_.size() - 1);
}
void Mutation::AddMutation(
MutableNodeView* node,
std::function<bool(MutableNodeViewDiff*)> mutate_fn) {
DCHECK(node->graph_view_ == graph_view_);
if (node->update_index_ == internal::kMissingIndex) {
MutableNodeViewDiff diff(graph_view_, node->node_index_);
if (!mutate_fn(&diff)) return;
node->update_index_ = updated_nodes_.size();
updated_nodes_.push_back(std::move(diff));
} else if (!removed_nodes_.contains(node->node_index_)) {
MutableNodeViewDiff& diff = updated_nodes_[node->update_index_];
mutate_fn(&diff);
}
}
void Mutation::RemoveNode(MutableNodeView* node) {
auto& update_index = node->update_index_;
if (update_index != internal::kMissingIndex) {
int updated_nodes_size = updated_nodes_.size();
if (update_index < updated_nodes_size - 1) {
graph_view_->nodes_[updated_nodes_.back().node_index].update_index_ =
update_index;
std::swap(updated_nodes_[update_index], updated_nodes_.back());
}
updated_nodes_.pop_back();
update_index = internal::kMissingIndex;
}
removed_nodes_.insert(node->node_index_);
}
void Mutation::UpdateNodeName(MutableNodeView* node, absl::string_view name) {
AddMutation(node, [name](MutableNodeViewDiff* diff) {
return internal::UpdateName(diff, name);
});
}
void Mutation::UpdateNodeName(const MutationNewNode& node,
absl::string_view name) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::UpdateName(&new_nodes_[node.index_], name);
}
void Mutation::UpdateNodeOp(MutableNodeView* node, absl::string_view op) {
AddMutation(node, [op](MutableNodeViewDiff* diff) {
return internal::UpdateOp(diff, op);
});
}
void Mutation::UpdateNodeOp(const MutationNewNode& node, absl::string_view op) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::UpdateOp(&new_nodes_[node.index_], op);
}
void Mutation::UpdateNodeDevice(MutableNodeView* node,
absl::string_view device) {
AddMutation(node, [device](MutableNodeViewDiff* diff) {
return internal::UpdateDevice(diff, device);
});
}
void Mutation::UpdateNodeDevice(const MutationNewNode& node,
absl::string_view device) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::UpdateDevice(&new_nodes_[node.index_], device);
}
void Mutation::AddOrUpdateRegularFanin(MutableNodeView* node, int index,
const TensorId& fanin) {
AddMutation(node, [index, fanin](MutableNodeViewDiff* diff) {
return internal::AddOrUpdateRegularFanin(diff, index, fanin);
});
}
void Mutation::AddOrUpdateRegularFanin(const MutationNewNode& node, int index,
const TensorId& fanin) {
DCHECK(node.mutation_ == this &&
node.mutation_counter_ == mutation_counter_ && index >= 0 &&
IsTensorIdRegular(fanin));
internal::AddOrUpdateRegularFanin(&new_nodes_[node.index_], index, fanin);
}
void Mutation::RemoveRegularFanin(MutableNodeView* node, int index) {
AddMutation(node, [index](MutableNodeViewDiff* diff) {
return internal::RemoveRegularFanin(diff, index);
});
}
void Mutation::RemoveRegularFanin(const MutationNewNode& node, int index) {
DCHECK(node.mutation_ == this &&
node.mutation_counter_ == mutation_counter_ && index >= 0);
internal::RemoveRegularFanin(&new_nodes_[node.index_], index);
}
void Mutation::AddControllingFanin(MutableNodeView* node,
absl::string_view fanin_node_name) {
AddMutation(node, [node, fanin_node_name](MutableNodeViewDiff* diff) {
auto it = node->controlling_fanins_index_.find(fanin_node_name);
const int control_index = it != node->controlling_fanins_index_.end()
? it->second
: internal::kMissingIndex;
return internal::AddControllingFanin(diff, control_index, fanin_node_name);
});
}
void Mutation::AddControllingFanin(const MutationNewNode& node,
absl::string_view fanin_node_name) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::AddControllingFanin(&new_nodes_[node.index_], fanin_node_name);
}
void Mutation::RemoveControllingFanin(MutableNodeView* node,
absl::string_view fanin_node_name) {
AddMutation(node, [node, fanin_node_name](MutableNodeViewDiff* diff) {
auto it = node->controlling_fanins_index_.find(fanin_node_name);
const int control_index = it != node->controlling_fanins_index_.end()
? it->second
: internal::kMissingIndex;
return internal::RemoveControllingFanin(diff, control_index,
fanin_node_name);
});
}
void Mutation::RemoveControllingFanin(const MutationNewNode& node,
absl::string_view fanin_node_name) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::RemoveControllingFanin(&new_nodes_[node.index_], fanin_node_name);
}
void Mutation::AddOrUpdateNodeAttr(MutableNodeView* node,
absl::string_view attr_name,
const AttrValue& attr_value) {
AddMutation(node, [attr_name, attr_value](MutableNodeViewDiff* diff) {
return internal::AddOrUpdateAttribute(diff, attr_name, attr_value);
});
}
void Mutation::AddOrUpdateNodeAttr(const MutationNewNode& node,
absl::string_view attr_name,
const AttrValue& attr_value) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::AddOrUpdateAttribute(&new_nodes_[node.index_], attr_name,
attr_value);
}
void Mutation::RemoveNodeAttr(MutableNodeView* node,
absl::string_view attr_name) {
AddMutation(node, [attr_name](MutableNodeViewDiff* diff) {
return internal::RemoveAttribute(diff, attr_name);
});
}
void Mutation::RemoveNodeAttr(const MutationNewNode& node,
absl::string_view attr_name) {
DCHECK(node.mutation_ == this && node.mutation_counter_ == mutation_counter_);
internal::RemoveAttribute(&new_nodes_[node.index_], attr_name);
}
void Mutation::ResetInternal() {
updated_nodes_.clear();
removed_nodes_.clear();
new_nodes_.clear();
}
void Mutation::Reset() {
for (const auto& update : updated_nodes_) {
graph_view_->nodes_[update.node_index].update_index_ =
internal::kMissingIndex;
}
ResetInternal();
}
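// Applies every staged change by delegating to the owning MutableGraphView;
// the validation helpers below (e.g. CheckNodeNamesAndFanins) can reject an
// ill-formed mutation with a non-OK status.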
Status Mutation::Apply() { return graph_view_->ApplyMutationInternal(); }
namespace {
const char kMutableGraphViewError[] =
"MutableGraphView::MutableGraphView error: ";
const char kMutableGraphViewApplyError[] = "Mutation::Apply error: ";
inline void IncrementFaninCount(
absl::flat_hash_map<internal::NodeDefAndPortIndex, int>* fanins_count,
const internal::NodeDefAndPortIndex& fanin) {
++(*fanins_count)[fanin];
}
inline void DecrementFaninCount(
absl::flat_hash_map<internal::NodeDefAndPortIndex, int>* fanins_count,
const internal::NodeDefAndPortIndex& fanin) {
auto it = fanins_count->find(fanin);
if (it != fanins_count->end()) {
if (it->second <= 1) {
fanins_count->erase(it);
} else {
--it->second;
}
}
}
}  // namespace
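// Builds a mutable view over `graph` with the same validation as GraphView;
// all edits go through the Mutation returned by GetMutationBuilder().
//
// Typical usage (illustrative sketch; assumes `graph_def` is a valid GraphDef
// containing a node named "x"):
//   Status status;
//   MutableGraphView graph_view(&graph_def, &status);
//   TF_RETURN_IF_ERROR(status);
//   Mutation* mutation = graph_view.GetMutationBuilder();
//   mutation->UpdateNodeDevice(graph_view.GetNode("x"), "/device:CPU:0");
//   TF_RETURN_IF_ERROR(mutation->Apply());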
MutableGraphView::MutableGraphView(GraphDef* graph, Status* status)
: GraphViewInternal(graph), mutation_(Mutation(this)) {
const int num_nodes = graph->node_size();
node_index_by_name_.reserve(num_nodes);
nodes_.reserve(num_nodes);
for (NodeDef& node : *graph->mutable_node()) {
if (!AddUniqueNodeInternal(&node)) {
*status = errors::InvalidArgument(
kMutableGraphViewError, "graph has multiple nodes with the name '",
node.name(), "'.");
Reset();
return;
}
}
std::vector<std::vector<TensorId>> fanins;
Status s = CheckFaninsInternal(&fanins);
if (!s.ok()) {
*status = s;
Reset();
return;
}
AddFaninsInternal(&fanins);
mutation_.ResetInternal();
*status = absl::OkStatus();
}
Mutation* MutableGraphView::GetMutationBuilder() { return &mutation_; }
bool MutableGraphView::AddUniqueNodeInternal(NodeDef* node) {
const int node_index = node_index_by_name_.size();
auto it = node_index_by_name_.emplace(node->name(), node_index);
if (it.second) {
nodes_.emplace_back(this, node_index);
return true;
}
return false;
}
Status MutableGraphView::CheckFaninsInternal(
std::vector<std::vector<TensorId>>* fanins) {
const int num_nodes = nodes_.size();
fanins->reserve(num_nodes);
for (int i = 0; i < num_nodes; ++i) {
bool has_observed_control = false;
const NodeDef* node = nodes_[i].node();
const string& node_name = node->name();
std::vector<TensorId> node_fanins;
node_fanins.reserve(node->input_size());
for (const string& input : node->input()) {
TensorId fanin_id = ParseTensorName(input);
if (fanin_id.node() == node_name) {
return errors::InvalidArgument(kMutableGraphViewError, "node '",
node_name, "' has self cycle fanin '",
input, "'.");
}
bool is_control = IsTensorIdControl(fanin_id);
if (!is_control && has_observed_control) {
return errors::InvalidArgument(kMutableGraphViewError, "node '",
node_name, "' has regular fanin '",
input, "' after controlling fanins.");
}
if (!node_index_by_name_.contains(fanin_id.node())) {
return errors::InvalidArgument(kMutableGraphViewError, "node '",
node_name, "' has missing fanin '",
input, "'.");
}
if (is_control) {
has_observed_control = true;
}
node_fanins.push_back(std::move(fanin_id));
}
fanins->push_back(std::move(node_fanins));
}
return absl::OkStatus();
}
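// Populates fanin/fanout bookkeeping for every node. Duplicate controlling
// fanins are removed in place: repeats are swapped to the tail of the node's
// input list and trimmed with a single DeleteSubrange.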
void MutableGraphView::AddFaninsInternal(
std::vector<std::vector<TensorId>>* fanins) {
const int num_nodes = nodes_.size();
for (int i = 0; i < num_nodes; ++i) {
MutableNodeView& node_view = nodes_[i];
NodeDef* node = node_view.node();
std::vector<TensorId>& node_fanins = fanins->at(i);
absl::flat_hash_set<absl::string_view> observed_controls;
int pos = 0;
const int last_idx = node_fanins.size() - 1;
int last_pos = last_idx;
node_view.fanins_count_.reserve(node->input_size());
node_view.controlling_fanins_index_.reserve(node->input_size());
while (pos <= last_pos) {
const TensorId& fanin_id = node_fanins[pos];
bool is_control = IsTensorIdControl(fanin_id);
const int fanin_node_index = node_index_by_name_[fanin_id.node()];
MutableNodeView& fanin_node_view = nodes_[fanin_node_index];
if (is_control) {
if (gtl::InsertIfNotPresent(&observed_controls, fanin_id.node())) {
fanin_node_view.controlled_fanouts_.emplace_back(
this, i, Graph::kControlSlot,
node_view.controlling_fanins_.size());
node_view.controlling_fanins_.emplace_back(
this, fanin_node_index, Graph::kControlSlot,
fanin_node_view.controlled_fanouts_.size() - 1);
IncrementFaninCount(
&node_view.fanins_count_,
{&graph_->node(fanin_node_index), Graph::kControlSlot});
node_view.controlling_fanins_index_.emplace(
fanin_id.node(), pos - node_view.NumRegularFanins());
++pos;
} else {
node->mutable_input()->SwapElements(pos, last_pos);
std::swap(node_fanins[pos], node_fanins[last_pos]);
--last_pos;
}
} else {
int fanin_node_view_regular_fanouts_by_port_size =
fanin_node_view.regular_fanouts_by_port_.size();
if (fanin_node_view_regular_fanouts_by_port_size <
fanin_id.index() + 1) {
fanin_node_view.regular_fanouts_by_port_.resize(fanin_id.index() + 1);
}
auto& fanin_regular_fanouts =
fanin_node_view.regular_fanouts_by_port_[fanin_id.index()];
fanin_regular_fanouts.emplace_back(this, i,
node_view.regular_fanins_.size(),
node_view.regular_fanins_.size());
++fanin_node_view.num_regular_fanouts_;
node_view.regular_fanins_.emplace_back(
this, fanin_node_index, fanin_id.index(),
fanin_regular_fanouts.size() - 1);
IncrementFaninCount(
&node_view.fanins_count_,
{&graph_->node(fanin_node_index), fanin_id.index()});
++pos;
}
}
if (last_pos < last_idx) {
node->mutable_input()->DeleteSubrange(last_pos + 1, last_idx - last_pos);
}
}
}
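// Partitions the staged diffs into renamed nodes, in-place updates, and empty
// (no-op) diffs, collecting node names along the way so that duplicate names
// introduced by the mutation are rejected.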
Status MutableGraphView::GetNodeNamesAndPartitionUpdatedNodes(
absl::flat_hash_map<absl::string_view, int>* node_names,
std::vector<RenamedOrOverwrittenNode>* renamed_nodes,
std::vector<int>* inplace_nodes,
std::vector<int>* empty_diff_node_indices) {
for (const auto& diff : mutation_.updated_nodes_) {
if (diff.update_name) {
const int index = diff.node_index;
const string& node_name = nodes_[index].GetName();
node_names->emplace(node_name, index);
}
}
for (int node_index : mutation_.removed_nodes_) {
const string& node_name = nodes_[node_index].GetName();
node_names->emplace(node_name, node_index);
}
auto name_conflict = [](const absl::string_view node_name) {
return errors::InvalidArgument(kMutableGraphViewApplyError,
"multiple nodes with the name: '", node_name,
"' exists in Mutation.");
};
const int num_updated_nodes = mutation_.updated_nodes_.size();
renamed_nodes->reserve(num_updated_nodes);
inplace_nodes->reserve(num_updated_nodes);
empty_diff_node_indices->reserve(num_updated_nodes);
for (int i = 0; i < num_updated_nodes; ++i) {
auto& diff = mutation_.updated_nodes_[i];
if (internal::IsEmpty(&diff)) {
empty_diff_node_indices->emplace_back(diff.node_index);
continue;
}
const string& node_name =
diff.update_name ? diff.name : nodes_[diff.node_index].GetName();
auto it = node_names->insert({node_name, internal::kNodeNamePresent});
if (!it.second) {
if (it.first->second == internal::kNodeNamePresent) {
return name_conflict(node_name);
} else {
it.first->second = internal::kNodeNamePresent;
}
}
if (diff.update_name) {
auto node_name_it = node_index_by_name_.find(node_name);
const int overwritten_node_index =
node_name_it != node_index_by_name_.end() ? node_name_it->second
: internal::kMissingIndex;
renamed_nodes->emplace_back(i, overwritten_node_index);
} else {
inplace_nodes->push_back(i);
}
}
for (const auto& new_node : mutation_.new_nodes_) {
const string& node_name = new_node.node.name();
auto it = node_names->insert({node_name, internal::kNodeNamePresent});
if (it.second) {
continue;
}
if (it.first->second == internal::kNodeNamePresent) {
return name_conflict(node_name);
} else {
it.first->second = internal::kNodeNamePresent;
}
}
return absl::OkStatus();
}
Status MutableGraphView::RemovedOrMissingNodeFanoutsWellFormed(
const absl::flat_hash_map<absl::string_view, int>& node_names,
const std::vector<RenamedOrOverwrittenNode>& renamed_nodes) {
auto bad_fanout = [](absl::string_view fanout_node_name,
absl::string_view node_name) {
return errors::InvalidArgument(
kMutableGraphViewApplyError, "fanout '", fanout_node_name,
"' exist for missing node '", node_name, "'.");
};
std::vector<bool> overwritten_nodes(NumNodes());
for (auto& renamed_node : renamed_nodes) {
if (renamed_node.overwritten_node_index_ == internal::kMissingIndex) {
continue;
}
overwritten_nodes[renamed_node.overwritten_node_index_] = true;
}
for (const auto& node_name_state : node_names) {
if (node_name_state.second == internal::kNodeNamePresent) {
continue;
}
const MutableNodeView& node_view = nodes_[node_name_state.second];
for (const auto& regular_fanouts : node_view.GetRegularFanouts()) {
for (const auto& regular_fanout : regular_fanouts) {
MutableNodeView* fanout_view = regular_fanout.node_view();
if (fanout_view->update_index_ == internal::kMissingIndex) {
if (mutation_.removed_nodes_.contains(fanout_view->node_index_)) {
continue;
} else if (!overwritten_nodes[fanout_view->node_index_]) {
return bad_fanout(fanout_view->GetName(), node_name_state.first);
}
} else {
auto& diff = mutation_.updated_nodes_[fanout_view->update_index_];
const int last_index = fanout_view->NumRegularFanins() -
diff.num_regular_inputs_to_remove - 1;
if (regular_fanout.index() > last_index) {
continue;
}
if (diff.regular_inputs_to_update.find(regular_fanout.index()) ==
diff.regular_inputs_to_update.end()) {
return bad_fanout(fanout_view->GetName(), node_name_state.first);
}
}
}
}
for (const auto& controlled_fanout : node_view.GetControlledFanouts()) {
MutableNodeView* fanout_view = controlled_fanout.node_view();
if (fanout_view->update_index_ == internal::kMissingIndex) {
if (mutation_.removed_nodes_.contains(fanout_view->node_index_)) {
continue;
} else if (!overwritten_nodes[fanout_view->node_index_]) {
return bad_fanout(fanout_view->GetName(), node_name_state.first);
}
} else {
auto& diff = mutation_.updated_nodes_[fanout_view->update_index_];
if (diff.controlling_inputs_to_remove.find(
controlled_fanout.fanin_index_) ==
diff.controlling_inputs_to_remove.end()) {
return bad_fanout(fanout_view->GetName(), node_name_state.first);
}
}
}
}
return absl::OkStatus();
}
Status MutableGraphView::CheckNodeNamesAndFanins(
const absl::flat_hash_map<absl::string_view, int>& node_names,
const std::vector<RenamedOrOverwrittenNode>& renamed_nodes,
const std::vector<int>& inplace_nodes) {
TF_RETURN_IF_ERROR(
RemovedOrMissingNodeFanoutsWellFormed(node_names, renamed_nodes));
for (auto& inplace_node : inplace_nodes) {
auto& diff = mutation_.updated_nodes_[inplace_node];
if (!internal::IsWellFormed(&diff, node_names)) {
return errors::InvalidArgument(
kMutableGraphViewApplyError, "inplace updated node '",
nodes_[diff.node_index].GetName(), "' is ill-formed.");
}
}
for (auto& renamed_node : renamed_nodes) {
auto& diff = mutation_.updated_nodes_[renamed_node.renamed_update_index_];
if (!internal::IsWellFormed(&diff, node_names)) {
return errors::InvalidArgument(
kMutableGraphViewApplyError, "renamed updated node '", diff.name,
"' ('", nodes_[diff.node_index].GetName(), "') is ill-formed.");
}
}
for (auto& new_node : mutation_.new_nodes_) {
if (!internal::IsWellFormed(&new_node, node_names)) {
return errors::InvalidArgument(kMutableGraphViewApplyError, "new node '",
new_node.node.name(), "' is ill-formed.");
}
}
return absl::OkStatus();
}
Status MutableGraphView::CheckKernelRegisteredForNodes() {
Status s;
for (auto& diff : mutation_.updated_nodes_) {
if (internal::IsEmpty(&diff)) {
continue;
}
NodeDef* node = nodes_[diff.node_index].node();
diff.processed_attrs =
AttrValueMap(node->attr().begin(), node->attr().end());
for (const auto& attr_to_remove : diff.attrs_to_remove) {
(*diff.processed_attrs).erase(attr_to_remove);
}
for (const auto& attr_to_add : diff.attrs_to_add) {
gtl::InsertOrUpdate(&(*diff.processed_attrs), attr_to_add.first,
attr_to_add.second);
}
const string& device = diff.update_device ? diff.device : node->device();
DeviceNameUtils::ParsedName name;
if (device.empty() || !DeviceNameUtils::ParseFullName(device, &name) ||
!name.has_type) {
continue;
}
s = IsKernelRegisteredForNode(diff.update_name ? diff.name : node->name(),
node->has_experimental_debug_info(),
node->experimental_debug_info(),
diff.update_op ? diff.op : node->op(), device,
AttrSlice(&(*diff.processed_attrs)));
if (!s.ok()) {
LOG(WARNING) << s.message();
}
}
for (const auto& new_node_holder : mutation_.new_nodes_) {
const auto& new_node_def = new_node_holder.node;
DeviceNameUtils::ParsedName name;
if (new_node_def.device().empty() ||
!DeviceNameUtils::ParseFullName(new_node_def.device(), &name) ||
!name.has_type) {
continue;
}
s = IsKernelRegisteredForNode(new_node_def);
if (!s.ok()) {
LOG(WARNING) << s.message();
}
}
return absl::OkStatus();
}
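// Transfers the fanout bookkeeping held in `fanouts` onto `node`, repointing
// every downstream fanin at `node`'s index and updating the affected fanin
// counts. Used when a renamed or newly added node takes over an existing
// node's name.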
template <typename T>
void MutableGraphView::ReplaceNodeFanouts(MutableNodeView* node, T* fanouts) {
node->num_regular_fanouts_ = fanouts->num_regular_fanouts_;
node->regular_fanouts_by_port_ = std::move(fanouts->regular_fanouts_by_port_);
for (int i = 0, i_max = node->regular_fanouts_by_port_.size(); i < i_max;
++i) {
for (int j = 0, j_max = node->regular_fanouts_by_port_[i].size(); j < j_max;
++j) {
auto& fanout = node->regular_fanouts_by_port_[i][j];
auto* fanout_node_view = fanout.node_view();
auto& fanout_fanin = fanout_node_view->regular_fanins_[fanout.index()];
auto* fanout_fanins_count = &fanout_node_view->fanins_count_;
DecrementFaninCount(
fanout_fanins_count,
{&graph_->node(fanout_fanin.node_index_), fanout_fanin.index()});
fanout_fanin.node_index_ = node->node_index_;
IncrementFaninCount(
fanout_fanins_count,
{&graph_->node(node->node_index_), fanout_fanin.index()});
}
}
node->controlled_fanouts_ = std::move(fanouts->controlled_fanouts_);
for (int i = 0, i_max = node->controlled_fanouts_.size(); i < i_max; ++i) {
auto& fanout = node->controlled_fanouts_[i];
auto* fanout_node_view = fanout.node_view();
auto& fanout_fanin =
fanout_node_view->controlling_fanins_[fanout.fanin_index_];
auto* fanout_fanins_count = &fanout_node_view->fanins_count_;
DecrementFaninCount(
fanout_fanins_count,
{&graph_->node(fanout_fanin.node_index_), Graph::kControlSlot});
fanout_fanin.node_index_ = node->node_index_;
fanout_fanin.fanout_index_ = i;
IncrementFaninCount(fanout_fanins_count, {&graph_->node(node->node_index_),
Graph::kControlSlot});
}
}
void MutableGraphView::FixRenamedNodes(
std::vector<RenamedOrOverwrittenNode>* renamed_nodes,
absl::flat_hash_map<string, NodeViewFanouts>* renamed_fanouts,
std::vector<bool>* overwritten_name_removed_nodes) {
renamed_fanouts->reserve(renamed_nodes->size());
for (auto& renamed : *renamed_nodes) {
auto& diff = mutation_.updated_nodes_[renamed.renamed_update_index_];
node_index_by_name_.erase(nodes_[diff.node_index].GetName());
MutableNodeView& renamed_node = nodes_[diff.node_index];
renamed_fanouts->try_emplace(
renamed_node.GetName(),
std::move(renamed_node.regular_fanouts_by_port_),
renamed_node.num_regular_fanouts_,
std::move(renamed_node.controlled_fanouts_));
}
for (auto& renamed : *renamed_nodes) {
auto& diff = mutation_.updated_nodes_[renamed.renamed_update_index_];
MutableNodeView& renamed_node = nodes_[diff.node_index];
auto fanouts_it = renamed_fanouts->find(diff.name);
if (fanouts_it != renamed_fanouts->end()) {
auto& fanouts = fanouts_it->second;
ReplaceNodeFanouts(&renamed_node, &fanouts);
renamed_fanouts->erase(fanouts_it);
renamed.overwritten_node_index_ = internal::kMissingIndex;
} else if (renamed.overwritten_node_index_ != internal::kMissingIndex) {
MutableNodeView& node_to_overwrite =
nodes_[renamed.overwritten_node_index_];
ReplaceNodeFanouts(&renamed_node, &node_to_overwrite);
node_index_by_name_.erase(node_to_overwrite.GetName());
if (mutation_.removed_nodes_.contains(node_to_overwrite.node_index_)) {
(*overwritten_name_removed_nodes)[node_to_overwrite.node_index_] = true;
}
} else {
renamed_node.num_regular_fanouts_ = 0;
}
renamed_node.node()->set_name(diff.name);
diff.update_name = false;
diff.name.clear();
node_index_by_name_.emplace(renamed_node.GetName(), diff.node_index);
}
}
void MutableGraphView::AddNewNodes(
absl::flat_hash_map<string, NodeViewFanouts>* renamed_fanouts,
std::vector<int>* new_node_indices) {
new_node_indices->reserve(mutation_.new_nodes_.size());
for (auto& new_node : mutation_.new_nodes_) {
int node_index;
auto graph_it = node_index_by_name_.find(new_node.node.name());
if (graph_it != node_index_by_name_.end()) {
node_index = graph_it->second;
MutableNodeView& node_view = nodes_[node_index];
RemoveAllFaninFanoutInternal(&node_view);
auto* node_def = graph_->mutable_node(node_index);
node_def->mutable_op()->swap(*new_node.node.mutable_op());
node_def->mutable_device()->swap(*new_node.node.mutable_device());
node_def->mutable_input()->Clear();
node_def->mutable_attr()->swap(*new_node.node.mutable_attr());
mutation_.removed_nodes_.erase(node_index);
} else {
auto* new_node_def = graph_->add_node();
*new_node_def = std::move(new_node.node);
node_index = nodes_.size();
nodes_.emplace_back(this, node_index);
MutableNodeView& new_node_view = nodes_.back();
auto it = renamed_fanouts->find(new_node_view.GetName());
if (it != renamed_fanouts->end()) {
NodeViewFanouts& fanouts = it->second;
ReplaceNodeFanouts(&new_node_view, &fanouts);
renamed_fanouts->erase(it);
}
node_index_by_name_.emplace(new_node_view.GetName(), node_index);
}
new_node_indices->emplace_back(node_index);
}
}
void MutableGraphView::FixRenamedFanouts(
const absl::flat_hash_map<string, NodeViewFanouts>& renamed_fanouts) {
for (auto& renamed_fanout : renamed_fanouts) {
for (auto& regular_fanouts :
renamed_fanout.second.regular_fanouts_by_port_) {
for (auto& fanout : regular_fanouts) {
auto* fanout_node_view = fanout.node_view();
auto& fanin = fanout_node_view->regular_fanins_[fanout.index()];
fanout_node_view->fanins_count_.erase(
{fanin.node_view()->node(), fanin.index()});
fanin.fanout_index_ = internal::kMissingIndex;
}
}
for (auto& fanout : renamed_fanout.second.controlled_fanouts_) {
auto* fanout_node_view = fanout.node_view();
auto& fanin = fanout_node_view->controlling_fanins_[fanout.fanin_index_];
fanout_node_view->fanins_count_.erase(
{fanin.node_view()->node(), Graph::kControlSlot});
fanout_node_view->controlling_fanins_index_.erase(renamed_fanout.first);
fanin.fanout_index_ = internal::kMissingIndex;
}
}
}
inline void MutableGraphView::RemoveRegularFaninFanoutInternal(
MutableNodeView* node_view, int i) {
MutableFanoutView& fanin = node_view->regular_fanins_[i];
if (fanin.fanout_index_ == internal::kMissingIndex) {
return;
}
DecrementFaninCount(&node_view->fanins_count_,
{&graph_->node(fanin.node_index_), fanin.index()});
auto* fanin_node_view = fanin.node_view();
auto& fanouts = fanin_node_view->regular_fanouts_by_port_[fanin.index()];
int fanouts_size = fanouts.size();
if (fanin.fanout_index_ < fanouts_size - 1) {
MutableFaninView& last_fanout = fanouts.back();
last_fanout.node_view()
->regular_fanins_[last_fanout.index()]
.fanout_index_ = fanin.fanout_index_;
std::swap(last_fanout, fanouts[fanin.fanout_index_]);
}
fanouts.pop_back();
--fanin.node_view()->num_regular_fanouts_;
int last_fanout_index = fanin_node_view->regular_fanouts_by_port_.size();
  for (int j = fanin_node_view->regular_fanouts_by_port_.size() - 1; j >= 0;
       --j) {
    if (fanin_node_view->regular_fanouts_by_port_[j].empty()) {
      last_fanout_index = j;
} else {
break;
}
}
int fanin_node_view_regular_fanouts_by_port_size =
fanin_node_view->regular_fanouts_by_port_.size();
if (last_fanout_index < fanin_node_view_regular_fanouts_by_port_size) {
fanin_node_view->regular_fanouts_by_port_.resize(last_fanout_index);
}
}
inline void MutableGraphView::AddRegularFaninInternal(
MutableNodeView* node_view, const SafeTensorId& fanin_id) {
MutableNodeView* fanin_node_view = GetNode(fanin_id.node());
int fanin_node_view_regular_fanouts_by_port_size =
fanin_node_view->regular_fanouts_by_port_.size();
if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) {
fanin_node_view->regular_fanouts_by_port_.resize(fanin_id.index() + 1);
}
auto& fanouts = fanin_node_view->regular_fanouts_by_port_[fanin_id.index()];
fanouts.emplace_back(this, node_view->node_index(),
node_view->regular_fanins_.size(),
node_view->regular_fanins_.size());
++fanin_node_view->num_regular_fanouts_;
node_view->regular_fanins_.emplace_back(this, fanin_node_view->node_index(),
fanin_id.index(), fanouts.size() - 1);
IncrementFaninCount(
&node_view->fanins_count_,
{&graph_->node(fanin_node_view->node_index()), fanin_id.index()});
}
inline void MutableGraphView::UpdateRegularFaninInternal(
MutableNodeView* node_view, const int i, const SafeTensorId& fanin_id) {
RemoveRegularFaninFanoutInternal(node_view, i);
MutableNodeView* fanin_node_view = GetNode(fanin_id.node());
int fanin_node_view_regular_fanouts_by_port_size =
fanin_node_view->regular_fanouts_by_port_.size();
if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) {
fanin_node_view->regular_fanouts_by_port_.resize(fanin_id.index() + 1);
}
auto& fanouts = fanin_node_view->regular_fanouts_by_port_[fanin_id.index()];
fanouts.emplace_back(this, node_view->node_index(), i, i);
++fanin_node_view->num_regular_fanouts_;
node_view->regular_fanins_[i] =
MutableFanoutView(this, fanin_node_view->node_index(), fanin_id.index(),
fanouts.size() - 1);
IncrementFaninCount(
&node_view->fanins_count_,
{&graph_->node(fanin_node_view->node_index()), fanin_id.index()});
}
inline void MutableGraphView::RemoveControllingFaninFanoutInternal(
MutableNodeView* node_view, int i) {
auto& control_to_remove = node_view->controlling_fanins_[i];
if (control_to_remove.fanout_index_ != internal::kMissingIndex) {
node_view->fanins_count_.erase(
{control_to_remove.node_view()->node(), Graph::kControlSlot});
node_view->controlling_fanins_index_.erase(
control_to_remove.node_view()->GetName());
auto* control_to_remove_view = control_to_remove.node_view();
int control_to_remove_view_controlled_fanouts_size =
control_to_remove_view->controlled_fanouts_.size();
if (control_to_remove.fanout_index_ <
control_to_remove_view_controlled_fanouts_size - 1) {
auto& control_to_remove_view_last_control =
control_to_remove_view->controlled_fanouts_.back();
control_to_remove_view_last_control.node_view()
->controlling_fanins_[control_to_remove_view_last_control
.fanin_index_]
.fanout_index_ = control_to_remove.fanout_index_;
std::swap(control_to_remove_view_last_control,
control_to_remove_view
->controlled_fanouts_[control_to_remove.fanout_index_]);
}
control_to_remove_view->controlled_fanouts_.pop_back();
}
}
inline void MutableGraphView::RemoveControllingFaninInternal(
MutableNodeView* node_view, const std::set<int>& indices_to_remove) {
const int num_regular_fanins = node_view->NumRegularFanins();
auto* mutable_input = node_view->node()->mutable_input();
for (auto rit = indices_to_remove.rbegin(); rit != indices_to_remove.rend();
++rit) {
const int control_index = *rit;
RemoveControllingFaninFanoutInternal(node_view, control_index);
int node_view_controlling_fanins_size =
node_view->controlling_fanins_.size();
if (control_index < node_view_controlling_fanins_size - 1) {
auto& last_control = node_view->controlling_fanins_.back();
auto* last_control_view = last_control.node_view();
last_control_view->controlled_fanouts_[last_control.fanout_index_]
.fanin_index_ = control_index;
node_view->controlling_fanins_index_.find(last_control_view->GetName())
->second = control_index;
mutable_input->SwapElements(
num_regular_fanins + control_index,
num_regular_fanins + node_view->NumControllingFanins() - 1);
std::swap(last_control, node_view->controlling_fanins_[control_index]);
}
mutable_input->RemoveLast();
node_view->controlling_fanins_.pop_back();
}
}
inline void MutableGraphView::AddControllingFaninInternal(
MutableNodeView* node_view, absl::string_view fanin_node_name) {
NodeDef* node = node_view->node();
node->add_input(AsControlDependency(string(fanin_node_name)));
MutableNodeView* fanin_node_view = GetNode(fanin_node_name);
const int index = node_view->controlling_fanins_.size();
fanin_node_view->controlled_fanouts_.emplace_back(
this, node_view->node_index(), Graph::kControlSlot, index);
node_view->controlling_fanins_.emplace_back(
this, fanin_node_view->node_index(), Graph::kControlSlot,
fanin_node_view->controlled_fanouts_.size() - 1);
IncrementFaninCount(
&node_view->fanins_count_,
{&graph_->node(fanin_node_view->node_index()), Graph::kControlSlot});
TensorId tensor_id = ParseTensorName(node->input(node->input_size() - 1));
node_view->controlling_fanins_index_.emplace(tensor_id.node(), index);
}
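// Applies each staged diff to its NodeDef: op/device/attrs first (renames
// were already handled in FixRenamedNodes), then regular fanin removals or
// additions (rotating newly added inputs ahead of the controlling inputs),
// regular fanin updates, and finally controlling fanin removals and
// additions.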
void MutableGraphView::ApplyNodeUpdates() {
for (auto& diff : mutation_.updated_nodes_) {
if (internal::IsEmpty(&diff)) {
continue;
}
MutableNodeView& node_view = nodes_[diff.node_index];
diff.node_index = internal::kMissingIndex;
node_view.update_index_ = internal::kMissingIndex;
NodeDef* node_def = node_view.node();
if (diff.update_op) {
node_def->set_op(diff.op);
}
if (diff.update_device) {
node_def->set_device(diff.device);
}
node_def->mutable_attr()->swap((*diff.processed_attrs));
if (diff.num_regular_inputs_to_remove > 0) {
const int first_index =
node_view.NumRegularFanins() - diff.num_regular_inputs_to_remove;
for (int i = first_index; i < node_view.NumRegularFanins(); ++i) {
RemoveRegularFaninFanoutInternal(&node_view, i);
}
node_view.regular_fanins_.resize(first_index);
node_def->mutable_input()->DeleteSubrange(
node_view.NumRegularFanins(), diff.num_regular_inputs_to_remove);
} else if (diff.num_regular_inputs_to_add > 0) {
node_def->mutable_input()->Reserve(node_def->mutable_input()->size() +
diff.num_regular_inputs_to_add);
int curr_index = node_view.NumRegularFanins();
int curr_control_start = curr_index;
for (const SafeTensorId& fanin : diff.regular_inputs_to_add) {
AddRegularFaninInternal(&node_view, fanin);
node_def->add_input(SafeTensorIdToString(fanin));
node_def->mutable_input()->SwapElements(curr_index,
node_def->input_size() - 1);
if (curr_control_start == curr_index) {
curr_control_start = node_def->input_size() - 1;
}
++curr_index;
}
if (node_view.NumControllingFanins() > 1 &&
curr_control_start != node_view.NumRegularFanins()) {
std::rotate(
node_def->mutable_input()->begin() + node_view.NumRegularFanins(),
node_def->mutable_input()->begin() + curr_control_start,
node_def->mutable_input()->end());
}
}
for (const auto& update_fanin : diff.regular_inputs_to_update) {
UpdateRegularFaninInternal(&node_view, update_fanin.first,
update_fanin.second);
node_def->set_input(update_fanin.first,
SafeTensorIdToString(update_fanin.second));
}
RemoveControllingFaninInternal(&node_view,
diff.controlling_inputs_to_remove);
node_def->mutable_input()->Reserve(node_def->mutable_input()->size() +
diff.controlling_inputs_to_add.size());
for (const auto& control_to_add : diff.controlling_inputs_to_add) {
AddControllingFaninInternal(&node_view, control_to_add);
}
}
}
void MutableGraphView::SetNewNodesFanins(
const std::vector<int>& new_node_indices) {
auto new_node = mutation_.new_nodes_.begin();
for (const int new_node_index : new_node_indices) {
MutableNodeView& new_node_view = nodes_[new_node_index];
NodeDef* new_node_def = new_node_view.node();
new_node_def->mutable_input()->Reserve(new_node->num_regular_fanins +
new_node->controlling_fanins.size());
for (const SafeTensorId& fanin : new_node->regular_fanins) {
AddRegularFaninInternal(&new_node_view, fanin);
new_node_def->add_input(SafeTensorIdToString(fanin));
}
for (const string& control_to_add : new_node->controlling_fanins) {
AddControllingFaninInternal(&new_node_view, control_to_add);
}
++new_node;
}
}
inline void MutableGraphView::RemoveAllFaninFanoutInternal(
MutableNodeView* node_view) {
const int num_regular_fanins = node_view->NumRegularFanins();
for (int i = 0; i < num_regular_fanins; ++i) {
RemoveRegularFaninFanoutInternal(node_view, i);
}
std::vector<MutableFanoutView>().swap(node_view->regular_fanins_);
const int num_controlling_fanins = node_view->NumControllingFanins();
for (int i = 0; i < num_controlling_fanins; ++i) {
RemoveControllingFaninFanoutInternal(node_view, i);
}
std::vector<MutableFanoutView>().swap(node_view->controlling_fanins_);
}
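// Removes explicitly deleted nodes and nodes overwritten by renames using a
// swap-with-last scheme, then erases the trailing NodeDefs with one subrange
// delete so surviving node indices stay dense and consistent.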
void MutableGraphView::RemoveNodesInternal(
const std::vector<RenamedOrOverwrittenNode>& renamed_nodes,
const std::vector<bool>& overwritten_name_removed_nodes) {
std::vector<int> overwritten_nodes;
overwritten_nodes.reserve(renamed_nodes.size());
for (const auto& renamed : renamed_nodes) {
if (renamed.overwritten_node_index_ != internal::kMissingIndex) {
auto& node = nodes_[renamed.overwritten_node_index_];
RemoveAllFaninFanoutInternal(&node);
overwritten_nodes.emplace_back(renamed.overwritten_node_index_);
}
}
std::vector<int> node_indices_to_remove;
node_indices_to_remove.reserve(mutation_.updated_nodes_.size() +
overwritten_nodes.size());
for (int node_index : mutation_.removed_nodes_) {
auto& node = nodes_[node_index];
RemoveAllFaninFanoutInternal(&node);
node_indices_to_remove.push_back(node_index);
if (!overwritten_name_removed_nodes[node_index]) {
node_index_by_name_.erase(node.GetName());
}
}
node_indices_to_remove.insert(node_indices_to_remove.end(),
overwritten_nodes.begin(),
overwritten_nodes.end());
std::set<int> sorted_node_indices_to_remove(node_indices_to_remove.begin(),
node_indices_to_remove.end());
for (auto rit = sorted_node_indices_to_remove.rbegin();
rit != sorted_node_indices_to_remove.rend(); ++rit) {
const int removed_node_index = *rit;
MutableNodeView& last_node = nodes_.back();
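    // If the last node is about to be moved into the removed slot, repoint
    // every fanin/fanout record that references it at the lower index, swap
    // the view entries and the NodeDefs, and update the name-to-index map.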
if (last_node.node_index_ > removed_node_index) {
last_node.node_index_ = removed_node_index;
for (auto& regular_fanin : last_node.regular_fanins_) {
regular_fanin.node_view()
->regular_fanouts_by_port_[regular_fanin.index()]
[regular_fanin.fanout_index_]
.node_index_ = removed_node_index;
}
for (auto& controlling_fanin : last_node.controlling_fanins_) {
controlling_fanin.node_view()
->controlled_fanouts_[controlling_fanin.fanout_index_]
.node_index_ = removed_node_index;
}
for (auto& regular_fanouts : last_node.regular_fanouts_by_port_) {
for (auto& regular_fanout : regular_fanouts) {
MutableNodeView* fanout_node_view = regular_fanout.node_view();
fanout_node_view->regular_fanins_[regular_fanout.fanin_index_]
.node_index_ = removed_node_index;
}
}
for (auto& controlled_fanout : last_node.controlled_fanouts_) {
MutableNodeView* fanout_node_view = controlled_fanout.node_view();
fanout_node_view->controlling_fanins_[controlled_fanout.fanin_index_]
.node_index_ = removed_node_index;
}
const int last_node_index = nodes_.size() - 1;
std::swap(nodes_[last_node_index], nodes_[removed_node_index]);
graph()->mutable_node()->SwapElements(last_node_index,
removed_node_index);
node_index_by_name_.find(nodes_[removed_node_index].GetName())->second =
removed_node_index;
}
nodes_.pop_back();
}
if (!sorted_node_indices_to_remove.empty()) {
const int current_size = graph()->node_size();
const int num_to_remove = sorted_node_indices_to_remove.size();
graph()->mutable_node()->DeleteSubrange(current_size - num_to_remove,
num_to_remove);
}
}
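// Topological-sort helpers: traversal states for the iterative DFS, explicit
// recursion-stack frames, and edges recorded when a cycle is detected.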
namespace {
constexpr int kTopologicalSortDone = -1;
const char kMutableGraphViewSortTopologicallyError[] =
"MutableGraphView::SortTopologically error: ";
enum TraversalState : uint8_t { PENDING, PROCESSING, PROCESSED };
enum RecursionStackState : bool { ENTER, EXIT };
struct RecursionStackEntry {
RecursionStackEntry(int node_index, RecursionStackState recursion_state)
: node_index(node_index), recursion_state(recursion_state) {}
const int node_index;
const RecursionStackState recursion_state;
};
struct Edge {
Edge(int from, int to) : from(from), to(to) {}
const int from;
const int to;
};
}
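// Topologically sorts the graph with an iterative reversed-postorder DFS (an
// explicit stack of ENTER/EXIT frames replaces recursion). Cycles are an
// error unless `ignore_cycles` is set; `extra_dependencies` injects extra
// from->to ordering edges local to this view. Sorting is invalid while an
// unapplied mutation exists.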
Status MutableGraphView::SortTopologically(
bool ignore_cycles,
absl::Span<const TopologicalDependency> extra_dependencies) {
if (!mutation_.updated_nodes_.empty() || !mutation_.new_nodes_.empty()) {
return errors::InvalidArgument(kMutableGraphViewSortTopologicallyError,
"active mutation exists.");
}
const int num_nodes = nodes_.size();
absl::flat_hash_map<int, std::vector<int>> extra_dependencies_by_parent;
for (const auto& extra_dependency : extra_dependencies) {
if (extra_dependency.graph_view_ != this ||
extra_dependency.from_ == extra_dependency.to_ ||
extra_dependency.from_ < 0 || extra_dependency.from_ >= num_nodes ||
extra_dependency.to_ < 0 || extra_dependency.to_ >= num_nodes) {
return errors::InvalidArgument(kMutableGraphViewSortTopologicallyError,
"invalid extra dependencies.");
}
extra_dependencies_by_parent[extra_dependency.from_].push_back(
extra_dependency.to_);
}
std::vector<TraversalState> traversal_state(num_nodes, PENDING);
int curr_pos = num_nodes - 1;
std::vector<int> order(num_nodes);
std::vector<Edge> edges_in_cycle;
auto push_onto_stack = [this](
const int curr_index, const int fanout_index,
std::vector<RecursionStackEntry>* recursion_stack,
std::vector<TraversalState>* traversal_state,
std::vector<Edge>* edges_in_cycle) {
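    // NextIteration -> Merge edges close v1 control-flow loops; skip them so
    // valid while-loops are not flagged as cycles.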
if (IsNextIteration(graph_->node(curr_index)) &&
IsMerge(graph_->node(fanout_index))) {
return;
}
auto& fanout_traversal_state = (*traversal_state)[fanout_index];
if (fanout_traversal_state == PROCESSING) {
edges_in_cycle->push_back({curr_index, fanout_index});
} else if (fanout_traversal_state == PENDING) {
recursion_stack->push_back({fanout_index, ENTER});
}
};
auto process_fanouts = [this, &extra_dependencies_by_parent,
&push_onto_stack](
const int curr_index,
std::vector<RecursionStackEntry>* recursion_stack,
std::vector<TraversalState>* traversal_state,
std::vector<Edge>* edges_in_cycle) {
const auto& node_view = nodes_[curr_index];
for (const auto& regular_fanouts_port_i : node_view.GetRegularFanouts()) {
for (const auto& regular_fanout : regular_fanouts_port_i) {
push_onto_stack(curr_index, regular_fanout.node_index_, recursion_stack,
traversal_state, edges_in_cycle);
}
}
for (const auto& controlled_fanout : node_view.GetControlledFanouts()) {
push_onto_stack(curr_index, controlled_fanout.node_index_,
recursion_stack, traversal_state, edges_in_cycle);
}
auto it = extra_dependencies_by_parent.find(curr_index);
if (it != extra_dependencies_by_parent.end()) {
for (const auto& extra_fanout : it->second) {
push_onto_stack(curr_index, extra_fanout, recursion_stack,
traversal_state, edges_in_cycle);
}
}
};
auto reversed_postorder_dfs =
[&process_fanouts](const MutableNodeView& root_node_view,
std::vector<int>* order,
std::vector<TraversalState>* traversal_state,
int* curr_pos, std::vector<Edge>* edges_in_cycle) {
std::vector<RecursionStackEntry> recursion_stack;
const int root_index = root_node_view.node_index_;
auto& root_traversal_state = (*traversal_state)[root_index];
if (root_traversal_state == PENDING) {
recursion_stack.push_back({root_index, ENTER});
}
while (!recursion_stack.empty()) {
auto curr_entry = recursion_stack.back();
recursion_stack.pop_back();
const int curr_index = curr_entry.node_index;
auto& curr_traversal_state = (*traversal_state)[curr_index];
if (curr_traversal_state == PROCESSED) {
continue;
} else if (curr_entry.recursion_state == EXIT) {
(*order)[curr_index] = *curr_pos;
curr_traversal_state = PROCESSED;
--(*curr_pos);
} else {
curr_traversal_state = PROCESSING;
recursion_stack.push_back({curr_index, EXIT});
process_fanouts(curr_index, &recursion_stack, traversal_state,
edges_in_cycle);
}
}
};
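  // Seed the DFS from every node with no fanins (the graph's roots).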
for (int i = num_nodes - 1; i >= 0; --i) {
auto& node = nodes_[i];
if (node.NumRegularFanins() + node.NumControllingFanins() == 0) {
reversed_postorder_dfs(node, &order, &traversal_state, &curr_pos,
&edges_in_cycle);
}
}
if (!ignore_cycles && !edges_in_cycle.empty()) {
std::vector<string> edges_formatted;
edges_formatted.reserve(edges_in_cycle.size());
for (const auto& edge : edges_in_cycle) {
edges_formatted.push_back(
absl::StrCat("'", graph_->node(edge.from).name(), "' -> '",
graph_->node(edge.to).name(), "'"));
}
const string edges_str =
absl::StrCat("{", absl::StrJoin(edges_formatted, ", "), "}");
return errors::InvalidArgument(kMutableGraphViewSortTopologicallyError,
"detected edge(s) creating cycle(s) ",
edges_str, ".");
}
if (curr_pos != kTopologicalSortDone) {
if (!ignore_cycles) {
return errors::InvalidArgument(
kMutableGraphViewSortTopologicallyError,
"was not able to sort all nodes topologically.");
}
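    // Cycles left some nodes unreachable from any fanin-free root; sweep all
    // nodes so every node still receives a position in the order.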
for (const auto& node : nodes_) {
reversed_postorder_dfs(node, &order, &traversal_state, &curr_pos,
&edges_in_cycle);
}
}
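  // Apply the computed order: permute the node views, then patch every cached
  // node index (name map, fanins, fanouts) to the new positions.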
std::vector<MutableNodeView> permuted_nodes(num_nodes);
for (int i = 0; i < num_nodes; ++i) {
permuted_nodes[order[i]] = std::move(nodes_[i]);
}
nodes_.swap(permuted_nodes);
for (MutableNodeView& node_view : nodes_) {
const int prev_node_index = node_view.node_index_;
if (prev_node_index != order[prev_node_index]) {
const string& node_name = graph_->node(prev_node_index).name();
node_view.node_index_ = order[prev_node_index];
node_index_by_name_.find(node_name)->second = node_view.node_index_;
}
for (MutableFanoutView& regular_fanin : node_view.regular_fanins_) {
regular_fanin.node_index_ = order[regular_fanin.node_index_];
}
for (MutableFanoutView& controlling_fanin : node_view.controlling_fanins_) {
controlling_fanin.node_index_ = order[controlling_fanin.node_index_];
}
for (std::vector<MutableFaninView>& regular_fanouts_port_i :
node_view.regular_fanouts_by_port_) {
for (MutableFaninView& regular_fanout : regular_fanouts_port_i) {
regular_fanout.node_index_ = order[regular_fanout.node_index_];
}
}
for (MutableFaninView& controlled_fanout : node_view.controlled_fanouts_) {
controlled_fanout.node_index_ = order[controlled_fanout.node_index_];
}
}
PermuteNodesInPlace(graph_, &order, false);
return absl::OkStatus();
}
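// Validates a pending mutation: partitions updated nodes into renamed and
// in-place sets, records nodes whose diffs are empty, then checks the
// resulting node names and fanins for conflicts and verifies kernels are
// registered for the mutated nodes.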
inline Status MutableGraphView::ValidateInternal(
absl::flat_hash_map<absl::string_view, int>* node_names,
std::vector<RenamedOrOverwrittenNode>* renamed_nodes,
std::vector<int>* inplace_nodes,
std::vector<int>* empty_diff_node_indices) {
TF_RETURN_IF_ERROR(GetNodeNamesAndPartitionUpdatedNodes(
node_names, renamed_nodes, inplace_nodes, empty_diff_node_indices));
TF_RETURN_IF_ERROR(
CheckNodeNamesAndFanins(*node_names, *renamed_nodes, *inplace_nodes));
TF_RETURN_IF_ERROR(CheckKernelRegisteredForNodes());
return absl::OkStatus();
}
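// Applies a validated mutation in phases: clear no-op diffs, fix renamed
// nodes while capturing their orphaned fanouts, add new nodes, rewire the
// fanouts of renamed nodes, apply in-place updates, wire the fanins of new
// nodes, and finally remove deleted/overwritten nodes before resetting the
// mutation and bumping its counter.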
Status MutableGraphView::ApplyMutationInternal() {
absl::flat_hash_map<absl::string_view, int> node_names;
std::vector<RenamedOrOverwrittenNode> renamed_nodes;
std::vector<int> inplace_nodes;
std::vector<int> empty_diff_node_indices;
TF_RETURN_IF_ERROR(ValidateInternal(
&node_names, &renamed_nodes, &inplace_nodes, &empty_diff_node_indices));
for (const int empty_diff_node_index : empty_diff_node_indices) {
nodes_[empty_diff_node_index].update_index_ = internal::kMissingIndex;
}
absl::flat_hash_map<string, NodeViewFanouts> renamed_fanouts;
std::vector<bool> overwritten_name_removed_nodes(nodes_.size());
FixRenamedNodes(&renamed_nodes, &renamed_fanouts,
&overwritten_name_removed_nodes);
std::vector<int> new_node_indices;
AddNewNodes(&renamed_fanouts, &new_node_indices);
FixRenamedFanouts(renamed_fanouts);
ApplyNodeUpdates();
SetNewNodesFanins(new_node_indices);
RemoveNodesInternal(renamed_nodes, overwritten_name_removed_nodes);
mutation_.ResetInternal();
mutation_.mutation_counter_++;
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/grappler/utils/graph_view.h"
#include <type_traits>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
constexpr char kNoOp[] = "NoOp";
GraphDef SimpleTestGraph() {
return GDef({NDef("a", kNoOp, {"b:2", "d:3", "b:2", "d:3", "^c"}),
NDef("b", kNoOp, {"d:2", "c:5", "^c"}),
NDef("c", kNoOp, {"^d", "^d"}), NDef("d", kNoOp, {})},
{});
}
template <typename T>
const string GetGraphViewTypeAsString() {
return std::is_same<T, class GraphView>::value ? "GraphView"
: "MutableGraphView";
}
using GraphViewTypes = ::testing::Types<GraphView, MutableGraphView>;
template <typename T>
class TypedGraphViewTest : public ::testing::Test {};
TYPED_TEST_SUITE(TypedGraphViewTest, GraphViewTypes);
TYPED_TEST(TypedGraphViewTest, GraphWithDuplicateNodeNames) {
GraphDef graph =
GDef({NDef("a", kNoOp, {}), NDef("a", kNoOp, {})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute(
"$0::$0 error: graph has multiple nodes with the name 'a'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithMissingFanins) {
GraphDef graph = GDef({NDef("a", kNoOp, {"b:3"})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute("$0::$0 error: node 'a' has missing fanin 'b:3'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithSelfCycles) {
GraphDef graph = GDef({NDef("a", kNoOp, {"a:4"})}, {});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(
s.message(),
absl::Substitute("$0::$0 error: node 'a' has self cycle fanin 'a:4'.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GraphWithMisorderedFanins) {
GraphDef graph = GDef({NDef("a", kNoOp, {"^b", "b:4"}), NDef("b", kNoOp, {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
absl::Substitute("$0::$0 error: node 'a' has regular fanin 'b:4' "
"after controlling fanins.",
GetGraphViewTypeAsString<TypeParam>()));
}
TYPED_TEST(TypedGraphViewTest, GetNodeWithIndex) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const int num_nodes = graph_view.NumNodes();
ASSERT_EQ(graph_view.NumNodes(), graph.node_size());
for (int i = 0; i < num_nodes; ++i) {
const auto* node = graph_view.GetNode(i);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->node(), graph.mutable_node(i));
}
const auto* bad_node = graph_view.GetNode(-1);
ASSERT_EQ(bad_node, nullptr);
bad_node = graph_view.GetNode(num_nodes);
ASSERT_EQ(bad_node, nullptr);
}
TYPED_TEST(TypedGraphViewTest, GetNodeWithName) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
std::vector<string> node_names = {"a", "b", "c", "d"};
for (int i = 0; i < node_names.size(); ++i) {
const string& node_name = node_names[i];
const auto* node = graph_view.GetNode(node_name);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->node(), graph.mutable_node(i));
}
const auto* bad_node = graph_view.GetNode("e");
ASSERT_EQ(bad_node, nullptr);
}
TYPED_TEST(TypedGraphViewTest, GetNodes) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto& nodes = graph_view.GetNodes();
const int num_nodes = nodes.size();
EXPECT_EQ(num_nodes, 4);
ASSERT_EQ(num_nodes, graph.node_size());
for (int i = 0; i < num_nodes; ++i) {
EXPECT_EQ(nodes[i].node(), graph.mutable_node(i));
}
}
TYPED_TEST(TypedGraphViewTest, HasNode) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
for (const string& node_name : {"a", "b", "c", "d"}) {
EXPECT_TRUE(graph_view.HasNode(node_name));
}
EXPECT_FALSE(graph_view.HasNode("e"));
}
TYPED_TEST(TypedGraphViewTest, NumNodes) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 4);
}
TYPED_TEST(TypedGraphViewTest, NumNodesEmptyGraph) {
GraphDef graph;
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 0);
}
TEST(MutableGraphViewTest, DedupControlDependencies) {
GraphDef graph = GDef(
{NDef("a", kNoOp, {}), NDef("b", kNoOp, {}), NDef("c", kNoOp, {}),
NDef("d", kNoOp, {"a:2", "b:1", "^c", "^c", "^a", "^a", "^b", "^c"})},
{});
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view.NumNodes(), 4);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->NumRegularFanins(), 2);
ASSERT_NE(d_node->node(), nullptr);
ASSERT_EQ(d_node->node()->input_size(), 5);
EXPECT_EQ(d_node->node()->input(0), "a:2");
EXPECT_EQ(d_node->node()->input(1), "b:1");
EXPECT_EQ(d_node->node()->input(2), "^c");
EXPECT_EQ(d_node->node()->input(3), "^b");
EXPECT_EQ(d_node->node()->input(4), "^a");
ASSERT_EQ(d_node->NumControllingFanins(), 3);
const auto& d_control_fanins = d_node->GetControllingFanins();
ASSERT_EQ(d_control_fanins.size(), 3);
ASSERT_NE(d_control_fanins[0].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[0].node_view()->GetName(), "c");
ASSERT_NE(d_control_fanins[1].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[1].node_view()->GetName(), "b");
ASSERT_NE(d_control_fanins[2].node_view(), nullptr);
EXPECT_EQ(d_control_fanins[2].node_view()->GetName(), "a");
}
template <typename T>
class TypedNodeViewTest : public ::testing::Test {};
TYPED_TEST_SUITE(TypedNodeViewTest, GraphViewTypes);
TYPED_TEST(TypedNodeViewTest, GetName) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
for (const NodeDef& node : graph.node()) {
const auto* node_view = graph_view.GetNode(node.name());
ASSERT_NE(node_view, nullptr);
EXPECT_EQ(node_view->GetName(), node.name());
EXPECT_EQ(node_view->GetName(), node_view->node()->name());
}
}
TYPED_TEST(TypedNodeViewTest, GetOp) {
GraphDef graph = GDef({NDef("a", "op_a", {}), NDef("b", "op_b", {}),
NDef("c", "op_c", {}), NDef("d", "op_d", {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(a_node->GetOp(), "op_a");
EXPECT_EQ(a_node->node()->op(), "op_a");
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
EXPECT_EQ(b_node->GetOp(), "op_b");
EXPECT_EQ(b_node->node()->op(), "op_b");
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetOp(), "op_c");
EXPECT_EQ(c_node->node()->op(), "op_c");
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->GetOp(), "op_d");
EXPECT_EQ(d_node->node()->op(), "op_d");
}
TYPED_TEST(TypedNodeViewTest, GetDevice) {
GraphDef graph = GDef(
{NDef("a", "", {}, {}, "device_a"), NDef("b", "", {}, {}, "device_b"),
NDef("c", "", {}, {}, "device_c"), NDef("d", "", {}, {})},
{});
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
const auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(a_node->GetDevice(), "device_a");
EXPECT_EQ(a_node->node()->device(), "device_a");
const auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
EXPECT_EQ(b_node->GetDevice(), "device_b");
EXPECT_EQ(b_node->node()->device(), "device_b");
const auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetDevice(), "device_c");
EXPECT_EQ(c_node->node()->device(), "device_c");
const auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(d_node->GetDevice(), "");
EXPECT_EQ(d_node->node()->device(), "");
}
template <typename T>
class TypedFaninTest : public ::testing::Test {};
using FaninTypes =
::testing::Types<std::pair<FanoutView, GraphView>,
std::pair<MutableFanoutView, MutableGraphView>>;
TYPED_TEST_SUITE(TypedFaninTest, FaninTypes);
TYPED_TEST(TypedFaninTest, GetRegularFanins) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanins = a_node->GetRegularFanins();
ASSERT_EQ(a_fanins.size(), 4);
EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, b_node->node_index(), 2));
EXPECT_EQ(a_fanins[1], FanoutViewType(&graph_view, d_node->node_index(), 3));
EXPECT_EQ(a_fanins[2], FanoutViewType(&graph_view, b_node->node_index(), 2));
EXPECT_EQ(a_fanins[3], FanoutViewType(&graph_view, d_node->node_index(), 3));
const auto& d_fanins = d_node->GetRegularFanins();
EXPECT_EQ(d_fanins.size(), 0);
}
TYPED_TEST(TypedFaninTest, GetRegularFanin) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanin_0 = a_node->GetRegularFanin(0);
EXPECT_EQ(a_fanin_0, FanoutViewType(&graph_view, b_node->node_index(), 2));
const auto& a_fanin_1 = a_node->GetRegularFanin(1);
EXPECT_EQ(a_fanin_1, FanoutViewType(&graph_view, d_node->node_index(), 3));
const auto& a_fanin_2 = a_node->GetRegularFanin(2);
EXPECT_EQ(a_fanin_2, FanoutViewType(&graph_view, b_node->node_index(), 2));
const auto& a_fanin_3 = a_node->GetRegularFanin(3);
EXPECT_EQ(a_fanin_3, FanoutViewType(&graph_view, d_node->node_index(), 3));
const FanoutViewType missing_fanin;
EXPECT_EQ(missing_fanin, FanoutViewType(nullptr, -1, -2));
EXPECT_EQ(missing_fanin.node_view(), nullptr);
const auto& a_fanin_4 = a_node->GetRegularFanin(4);
EXPECT_EQ(a_fanin_4, missing_fanin);
const auto& a_fanin_5 = a_node->GetRegularFanin(5);
EXPECT_EQ(a_fanin_5, missing_fanin);
const auto& a_fanin_control = a_node->GetRegularFanin(Graph::kControlSlot);
EXPECT_EQ(a_fanin_control, missing_fanin);
const auto& a_fanin_bad = a_node->GetRegularFanin(-2);
EXPECT_EQ(a_fanin_bad, missing_fanin);
}
TYPED_TEST(TypedFaninTest, GetControllingFanins) {
using FanoutViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& a_fanins = a_node->GetControllingFanins();
ASSERT_EQ(a_fanins.size(), 1);
EXPECT_EQ(a_fanins[0], FanoutViewType(&graph_view, c_node->node_index(),
Graph::kControlSlot));
const auto& c_fanins = c_node->GetControllingFanins();
FanoutViewType d_control_fanin(&graph_view, d_node->node_index(),
Graph::kControlSlot);
if (std::is_same<GraphViewType, GraphView>::value) {
ASSERT_EQ(c_fanins.size(), 2);
EXPECT_EQ(c_fanins[0], d_control_fanin);
EXPECT_EQ(c_fanins[1], d_control_fanin);
} else {
ASSERT_EQ(c_fanins.size(), 1);
EXPECT_EQ(c_fanins[0], d_control_fanin);
}
const auto& d_fanins = d_node->GetControllingFanins();
EXPECT_EQ(d_fanins.size(), 0);
}
template <typename T>
class TypedFanoutTest : public ::testing::Test {};
using FanoutTypes =
::testing::Types<std::pair<FaninView, GraphView>,
std::pair<MutableFaninView, MutableGraphView>>;
TYPED_TEST_SUITE(TypedFanoutTest, FanoutTypes);
TYPED_TEST(TypedFanoutTest, GetRegularFanouts) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& d_fanouts = d_node->GetRegularFanouts();
ASSERT_EQ(d_fanouts.size(), 4);
for (int i = 0; i < d_fanouts.size(); ++i) {
if (i == 2) {
ASSERT_EQ(d_fanouts[i].size(), 1);
EXPECT_EQ(d_fanouts[i][0],
FaninViewType(&graph_view, b_node->node_index(), 0));
} else if (i == 3) {
ASSERT_EQ(d_fanouts[i].size(), 2);
absl::flat_hash_set<FaninViewType> fanouts(d_fanouts[i].begin(),
d_fanouts[i].end());
EXPECT_TRUE(fanouts.contains(
FaninViewType(&graph_view, a_node->node_index(), 1)));
EXPECT_TRUE(fanouts.contains(
FaninViewType(&graph_view, a_node->node_index(), 3)));
} else {
EXPECT_EQ(d_fanouts[i].size(), 0);
}
}
const auto& a_fanouts = a_node->GetRegularFanouts();
EXPECT_EQ(a_fanouts.size(), 0);
}
TYPED_TEST(TypedFanoutTest, GetRegularFanout) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& d_fanouts_2 = d_node->GetRegularFanout(2);
ASSERT_EQ(d_fanouts_2.size(), 1);
EXPECT_EQ(d_fanouts_2.at(0),
FaninViewType(&graph_view, b_node->node_index(), 0));
const auto& d_fanouts_3 = d_node->GetRegularFanout(3);
EXPECT_EQ(d_fanouts_3.size(), 2);
absl::flat_hash_set<FaninViewType> d_fanouts_3_set(d_fanouts_3.begin(),
d_fanouts_3.end());
EXPECT_TRUE(d_fanouts_3_set.contains(
FaninViewType(&graph_view, a_node->node_index(), 1)));
EXPECT_TRUE(d_fanouts_3_set.contains(
FaninViewType(&graph_view, a_node->node_index(), 3)));
const std::vector<FaninViewType> no_fanouts;
EXPECT_EQ(d_node->GetRegularFanout(-2), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(Graph::kControlSlot), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(0), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(1), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(4), no_fanouts);
EXPECT_EQ(d_node->GetRegularFanout(5), no_fanouts);
}
TYPED_TEST(TypedFanoutTest, GetControlledFanouts) {
using FaninViewType = typename TypeParam::first_type;
using GraphViewType = typename TypeParam::second_type;
GraphDef graph = SimpleTestGraph();
Status s;
GraphViewType graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
const auto& c_fanouts = c_node->GetControlledFanouts();
EXPECT_EQ(c_fanouts.size(), 2);
absl::flat_hash_set<FaninViewType> c_fanouts_set(c_fanouts.begin(),
c_fanouts.end());
EXPECT_TRUE(c_fanouts_set.contains(
FaninViewType(&graph_view, b_node->node_index(), Graph::kControlSlot)));
EXPECT_TRUE(c_fanouts_set.contains(
FaninViewType(&graph_view, a_node->node_index(), Graph::kControlSlot)));
const auto& d_fanouts = d_node->GetControlledFanouts();
FaninViewType c_control_fanout(&graph_view, c_node->node_index(),
Graph::kControlSlot);
if (std::is_same<GraphViewType, GraphView>::value) {
ASSERT_EQ(d_fanouts.size(), 2);
EXPECT_EQ(d_fanouts[0], c_control_fanout);
EXPECT_EQ(d_fanouts[1], c_control_fanout);
} else {
ASSERT_EQ(d_fanouts.size(), 1);
EXPECT_EQ(d_fanouts[0], c_control_fanout);
}
const auto& a_fanouts = a_node->GetControlledFanouts();
EXPECT_EQ(a_fanouts.size(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumRegularFanins) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumRegularFanins(), 4);
EXPECT_EQ(b_node->NumRegularFanins(), 2);
EXPECT_EQ(c_node->NumRegularFanins(), 0);
EXPECT_EQ(d_node->NumRegularFanins(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumControllingFanins) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumControllingFanins(), 1);
EXPECT_EQ(b_node->NumControllingFanins(), 1);
if (std::is_same<TypeParam, GraphView>::value) {
EXPECT_EQ(c_node->NumControllingFanins(), 2);
} else {
EXPECT_EQ(c_node->NumControllingFanins(), 1);
}
EXPECT_EQ(d_node->NumControllingFanins(), 0);
}
TYPED_TEST(TypedNodeViewTest, NumRegularFanouts) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumRegularFanouts(), 0);
EXPECT_EQ(b_node->NumRegularFanouts(), 2);
EXPECT_EQ(c_node->NumRegularFanouts(), 1);
EXPECT_EQ(d_node->NumRegularFanouts(), 3);
}
TYPED_TEST(TypedNodeViewTest, NumControlledFanouts) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_EQ(a_node->NumControlledFanouts(), 0);
EXPECT_EQ(b_node->NumControlledFanouts(), 0);
EXPECT_EQ(c_node->NumControlledFanouts(), 2);
if (std::is_same<TypeParam, GraphView>::value) {
EXPECT_EQ(d_node->NumControlledFanouts(), 2);
} else {
EXPECT_EQ(d_node->NumControlledFanouts(), 1);
}
}
TYPED_TEST(TypedNodeViewTest, HasFanin) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_TRUE(a_node->HasFanin({&graph_view, b_node->node_index(), 2}));
EXPECT_FALSE(a_node->HasFanin({&graph_view, c_node->node_index(), 4}));
EXPECT_TRUE(a_node->HasFanin(
{&graph_view, c_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(a_node->HasFanin(
{&graph_view, b_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(a_node->HasFanin({&graph_view, a_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanin(
{&graph_view, b_node->node_index(), internal::kMissingSlot}));
}
TYPED_TEST(TypedNodeViewTest, HasFanout) {
GraphDef graph = SimpleTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
auto* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
EXPECT_TRUE(b_node->HasFanout({&graph_view, a_node->node_index(), 2}));
EXPECT_FALSE(b_node->HasFanout({&graph_view, a_node->node_index(), 1}));
EXPECT_TRUE(d_node->HasFanout(
{&graph_view, c_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(d_node->HasFanout(
{&graph_view, a_node->node_index(), Graph::kControlSlot}));
EXPECT_FALSE(d_node->HasFanout({&graph_view, d_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanout({&graph_view, b_node->node_index(), 0}));
EXPECT_FALSE(a_node->HasFanout({&graph_view, 4, 0}));
EXPECT_FALSE(d_node->HasFanout(
{&graph_view, b_node->node_index(), internal::kMissingSlot}));
}
GraphDef SimpleAttrTestGraph() {
return GDef({NDef("a", kNoOp, {}), NDef("b", kNoOp, {}, {{"attr", 1}}),
NDef("c", kNoOp, {}, {{"attr_1", "a"}, {"attr_2", 2.0f}})},
{});
}
TYPED_TEST(TypedNodeViewTest, GetAttr) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(c_node->GetAttr("attr_1")->s(), "a");
}
TYPED_TEST(TypedNodeViewTest, GetAttrs) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
const auto& actual_attrs = c_node->GetAttrs();
EXPECT_EQ(actual_attrs.size(), 2);
const auto* attr_1 = actual_attrs.Find("attr_1");
EXPECT_NE(attr_1, nullptr);
EXPECT_EQ(attr_1->s(), "a");
const auto* attr_2 = actual_attrs.Find("attr_2");
EXPECT_NE(attr_2, nullptr);
EXPECT_EQ(attr_2->f(), 2.0f);
}
TYPED_TEST(TypedNodeViewTest, NumAttrs) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
auto* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_EQ(a_node->NumAttrs(), 0);
EXPECT_EQ(b_node->NumAttrs(), 1);
EXPECT_EQ(c_node->NumAttrs(), 2);
}
TYPED_TEST(TypedNodeViewTest, HasAttr) {
GraphDef graph = SimpleAttrTestGraph();
Status s;
TypeParam graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
EXPECT_TRUE(c_node->HasAttr("attr_1"));
EXPECT_FALSE(c_node->HasAttr("attr"));
}
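// Test fixture that exhaustively compares a MutableGraphView against an
// expected GraphDef: a read-only GraphView is built over the expectation and
// names, ops, devices, fanins, fanouts, and attributes are checked per node.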
class CompareGraphTest : public GrapplerTest {
public:
void CompareGraphViewWithGraph(MutableGraphView* graph_view,
const GraphDef& expected_graph) {
Status s;
GraphView expected_graph_view(&expected_graph, &s);
TF_ASSERT_OK(s);
EXPECT_EQ(graph_view->NumNodes(), expected_graph_view.NumNodes());
for (const NodeView& expected_node_view : expected_graph_view.GetNodes()) {
const string& node_name = expected_node_view.GetName();
MutableNodeView* node_view = graph_view->GetNode(node_name);
ASSERT_NE(node_view, nullptr);
EXPECT_EQ(node_view->GetName(), expected_node_view.GetName());
EXPECT_EQ(node_view->GetOp(), expected_node_view.GetOp());
EXPECT_EQ(node_view->GetDevice(), expected_node_view.GetDevice());
const int actual_num_fanins = node_view->node()->input_size();
EXPECT_EQ(actual_num_fanins, expected_node_view.node()->input_size());
const int expected_num_regular_fanins =
expected_node_view.NumRegularFanins();
bool same_num_regular_fanins =
node_view->NumRegularFanins() == expected_num_regular_fanins;
EXPECT_TRUE(same_num_regular_fanins);
for (int i = 0; i < expected_num_regular_fanins; ++i) {
const auto& expected_fanin = expected_node_view.GetRegularFanin(i);
auto* actual_fanin_node =
graph_view->GetNode(expected_fanin.node_view()->GetName());
ASSERT_NE(actual_fanin_node, nullptr);
EXPECT_TRUE(
node_view->HasFanin({actual_fanin_node, expected_fanin.index()}));
if (i < node_view->NumRegularFanins()) {
auto& actual_fanin = node_view->GetRegularFanin(i);
EXPECT_EQ(actual_fanin, MutableFanoutView(actual_fanin_node,
expected_fanin.index()));
EXPECT_EQ(actual_fanin.node_index(),
actual_fanin.node_view()->node_index());
}
}
if (same_num_regular_fanins) {
for (int i = 0; i < expected_num_regular_fanins; ++i) {
const auto& fanin = node_view->GetRegularFanin(i);
EXPECT_EQ(ParseTensorName(node_view->node()->input(i)),
TensorId(fanin.node_view()->GetName(), fanin.index()));
}
}
const int expected_num_controlling_fanins =
expected_node_view.NumControllingFanins();
bool same_num_controlling_fanins =
node_view->NumControllingFanins() == expected_num_controlling_fanins;
EXPECT_TRUE(same_num_controlling_fanins);
for (int i = 0; i < expected_num_controlling_fanins; ++i) {
auto& expected_fanin = expected_node_view.GetControllingFanins()[i];
auto* actual_fanin_node =
graph_view->GetNode(expected_fanin.node_view()->GetName());
ASSERT_NE(actual_fanin_node, nullptr);
MutableFanoutView actual_fanin(actual_fanin_node,
expected_fanin.index());
EXPECT_TRUE(node_view->HasFanin(actual_fanin));
int found = 0;
        for (const auto& control_fanin : node_view->GetControllingFanins()) {
          if (control_fanin.index() == expected_fanin.index() &&
              control_fanin.node_view()->GetName() ==
                  expected_fanin.node_view()->GetName()) {
            EXPECT_EQ(control_fanin.node_index(),
                      control_fanin.node_view()->node_index());
            ++found;
          }
        }
EXPECT_EQ(found, 1);
}
if (same_num_controlling_fanins && same_num_regular_fanins) {
for (int i = 0; i < expected_num_controlling_fanins; ++i) {
const auto& fanin = node_view->GetControllingFanins()[i];
EXPECT_EQ(ParseTensorName(node_view->node()->input(
i + expected_num_regular_fanins)),
TensorId(fanin.node_view()->GetName(), fanin.index()));
}
}
EXPECT_EQ(node_view->NumRegularFanouts(),
expected_node_view.NumRegularFanouts());
const int num_output_ports =
expected_node_view.GetRegularFanouts().size();
ASSERT_EQ(node_view->GetRegularFanouts().size(), num_output_ports);
for (int i = 0; i < num_output_ports; ++i) {
        auto& expected_fanouts_at_port_i =
            expected_node_view.GetRegularFanouts()[i];
const int num_fanouts_at_port = expected_fanouts_at_port_i.size();
auto& actual_fanouts_at_port_i = node_view->GetRegularFanouts()[i];
EXPECT_EQ(actual_fanouts_at_port_i.size(), num_fanouts_at_port);
for (int j = 0; j < num_fanouts_at_port; ++j) {
auto& expected_fanout = expected_fanouts_at_port_i[j];
auto* actual_fanout_node =
graph_view->GetNode(expected_fanout.node_view()->GetName());
ASSERT_NE(actual_fanout_node, nullptr);
MutableFaninView actual_fanout(actual_fanout_node,
expected_fanout.index());
EXPECT_TRUE(node_view->HasFanout(actual_fanout));
int found = 0;
for (const auto& fanout : actual_fanouts_at_port_i) {
if (fanout.index() == expected_fanout.index() &&
fanout.node_view()->GetName() ==
expected_fanout.node_view()->GetName()) {
EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index());
++found;
}
}
EXPECT_EQ(found, 1);
}
}
const int num_controlled_fanouts =
expected_node_view.NumControlledFanouts();
EXPECT_EQ(node_view->NumControlledFanouts(), num_controlled_fanouts);
for (int i = 0; i < num_controlled_fanouts; ++i) {
const auto& expected_fanout =
expected_node_view.GetControlledFanouts()[i];
auto* actual_fanout_node =
graph_view->GetNode(expected_fanout.node_view()->GetName());
ASSERT_NE(actual_fanout_node, nullptr);
MutableFaninView actual_fanout(actual_fanout_node,
expected_fanout.index());
EXPECT_TRUE(node_view->HasFanout(actual_fanout));
int found = 0;
for (const auto& fanout : node_view->GetControlledFanouts()) {
if (fanout.index() == expected_fanout.index() &&
fanout.node_view()->GetName() ==
expected_fanout.node_view()->GetName()) {
EXPECT_EQ(fanout.node_index(), fanout.node_view()->node_index());
++found;
}
}
EXPECT_EQ(found, 1);
}
EXPECT_EQ(node_view->NumAttrs(), expected_node_view.NumAttrs());
for (const auto& expected_attr : expected_node_view.GetAttrs()) {
auto* attr = node_view->GetAttr(expected_attr.first);
EXPECT_TRUE(AreAttrValuesEqual(*attr, expected_attr.second));
}
}
CompareGraphs(*graph_view->graph(), expected_graph);
}
};
class MutationTest : public CompareGraphTest {};
constexpr char kDeviceCPU0[] = "/device:CPU:0";
constexpr char kDeviceGPU0[] = "/device:GPU:0";
GraphDef SimpleTestGraphForMutation() {
return GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, kDeviceCPU0)},
{});
}
TEST_F(MutationTest, AddNewNode) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef empty_node;
mutation->AddNode(std::move(empty_node), &s);
TF_EXPECT_OK(s);
s = errors::Internal("error");
NodeDef valid_node =
NDef("valid", "IdentityN", {"a:1", "^b"}, {{"N", 1}}, "foo");
mutation->AddNode(std::move(valid_node), &s);
TF_EXPECT_OK(s);
NodeDef bad_node_1 =
NDef("bad", "IdentityN", {"^b", "a:1"}, {{"N", 1}}, "foo");
mutation->AddNode(std::move(bad_node_1), &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Mutation::AddNode error: node 'bad' has regular fanin 'a:1' after "
"controlling fanins.");
NodeDef bad_node_2 = NDef("bad", "IdentityN", {"bad:1"}, {}, "foo");
mutation->AddNode(std::move(bad_node_2), &s);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Mutation::AddNode error: node 'bad' has self cycle fanin "
"'bad:1'.");
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, NewNodeBadFaninsAfterAdd) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef valid_node =
NDef("valid", "IdentityN", {"a:1", "^b"}, {{"N", 1}}, "foo");
MutationNewNode new_node = mutation->AddNode(std::move(valid_node), &s);
mutation->AddOrUpdateRegularFanin(new_node, 1, {"valid", 2});
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: new node 'valid' is ill-formed.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, NewNodesConflictingNames) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node_1 = NDef("a", "", {});
mutation->AddNode(std::move(new_node_1), &s);
TF_EXPECT_OK(s);
NodeDef new_node_2 = NDef("a", "", {});
mutation->AddNode(std::move(new_node_2), &s);
TF_EXPECT_OK(s);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: multiple nodes with the name: 'a' exists in "
"Mutation.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, UpdateNodeAndAddSelfLoop) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->AddControllingFanin(d_node, "d");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: inplace updated node 'd' is ill-formed.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, RenameNodeAndAddSelfLoop) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->UpdateNodeName(d_node, "e");
mutation->AddControllingFanin(d_node, "e");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: renamed updated node 'e' ('d') is ill-formed.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, ExistingNodesConflictingNames) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
mutation->UpdateNodeName(a_node, "b");
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
mutation->UpdateNodeOp(b_node, "Identity");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: multiple nodes with the name: 'b' exists in "
"Mutation.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, NewAndExistingNodesConflictingNames) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node = NDef("a", "", {});
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
mutation->UpdateNodeDevice(a_node, "foo");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: multiple nodes with the name: 'a' exists in "
"Mutation.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, NewAndExistingRenamedNodesConflictingNames) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node = NDef("e", "", {});
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->UpdateNodeName(d_node, "e");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: multiple nodes with the name: 'e' exists in "
"Mutation.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, RemoveNodesWithFanouts) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
mutation->RemoveNode(b_node);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'b'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->RemoveNode(d_node);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, SwapNodeNamesWithCycle) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->UpdateNodeName(d_node, "b");
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
mutation->UpdateNodeName(b_node, "d");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: renamed updated node 'b' ('d') is ill-formed.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
mutation->AddOrUpdateRegularFanin(d_node, 1, {"d", 3});
mutation->RemoveControllingFanin(d_node, "b");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("d", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {"a:2", "d:3", "a:4", "^c"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, RenamedNodeWithFanouts) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
mutation->UpdateNodeName(a_node, "b");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'a'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
mutation->UpdateNodeName(a_node, "a");
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
mutation->UpdateNodeName(b_node, "e");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing "
"node 'b'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
TEST_F(MutationTest, RemoveExistingNodeAndReplaceWithNewNode) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->RemoveNode(d_node);
NodeDef new_node = NDef("d", kNoOp, {"c:8", "^a"}, {}, kDeviceCPU0);
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0),
NDef("d", kNoOp, {"c:8", "^a"}, {}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, UpdateNodeNameAndRemoveFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->UpdateNodeName(d_node, "e");
mutation->RemoveRegularFanin(d_node, 1);
mutation->RemoveRegularFanin(d_node, 2);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0),
NDef("e", kNoOp, {"a:2", "^c", "^b"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, UpdateNodeNameAndRemoveRegularFanout) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
mutation->UpdateNodeName(a_node, "e");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'a'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->RemoveRegularFanin(d_node, 2);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'a'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
mutation->AddOrUpdateRegularFanin(d_node, 0, {"b", 1});
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("e", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {}, {}, kDeviceCPU0),
NDef("c", kNoOp, {}, {}, kDeviceCPU0),
NDef("d", kNoOp, {"b:1", "b:3", "^c", "^b"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, UpdateNodeNameAndRemoveControlledFanout) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
mutation->UpdateNodeName(c_node, "e");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'c'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->UpdateNodeDevice(d_node, kDeviceGPU0);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
expected_error_msg =
"Mutation::Apply error: fanout 'd' exist for missing node 'c'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
mutation->RemoveControllingFanin(d_node, "c");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {}, kDeviceCPU0),
NDef("b", kNoOp, {}, {}, kDeviceCPU0),
NDef("e", kNoOp, {}, {}, kDeviceCPU0),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^b"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, kDeviceGPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, EmptyMutation) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
TF_EXPECT_OK(mutation->Apply());
CompareGraphViewWithGraph(&graph_view, SimpleTestGraphForMutation());
}
constexpr char kIdentity[] = "Identity";
constexpr char kDeviceCPU1[] = "/device:CPU:1";
constexpr char kDeviceGPU1[] = "/device:GPU:1";
GraphDef TestGraphForMutation() {
return GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1)},
{});
}
TEST_F(MutationTest, SwapNodeNamesWithNoCycle) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
MutableNodeView* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
mutation->UpdateNodeName(b_node, "c");
mutation->UpdateNodeName(c_node, "b");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("c", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("b", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, RemoveMultipleDependentNodes) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->RemoveNode(c_node);
mutation->RemoveNode(d_node);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
constexpr char kDeviceGPU2[] = "/device:GPU:2";
TEST_F(MutationTest, AddSimpleNewNode) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node =
NDef("new_node", kIdentity, {}, {{"T", DT_INT64}}, kDeviceGPU2);
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("new_node", kIdentity, {}, {{"T", DT_INT64}}, kDeviceGPU2)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
constexpr char kDeviceGPU3[] = "/device:GPU:3";
TEST_F(MutationTest, AddAndUpdateNodesWithFanins) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node_1 = NDef("new_node_1", kNoOp, {"a:2", "d:5", "^b", "^c"},
{{"new_node_1_attr_1", 5.0f}}, kDeviceGPU2);
mutation->AddNode(std::move(new_node_1), &s);
TF_EXPECT_OK(s);
NodeDef new_node_2 =
NDef("new_node_2", kNoOp, {"a:3", "new_node_1:5", "^d", "^new_node_1"},
{{"new_node_2_attr_1", 9}}, kDeviceGPU3);
mutation->AddNode(std::move(new_node_2), &s);
TF_EXPECT_OK(s);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->AddOrUpdateRegularFanin(d_node, 3, {"c", 6});
mutation->AddOrUpdateRegularFanin(d_node, 1, {"new_node_1", 5});
mutation->AddControllingFanin(d_node, "new_node_2");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp,
{"a:2", "new_node_1:5", "a:4", "c:6", "^c", "^b", "^new_node_2"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("new_node_1", kNoOp, {"a:2", "d:5", "^b", "^c"},
{{"new_node_1_attr_1", 5.0f}}, kDeviceGPU2),
NDef("new_node_2", kNoOp, {"a:3", "new_node_1:5", "^d", "^new_node_1"},
{{"new_node_2_attr_1", 9}}, kDeviceGPU3)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, UpdateNodeNameToReplaceExistingNode) {
auto test_graph = []() {
return GDef(
{NDef("a", kNoOp, {}, {{"attr_a", 8}}, kDeviceCPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU1),
NDef("c", kNoOp, {"b:4", "^a"}, {{"attr_c", "test"}}, kDeviceGPU2),
NDef("d", kNoOp, {"a:2", "c:5", "a:4", "^a", "^c"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU3)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
mutation->UpdateNodeName(b_node, "c");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {{"attr_a", 8}}, kDeviceCPU0),
NDef("c", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "c:5", "a:4", "^a", "^c"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU3)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, NewNodeWithMutations) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef new_node_def = NDef("node", kNoOp, {"a:2", "b:3", "^c"},
{{"attr_1", 1}, {"attr_2", 2.0f}}, kDeviceGPU3);
MutationNewNode new_node = mutation->AddNode(std::move(new_node_def), &s);
TF_EXPECT_OK(s);
mutation->AddControllingFanin(new_node, "a");
mutation->RemoveControllingFanin(new_node, "c");
mutation->AddOrUpdateRegularFanin(new_node, 0, {"b", 6});
mutation->RemoveRegularFanin(new_node, 1);
mutation->UpdateNodeName(new_node, "new_node");
mutation->UpdateNodeOp(new_node, kIdentity);
mutation->UpdateNodeDevice(new_node, kDeviceGPU2);
AttrValue attr_3;
attr_3.set_s("new_node_attr");
mutation->AddOrUpdateNodeAttr(new_node, "attr_3", attr_3);
AttrValue attr_1;
attr_1.set_b(true);
mutation->AddOrUpdateNodeAttr(new_node, "attr_1", attr_1);
mutation->RemoveNodeAttr(new_node, "attr_2");
AttrValue attr_4;
attr_4.set_type(DT_FLOAT);
mutation->AddOrUpdateNodeAttr(new_node, "T", attr_4);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("new_node", kIdentity, {"b:6", "^a"},
{{"attr_1", true}, {"attr_3", "new_node_attr"}, {"T", DT_FLOAT}},
kDeviceGPU2)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, UpdatedNodeWithNonFaninMutations) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->UpdateNodeName(d_node, "e");
mutation->UpdateNodeOp(d_node, kIdentity);
mutation->UpdateNodeDevice(d_node, kDeviceGPU2);
AttrValue attr_d_1;
attr_d_1.set_b(false);
mutation->AddOrUpdateNodeAttr(d_node, "attr_d_1", attr_d_1);
AttrValue attr_e_3;
attr_e_3.set_s("test_string");
mutation->AddOrUpdateNodeAttr(d_node, "attr_e_3", attr_e_3);
mutation->RemoveNodeAttr(d_node, "attr_d_2");
AttrValue attr_e_4;
attr_e_4.set_type(DT_INT64);
mutation->AddOrUpdateNodeAttr(d_node, "T", attr_e_4);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("e", kIdentity, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", false}, {"attr_e_3", "test_string"}, {"T", DT_INT64}},
kDeviceGPU2)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, Reset) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->UpdateNodeName(a_node, "e");
mutation->AddNode({}, &s);
TF_EXPECT_OK(s);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
string expected_error_msg =
"Mutation::Apply error: fanout 'b' exist for missing node 'a'.";
EXPECT_EQ(s.message(), expected_error_msg);
CompareGraphViewWithGraph(&graph_view, TestGraphForMutation());
mutation->Reset();
TF_EXPECT_OK(mutation->Apply());
CompareGraphViewWithGraph(&graph_view, TestGraphForMutation());
}
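// A minimal sketch of the reset-and-retry pattern the test above exercises
// (illustrative only; `view` and `node` stand in for caller-provided state):
//
//   Mutation* m = view.GetMutationBuilder();
//   m->UpdateNodeName(node, "new_name");
//   if (!m->Apply().ok()) {
//     m->Reset();  // Drops every staged change; the graph stays untouched.
//   }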
TEST_F(MutationTest, RenameNodeAndAddNewNodeWithRenamedNodeOldName) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->UpdateNodeName(b_node, "e");
NodeDef new_node =
NDef("b", kIdentity, {"c:2"}, {{"T", DT_INT64}}, kDeviceGPU3);
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("e", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("b", kIdentity, {"c:2"}, {{"T", DT_INT64}}, kDeviceGPU3)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, ShiftNodesWithFanouts) {
auto test_graph = []() {
return GDef({NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^a", "^c", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}},
kDeviceGPU0)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* c_node = graph_view.GetNode("c");
ASSERT_NE(c_node, nullptr);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->RemoveControllingFanin(d_node, "c");
mutation->RemoveNode(c_node);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("d", kNoOp, {"a:2", "b:3", "a:4", "^a", "^b"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("b", kNoOp, {"a:2"}, {{"attr_b", 3.0f}}, kDeviceCPU0),
NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, RemoveFaninFanoutAndShiftFanout) {
auto test_graph = []() {
return GDef({NDef("a", kNoOp, {}, {}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2", "a:1"}, {}, kDeviceGPU1),
NDef("c", kNoOp, {"a:1", "a:2"}, {}, kDeviceGPU2)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->RemoveRegularFanin(b_node, 1);
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph =
GDef({NDef("a", kNoOp, {}, {}, kDeviceGPU0),
NDef("b", kNoOp, {"a:2"}, {}, kDeviceGPU1),
NDef("c", kNoOp, {"a:1", "a:2"}, {}, kDeviceGPU2)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
TEST_F(MutationTest, ConsecutiveMutations) {
GraphDef graph = TestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* b_node = graph_view.GetNode("b");
ASSERT_NE(b_node, nullptr);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->RemoveNode(b_node);
mutation->AddOrUpdateRegularFanin(d_node, 1, {"c", 5});
mutation->RemoveControllingFanin(d_node, "b");
NodeDef new_node_1 = NDef("new_node_1", kIdentity, {"a:3", "d:5", "^d"},
{{"T", DT_FLOAT}}, kDeviceGPU2);
MutationNewNode new_node_1_node =
mutation->AddNode(std::move(new_node_1), &s);
TF_EXPECT_OK(s);
mutation->AddOrUpdateRegularFanin(new_node_1_node, 0, {"c", 5});
mutation->RemoveRegularFanin(new_node_1_node, 1);
mutation->AddOrUpdateRegularFanin(new_node_1_node, 1, {"a", 6});
mutation->AddControllingFanin(new_node_1_node, "a");
mutation->RemoveControllingFanin(new_node_1_node, "d");
TF_EXPECT_OK(mutation->Apply());
GraphDef expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp, {"a:2", "c:5", "a:4", "^c"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("new_node_1", kIdentity, {"c:5", "a:6", "^a"}, {{"T", DT_FLOAT}},
kDeviceGPU2)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
mutation->AddOrUpdateRegularFanin(d_node, 3, {"new_node_2", 6});
mutation->AddOrUpdateRegularFanin(d_node, 1, {"new_node_1", 8});
mutation->AddControllingFanin(d_node, "new_node_2");
mutation->AddControllingFanin(d_node, "a");
mutation->RemoveControllingFanin(d_node, "c");
NodeDef new_node_2 =
NDef("new_node_2", kNoOp, {"c:4", "new_node_1:5", "^d", "^c"});
MutationNewNode new_node_2_node =
mutation->AddNode(std::move(new_node_2), &s);
TF_EXPECT_OK(s);
mutation->UpdateNodeDevice(new_node_2_node, kDeviceGPU3);
mutation->AddOrUpdateRegularFanin(new_node_2_node, 0, {"new_node_1", 4});
mutation->RemoveRegularFanin(new_node_2_node, 1);
mutation->RemoveControllingFanin(new_node_2_node, "c");
mutation->AddControllingFanin(new_node_2_node, "a");
mutation->AddControllingFanin(new_node_2_node, "new_node_1");
TF_EXPECT_OK(mutation->Apply());
expected_graph = GDef(
{NDef("a", kIdentity, {}, {{"attr_a", 8}, {"T", DT_FLOAT}}, kDeviceGPU0),
NDef("c", kNoOp, {"^a"}, {{"attr_c", "test"}}, kDeviceCPU1),
NDef("d", kNoOp,
{"a:2", "new_node_1:8", "a:4", "new_node_2:6", "^new_node_2", "^a"},
{{"attr_d_1", "a"}, {"attr_d_2", 2.0f}}, kDeviceGPU1),
NDef("new_node_1", kIdentity, {"c:5", "a:6", "^a"}, {{"T", DT_FLOAT}},
kDeviceGPU2),
NDef("new_node_2", kNoOp, {"new_node_1:4", "^d", "^a", "^new_node_1"},
{}, kDeviceGPU3)},
{});
CompareGraphViewWithGraph(&graph_view, expected_graph);
}
constexpr char kMatchingFiles[] = "MatchingFiles";
TEST_F(MutationTest, OpWithUnsupportedDevice) {
GTEST_SKIP() << "Reenable once offline optimization tests enable CUDA.";
auto test_graph = []() {
return GDef({NDef("a", kMatchingFiles, {}, {}, kDeviceCPU0)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->UpdateNodeDevice(a_node, kDeviceGPU1);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
CompareGraphViewWithGraph(&graph_view, test_graph());
mutation->Reset();
NodeDef new_node = NDef("new_node", kMatchingFiles, {}, {}, kDeviceGPU2);
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
CompareGraphViewWithGraph(&graph_view, test_graph());
}
TEST_F(MutationTest, OpMissingAttribute) {
GTEST_SKIP() << "Reenable once offline optimization tests enable CUDA.";
auto test_graph = []() {
return GDef({NDef("a", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU0)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->RemoveNodeAttr(a_node, "T");
s = mutation->Apply();
EXPECT_FALSE(s.ok());
CompareGraphViewWithGraph(&graph_view, test_graph());
mutation->Reset();
NodeDef new_node = NDef("new_node", kIdentity, {}, {}, kDeviceGPU2);
mutation->AddNode(std::move(new_node), &s);
TF_EXPECT_OK(s);
s = mutation->Apply();
EXPECT_FALSE(s.ok());
CompareGraphViewWithGraph(&graph_view, test_graph());
}
TEST_F(MutationTest, EmptyMutationUpdateIndexPersisting) {
auto test_graph = []() {
return GDef({NDef("a", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU0)},
{});
};
GraphDef graph = test_graph();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
MutableNodeView* a_node = graph_view.GetNode("a");
ASSERT_NE(a_node, nullptr);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->UpdateNodeName(a_node, "a");
TF_EXPECT_OK(mutation->Apply());
CompareGraphViewWithGraph(&graph_view, test_graph());
mutation->Reset();
mutation->UpdateNodeName(a_node, "a");
TF_EXPECT_OK(mutation->Apply());
CompareGraphViewWithGraph(&graph_view, test_graph());
}
class TopologicalSortTest : public CompareGraphTest {
protected:
void CompareGraphOrder(const MutableGraphView& graph_view,
absl::Span<const string> node_names) {
const int num_nodes = graph_view.NumNodes();
ASSERT_EQ(num_nodes, node_names.size());
for (int i = 0; i < num_nodes; ++i) {
EXPECT_EQ(graph_view.GetNode(i)->GetName(), node_names[i]);
}
}
void CompareGraphNodePrecedences(
const MutableGraphView& graph_view,
absl::Span<const std::pair<string, string>> node_precedences) {
for (const auto& node_precedence : node_precedences) {
auto* parent_node = graph_view.GetNode(node_precedence.first);
ASSERT_NE(parent_node, nullptr);
auto* child_node = graph_view.GetNode(node_precedence.second);
ASSERT_NE(child_node, nullptr);
      EXPECT_LT(parent_node->node_index(), child_node->node_index());
}
}
};
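// CompareGraphNodePrecedences checks only relative order (parent index less
// than child index) instead of one exact permutation, because a topological
// sort of a DAG is generally not unique. A hypothetical exact-order check
// like the commented line below would over-constrain the tests, since
// several node orders can satisfy the same precedence constraints:
//
//   // CompareGraphOrder(graph_view, {"f", "e", "c", "d", "a", "b"});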
TEST_F(TopologicalSortTest, ActiveMutationSort) {
auto test_graph = []() {
return GDef({NDef("a", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kIdentity, {"a"}, {{"T", DT_FLOAT}}, kDeviceGPU1)},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
Mutation* mutation = graph_view.GetMutationBuilder();
mutation->AddNode({}, &status);
TF_ASSERT_OK(status);
for (bool ignore_cycles : {false, true}) {
status = graph_view.SortTopologically(ignore_cycles, {});
EXPECT_FALSE(status.ok());
EXPECT_EQ(
status.message(),
"MutableGraphView::SortTopologically error: active mutation exists.");
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"a", "b"});
}
}
TEST_F(TopologicalSortTest, BadExtraDependenciesSort) {
auto test_graph = []() {
return GDef({NDef("a", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU1)},
{});
};
GraphDef graph_1 = test_graph();
Status status;
MutableGraphView graph_view_1(&graph_1, &status);
TF_ASSERT_OK(status);
MutableNodeView* a_node_1 = graph_view_1.GetNode("a");
GraphDef graph_2 = test_graph();
MutableGraphView graph_view_2(&graph_2, &status);
TF_ASSERT_OK(status);
MutableNodeView* b_node_2 = graph_view_2.GetNode("b");
for (bool ignore_cycles : {false, true}) {
status =
graph_view_2.SortTopologically(ignore_cycles, {{a_node_1, b_node_2}});
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::SortTopologically error: invalid extra "
"dependencies.");
CompareGraphViewWithGraph(&graph_view_2, test_graph());
CompareGraphOrder(graph_view_2, {"a", "b"});
}
}
TEST_F(TopologicalSortTest, NoCyclesAllowed) {
auto test_graph = []() {
return GDef(
{NDef("a", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kIdentity, {"a", "c"}, {{"T", DT_FLOAT}}, kDeviceGPU1),
NDef("c", kIdentity, {"b"}, {{"T", DT_FLOAT}}, kDeviceGPU1)},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
status = graph_view.SortTopologically(false, {});
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::SortTopologically error: detected edge(s) "
"creating cycle(s) {'c' -> 'b'}.");
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"a", "b", "c"});
TF_EXPECT_OK(graph_view.SortTopologically(true, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphNodePrecedences(graph_view, {{"a", "b"}, {"a", "c"}});
}
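// Sketch of the two sorting modes contrasted above (argument meaning taken
// from the `ignore_cycles` loops elsewhere in this file):
//
//   Status s = view.SortTopologically(/*ignore_cycles=*/false, {});   // hard-fails on cycles
//   TF_EXPECT_OK(view.SortTopologically(/*ignore_cycles=*/true, {}));  // best-effort order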
TEST_F(TopologicalSortTest, NoNodesWithZeroFanins) {
auto test_graph = []() {
return GDef({NDef("a", kIdentity, {"b"}, {{"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kIdentity, {"a"}, {{"T", DT_FLOAT}}, kDeviceGPU1)},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
status = graph_view.SortTopologically(false, {});
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::SortTopologically error: was not able to sort "
"all nodes topologically.");
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"a", "b"});
TF_EXPECT_OK(graph_view.SortTopologically(true, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
}
TEST_F(TopologicalSortTest, DidNotReachAllNodes) {
auto test_graph = []() {
return GDef({NDef("c", kIdentity, {}, {{"T", DT_FLOAT}}, kDeviceGPU2),
NDef("a", kIdentity, {"b"}, {{"T", DT_FLOAT}}, kDeviceGPU0),
NDef("b", kIdentity, {"a"}, {{"T", DT_FLOAT}}, kDeviceGPU1)},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
status = graph_view.SortTopologically(false, {});
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::SortTopologically error: was not able to sort "
"all nodes topologically.");
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"c", "a", "b"});
TF_EXPECT_OK(graph_view.SortTopologically(true, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"a", "b", "c"});
}
TEST_F(TopologicalSortTest, NoLoopGraph) {
auto test_graph = []() {
return GDef({NDef("c", kIdentity, {"f"}), NDef("a", kIdentity, {"f", "e"}),
NDef("b", kIdentity, {"e", "d"}), NDef("d", kIdentity, {"c"}),
NDef("f", kIdentity, {}), NDef("e", kIdentity, {})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphNodePrecedences(
graph_view,
{{"f", "a"}, {"f", "c"}, {"e", "a"}, {"e", "b"}, {"c", "d"}, {"d", "b"}});
}
TEST_F(TopologicalSortTest, ValidLoopGraph) {
auto test_graph = []() {
return GDef(
{NDef("while/Const_1", "Const", {}),
NDef("while/Enter_2", "Enter", {"while/Const_1"},
{{"frame_name", "while/while_context"}}),
NDef("while/Const", "Const", {}),
NDef("while/Enter_1", "Enter", {"while/Const"},
{{"frame_name", "while/while_context"}}),
NDef("while/iteration_counter", "Const", {}),
NDef("while/Enter", "Enter", {"while/iteration_counter"},
{{"frame_name", "while/while_context"}}),
NDef("while/maximum_iterations", "Const", {}),
NDef("while/Less/Enter", "Enter", {"while/maximum_iterations"},
{{"frame_name", "while/while_context"}}),
NDef("while/Less", "Less", {"while/Merge", "while/Less/Enter"}),
NDef("while/LogicalAnd", "LogicalAnd",
{"while/Less", "while/cond/Merge"}),
NDef("while/LoopCond", "LoopCond", {"while/LogicalAnd"}),
NDef("while/Switch", "Switch", {"while/Merge", "while/LoopCond"},
{{"_class", "loc:@while/Merge"}}),
NDef("while/Identity", "Identity", {"while/Switch:1"}),
NDef("while/add", "Add", {"while/Identity", "while/add/y"}),
NDef("while/NextIteration", "NextIteration", {"while/add"}),
NDef("while/Merge", "Merge", {"while/Enter", "while/NextIteration"}),
NDef("while/Less_1/y", "Const", {"^while/Merge"}),
NDef("while/add/y", "Const", {"^while/Identity"}),
NDef("while/mul/y", "Const", {"^while/Identity"}),
NDef("while/add_2/y", "Const", {"^while/Identity"}),
NDef("while/Switch_1", "Switch", {"while/Merge_1", "while/LoopCond"},
{{"_class", "loc:@while/Merge_1"}}),
NDef("while/Identity_1", "Identity", {"while/Switch_1:1"}),
NDef("while/add_2", "Add", {"while/Identity_1", "while/add_2/y"}),
NDef("while/NextIteration_1", "NextIteration", {"while/add_2"}),
NDef("while/Merge_1", "Merge",
{"while/Enter_1", "while/NextIteration_1"}),
NDef("while/Less_1", "Less", {"while/Merge_1", "while/Less_1/y"}),
NDef("while/cond/Switch", "Switch", {"while/Less_1", "while/Less_1"}),
NDef("while/cond/switch_f", "Identity", {"while/cond/Switch"}),
NDef("while/cond/Const_1", "Const", {"^while/cond/switch_f"}),
NDef("while/cond/switch_t", "Identity", {"while/cond/Switch:1"}),
NDef("while/cond/Const", "Const", {"^while/cond/switch_t"}),
NDef("while/cond/Merge", "Merge",
{"while/cond/Const_1", "while/cond/Const"}),
NDef("TensorArrayUnstack/range/delta", "Const", {}),
NDef("TensorArrayUnstack/range/start", "Const", {}),
NDef("TensorArrayUnstack/strided_slice/stack_2", "Const", {}),
NDef("TensorArrayUnstack/strided_slice/stack_1", "Const", {}),
NDef("TensorArrayUnstack/strided_slice/stack", "Const", {}),
NDef("TensorArrayUnstack/Shape", "Const", {}),
NDef("TensorArrayUnstack/strided_slice", "StridedSlice",
{"TensorArrayUnstack/Shape",
"TensorArrayUnstack/strided_slice/stack",
"TensorArrayUnstack/strided_slice/stack_1",
"TensorArrayUnstack/strided_slice/stack_2"}),
NDef("TensorArrayUnstack/range", "Range",
{"TensorArrayUnstack/range/start",
"TensorArrayUnstack/strided_slice",
"TensorArrayUnstack/range/delta"}),
NDef("TensorArray/size", "Const", {}),
NDef("TensorArray", "TensorArrayV3", {"TensorArray/size"}),
NDef("while/TensorArrayReadV3/Enter", "Enter", {"TensorArray"},
{{"frame_name", "while/while_context"}}),
NDef("Const", "Const", {}),
NDef("TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3",
"TensorArrayScatterV3",
{"TensorArray", "TensorArrayUnstack/range", "Const",
"TensorArray:1"},
{{"_class", "loc@Const"}}),
NDef("while/TensorArrayReadV3/Enter_1", "Enter",
{"TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3"},
{{"frame_name", "while/while_context"}}),
NDef("while/TensorArrayReadV3", "TensorArrayReadV3",
{"while/TensorArrayReadV3/Enter", "while/Identity_1",
"while/TensorArrayReadV3/Enter_1"}),
NDef("while/add_1", "Add", {"while/mul", "while/TensorArrayReadV3"}),
NDef("while/NextIteration_2", "NextIteration", {"while/add_1"}),
NDef("while/Merge_2", "Merge",
{"while/Enter_2", "while/NextIteration_2"}),
NDef("while/Switch_2", "Switch", {"while/Merge_2", "while/LoopCond"},
{{"_class", "loc@while/Merge_2"}}),
NDef("while/Exit_2", "Exit", {"while/Switch_2"}),
NDef("while/Identity_2", "Identity", {"while/Switch_2:1"}),
NDef("while/mul", "Mul", {"while/Identity_2", "while/mul/y"})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
}
TEST_F(TopologicalSortTest, DuplicateFanins) {
auto test_graph = []() {
return GDef(
{NDef("b", kIdentity, {"a", "a", "^a"}), NDef("a", "Const", {})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphOrder(graph_view, {"a", "b"});
}
TEST_F(TopologicalSortTest, DiamondDependencyNotACycle) {
auto test_graph = []() {
return GDef({NDef("e", kIdentity, {"b", "c", "d"}),
NDef("b", kIdentity, {"a"}), NDef("a", "Const", {}),
NDef("d", kIdentity, {"a"}), NDef("c", kIdentity, {"a"})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphNodePrecedences(
graph_view,
{{"a", "b"}, {"a", "c"}, {"a", "d"}, {"b", "e"}, {"c", "e"}, {"d", "e"}});
}
TEST_F(TopologicalSortTest, ExtraDependencies) {
auto test_graph = []() {
return GDef({NDef("c", kIdentity, {"f"}), NDef("a", kIdentity, {"f", "e"}),
NDef("b", kIdentity, {"e", "d"}), NDef("d", kIdentity, {"c"}),
NDef("f", kIdentity, {}), NDef("e", kIdentity, {})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
auto* e_node = graph_view.GetNode("e");
ASSERT_NE(e_node, nullptr);
auto* f_node = graph_view.GetNode("f");
ASSERT_NE(f_node, nullptr);
  TF_EXPECT_OK(graph_view.SortTopologically(/*ignore_cycles=*/false,
                                            {{e_node, f_node}}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphNodePrecedences(graph_view, {{"f", "a"},
{"f", "c"},
{"e", "a"},
{"e", "b"},
{"c", "d"},
{"d", "b"},
{"e", "f"}});
}
TEST_F(TopologicalSortTest, PushVisitedNodes) {
auto test_graph = []() {
return GDef({NDef("d", kIdentity, {"c"}), NDef("c", kIdentity, {"b", "a"}),
NDef("b", kIdentity, {"a"}), NDef("a", kIdentity, {})},
{});
};
GraphDef graph = test_graph();
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
CompareGraphViewWithGraph(&graph_view, test_graph());
CompareGraphNodePrecedences(graph_view,
{{"a", "b"}, {"a", "c"}, {"b", "c"}, {"c", "d"}});
}
#define RUN_NUM_NODE_NUM_EDGE_BENCHMARK(name) \
BENCHMARK(name) \
->ArgPair(10, 2) \
->ArgPair(100, 2) \
->ArgPair(1000, 2) \
->ArgPair(10000, 2) \
->ArgPair(25000, 2) \
->ArgPair(50000, 2) \
->ArgPair(100000, 2) \
->ArgPair(10, 4) \
->ArgPair(100, 4) \
->ArgPair(1000, 4) \
->ArgPair(10000, 4) \
->ArgPair(25000, 4) \
->ArgPair(50000, 4) \
->ArgPair(100000, 4) \
->ArgPair(10, 8) \
->ArgPair(100, 8) \
->ArgPair(1000, 8) \
->ArgPair(10000, 8) \
->ArgPair(25000, 8) \
->ArgPair(50000, 8) \
->ArgPair(100000, 8) \
->ArgPair(10, 16) \
->ArgPair(100, 16) \
->ArgPair(1000, 16) \
->ArgPair(10000, 16) \
->ArgPair(25000, 16) \
->ArgPair(50000, 16) \
->ArgPair(100000, 16);
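// The ArgPair grid above sweeps (num_nodes, num_edges_per_node) so each
// benchmark reports scaling along both axes. An extra configuration could be
// registered the same way without touching the macro (the pair below is
// hypothetical, not part of the original sweep):
//
//   // BENCHMARK(BM_GraphViewConstruction)->ArgPair(200000, 2);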
template <typename GraphViewT>
void BM_GraphViewTConstruction(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
GraphDef graph_def = test::CreateGraphDef(num_nodes, num_edges_per_node);
for (auto i : state) {
Status s;
GraphViewT graph_view(&graph_def, &s);
}
}
void BM_GraphViewConstruction(::testing::benchmark::State& state) {
BM_GraphViewTConstruction<GraphView>(state);
}
void BM_MutableGraphViewConstruction(::testing::benchmark::State& state) {
BM_GraphViewTConstruction<MutableGraphView>(state);
}
void BM_MutableGraphViewClearAttrs(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
const int num_edges_per_node = state.range(1);
GraphDef graph_def = test::CreateGraphDef(num_nodes, num_edges_per_node);
Status s;
MutableGraphView graph_view(&graph_def, &s);
for (auto i : state) {
utils::Mutation* mutation = graph_view.GetMutationBuilder();
for (int j = 0; j < num_nodes; ++j) {
mutation->RemoveNodeAttr(graph_view.GetNode(j), "_some_random_attr");
}
s = mutation->Apply();
}
}
RUN_NUM_NODE_NUM_EDGE_BENCHMARK(BM_GraphViewConstruction);
RUN_NUM_NODE_NUM_EDGE_BENCHMARK(BM_MutableGraphViewConstruction);
RUN_NUM_NODE_NUM_EDGE_BENCHMARK(BM_MutableGraphViewClearAttrs);
#define RUN_NUM_NODE_BENCHMARK(name) \
BENCHMARK(name) \
->Arg(10) \
->Arg(100) \
->Arg(1000) \
->Arg(10000) \
->Arg(25000) \
->Arg(50000) \
->Arg(100000);
template <typename GraphViewT>
void BM_GraphViewTConstructionWithControlDependencies(
::testing::benchmark::State& state) {
const int num_fanins_fanouts = state.range(0);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins_fanouts, num_fanins_fanouts, num_fanins_fanouts,
      num_fanins_fanouts, /*fanout_unique_index=*/true);
for (auto i : state) {
Status s;
GraphViewT graph_view(&graph_def, &s);
}
}
void BM_GraphViewConstructionWithControlDependencies(
::testing::benchmark::State& state) {
BM_GraphViewTConstructionWithControlDependencies<GraphView>(state);
}
void BM_MutableGraphViewConstructionWithControlDependencies(
::testing::benchmark::State& state) {
BM_GraphViewTConstructionWithControlDependencies<MutableGraphView>(state);
}
RUN_NUM_NODE_BENCHMARK(BM_GraphViewConstructionWithControlDependencies);
RUN_NUM_NODE_BENCHMARK(BM_MutableGraphViewConstructionWithControlDependencies);
template <typename GraphViewT>
void BM_GraphViewTGetNode(::testing::benchmark::State& state) {
const int num_nodes = state.range(0);
  GraphDef graph_def =
      test::CreateGraphDef(num_nodes, /*num_edges_per_node=*/16);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
graph_view.GetNode("out");
}
}
void BM_GraphViewGetNode(::testing::benchmark::State& state) {
BM_GraphViewTGetNode<GraphView>(state);
}
void BM_MutableGraphViewGetNode(::testing::benchmark::State& state) {
BM_GraphViewTGetNode<MutableGraphView>(state);
}
RUN_NUM_NODE_BENCHMARK(BM_GraphViewGetNode);
RUN_NUM_NODE_BENCHMARK(BM_MutableGraphViewGetNode);
#define RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(name) \
BENCHMARK(name) \
->ArgPair(10, 10) \
->ArgPair(10, 100) \
->ArgPair(10, 1000) \
->ArgPair(10, 10000) \
->ArgPair(10, 100000) \
->ArgPair(100, 10) \
->ArgPair(100, 100) \
->ArgPair(100, 1000) \
->ArgPair(100, 10000) \
->ArgPair(100, 100000) \
->ArgPair(1000, 10) \
->ArgPair(1000, 100) \
->ArgPair(1000, 1000) \
->ArgPair(1000, 10000) \
->ArgPair(1000, 100000) \
->ArgPair(10000, 10) \
->ArgPair(10000, 100) \
->ArgPair(10000, 1000) \
->ArgPair(10000, 10000) \
->ArgPair(10000, 100000) \
->ArgPair(100000, 10) \
->ArgPair(100000, 100) \
->ArgPair(100000, 1000) \
->ArgPair(100000, 10000) \
->ArgPair(100000, 100000);
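// Unlike the node/edge macro above, the two arguments in this grid are read
// by the templates below as the fanin and fanout counts of a single central
// "node" built by test::CreateFaninFanoutNodeGraph:
//
//   const int num_fanins = state.range(0);   // first ArgPair value
//   const int num_fanouts = state.range(1);  // second ArgPair value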
template <typename GraphViewT>
void BM_GraphViewTGetRegularFanin(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetRegularFanin(0);
}
}
void BM_GraphViewGetRegularFanin(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanin<GraphView>(state);
}
void BM_MutableGraphViewGetRegularFanin(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanin<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetRegularFanin);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetRegularFanin);
template <typename GraphViewT>
void BM_GraphViewTGetRegularFanout(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetRegularFanout(0);
}
}
void BM_GraphViewGetRegularFanout(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanout<GraphView>(state);
}
void BM_MutableGraphViewGetRegularFanout(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanout<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetRegularFanout);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetRegularFanout);
template <typename GraphViewT>
void BM_GraphViewTGetRegularFanins(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetRegularFanins();
}
}
void BM_GraphViewGetRegularFanins(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanins<GraphView>(state);
}
void BM_MutableGraphViewGetRegularFanins(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanins<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetRegularFanins);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetRegularFanins);
template <typename GraphViewT>
void BM_GraphViewTGetRegularFanouts(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetRegularFanouts();
}
}
void BM_GraphViewGetRegularFanouts(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanouts<GraphView>(state);
}
void BM_MutableGraphViewGetRegularFanouts(::testing::benchmark::State& state) {
BM_GraphViewTGetRegularFanouts<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetRegularFanouts);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetRegularFanouts);
template <typename GraphViewT>
void BM_GraphViewTGetControllingFanins(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetControllingFanins();
}
}
void BM_GraphViewGetControllingFanins(::testing::benchmark::State& state) {
BM_GraphViewTGetControllingFanins<GraphView>(state);
}
void BM_MutableGraphViewGetControllingFanins(
::testing::benchmark::State& state) {
BM_GraphViewTGetControllingFanins<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetControllingFanins);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetControllingFanins);
template <typename GraphViewT>
void BM_GraphViewTGetControlledFanouts(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
for (auto i : state) {
auto* node = graph_view.GetNode("node");
node->GetControlledFanouts();
}
}
void BM_GraphViewGetControlledFanouts(::testing::benchmark::State& state) {
BM_GraphViewTGetControlledFanouts<GraphView>(state);
}
void BM_MutableGraphViewGetControlledFanouts(
::testing::benchmark::State& state) {
BM_GraphViewTGetControlledFanouts<MutableGraphView>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewGetControlledFanouts);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewGetControlledFanouts);
template <typename GraphViewT, bool IsLast>
inline void BM_GraphViewTHasRegularFanin(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, /*num_controlling_fanins=*/0,
      /*num_controlled_fanouts=*/0, /*fanout_unique_index=*/false);
Status s;
GraphViewT graph_view(&graph_def, &s);
const int index = IsLast ? num_fanouts - 1 : 0;
auto* node = graph_view.GetNode(absl::StrFormat("out%05d", index));
auto* fanin = graph_view.GetNode("node");
for (auto i : state) {
node->HasFanin({&graph_view, fanin->node_index(), 0});
}
}
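// The IsLast template flag probes best- vs. worst-case membership lookup:
// HasFanin is asked about the first fanout ("out00000") or about the last
// one, which only differs if the check degrades to a linear scan. The
// wrappers below are just the cross product of view type and position, e.g.:
//
//   BM_GraphViewTHasRegularFanin<GraphView, /*IsLast=*/false>(state);  // first fanout
//   BM_GraphViewTHasRegularFanin<GraphView, /*IsLast=*/true>(state);   // last fanout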
void BM_GraphViewHasRegularFaninFirst(::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanin<GraphView, false>(state);
}
void BM_GraphViewHasRegularFaninLast(::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanin<GraphView, true>(state);
}
void BM_MutableGraphViewHasRegularFaninFirst(
::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanin<MutableGraphView, false>(state);
}
void BM_MutableGraphViewHasRegularFaninLast(
::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanin<MutableGraphView, true>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasRegularFaninFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasRegularFaninLast);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasRegularFaninFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasRegularFaninLast);
template <typename GraphViewT, bool IsLast>
inline void BM_GraphViewTHasControllingFanin(
::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/true);
Status s;
GraphViewT graph_view(&graph_def, &s);
const int index = IsLast ? num_fanouts - 1 : 0;
auto* node = graph_view.GetNode(absl::StrFormat("control_out%05d", index));
auto* fanin = graph_view.GetNode("node");
for (auto i : state) {
node->HasFanin({&graph_view, fanin->node_index(), Graph::kControlSlot});
}
}
void BM_GraphViewHasControllingFaninFirst(::testing::benchmark::State& state) {
BM_GraphViewTHasControllingFanin<GraphView, false>(state);
}
void BM_GraphViewHasControllingFaninLast(::testing::benchmark::State& state) {
BM_GraphViewTHasControllingFanin<GraphView, true>(state);
}
void BM_MutableGraphViewHasControllingFaninFirst(
::testing::benchmark::State& state) {
BM_GraphViewTHasControllingFanin<MutableGraphView, false>(state);
}
void BM_MutableGraphViewHasControllingFaninLast(
::testing::benchmark::State& state) {
BM_GraphViewTHasControllingFanin<MutableGraphView, true>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasControllingFaninFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasControllingFaninLast);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasControllingFaninFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasControllingFaninLast);
template <typename GraphViewT, bool IsLast>
inline void BM_GraphViewTHasRegularFanout(::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, /*num_controlling_fanins=*/0,
      /*num_controlled_fanouts=*/0, /*fanout_unique_index=*/false);
Status s;
GraphViewT graph_view(&graph_def, &s);
const int index = IsLast ? num_fanins - 1 : 0;
auto* node = graph_view.GetNode(absl::StrFormat("in%05d", index));
auto* fanout = graph_view.GetNode("node");
for (auto i : state) {
node->HasFanout({&graph_view, fanout->node_index(), index});
}
}
void BM_GraphViewHasRegularFanoutFirst(::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanout<GraphView, false>(state);
}
void BM_GraphViewHasRegularFanoutLast(::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanout<GraphView, true>(state);
}
void BM_MutableGraphViewHasRegularFanoutFirst(
::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanout<MutableGraphView, false>(state);
}
void BM_MutableGraphViewHasRegularFanoutLast(
::testing::benchmark::State& state) {
BM_GraphViewTHasRegularFanout<MutableGraphView, true>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasRegularFanoutFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasRegularFanoutLast);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasRegularFanoutFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasRegularFanoutLast);
template <typename GraphViewT, bool IsLast>
inline void BM_GraphViewTHasControlledFanout(
::testing::benchmark::State& state) {
const int num_fanins = state.range(0);
const int num_fanouts = state.range(1);
  GraphDef graph_def = test::CreateFaninFanoutNodeGraph(
      num_fanins, num_fanouts, num_fanins, num_fanouts,
      /*fanout_unique_index=*/false);
Status s;
GraphViewT graph_view(&graph_def, &s);
const int index = IsLast ? num_fanins - 1 : 0;
auto* node = graph_view.GetNode(absl::StrFormat("control_in%05d", index));
auto* fanout = graph_view.GetNode("node");
for (auto i : state) {
node->HasFanout({&graph_view, fanout->node_index(), Graph::kControlSlot});
}
}
void BM_GraphViewHasControlledFanoutFirst(::testing::benchmark::State& state) {
BM_GraphViewTHasControlledFanout<GraphView, false>(state);
}
void BM_GraphViewHasControlledFanoutLast(::testing::benchmark::State& state) {
BM_GraphViewTHasControlledFanout<GraphView, true>(state);
}
void BM_MutableGraphViewHasControlledFanoutFirst(
::testing::benchmark::State& state) {
BM_GraphViewTHasControlledFanout<MutableGraphView, false>(state);
}
void BM_MutableGraphViewHasControlledFanoutLast(
::testing::benchmark::State& state) {
BM_GraphViewTHasControlledFanout<MutableGraphView, true>(state);
}
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasControlledFanoutFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_GraphViewHasControlledFanoutLast);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasControlledFanoutFirst);
RUN_NUM_FANIN_NUM_FANOUT_BENCHMARK(BM_MutableGraphViewHasControlledFanoutLast);
void BM_SortTopologically(::testing::benchmark::State& state) {
const int size = state.range(0);
GraphDef graph = test::CreateRandomGraph(size);
Status status;
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
for (auto i : state) {
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
}
}
RUN_NUM_NODE_BENCHMARK(BM_SortTopologically);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4bef952-252d-42c3-bf1d-7f04f232b2ad | cpp | tensorflow/tensorflow | lower_function_call_op | tensorflow/core/common_runtime/lower_function_call_op.cc | tensorflow/core/common_runtime/lower_function_call_op_test.cc | #include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_function_call_inline_policy.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
Status RewriteFunctionCallNode(Node* n, Graph* g,
const FunctionLibraryDefinition& flib_def,
bool keep_caller_fetchable) {
VLOG(2) << "Lower function call node: " << SummarizeNode(*n);
InlineFunctionBodyOptions inline_options;
inline_options.keep_caller_node = keep_caller_fetchable
? KeepCallerNode::kFetchable
: KeepCallerNode::kTargetable;
FunctionCallInlinePolicy policy = GetFunctionCallInlinePolicy(n);
if (policy == FunctionCallInlinePolicy::kMultiDevicePlacer) {
inline_options.output_control_src = OutputControlSrc::kControlOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::MultiDevice();
} else if (policy == FunctionCallInlinePolicy::kSingleDevicePlacer) {
inline_options.output_control_src = OutputControlSrc::kDataOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::SingleDevice();
} else {
return errors::InvalidArgument("Unsupported function inlining policy");
}
core::RefCountPtr<FunctionRecord> fdef;
if (n->IsPartitionedCall()) {
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "f", &func));
fdef = flib_def.FindRecord(func.name());
} else if (n->type_string() == FunctionLibraryDefinition::kGradientOp) {
VLOG(2) << "Skip SymbolicGradient lowering";
return absl::OkStatus();
} else {
fdef = flib_def.FindRecord(n->type_string());
}
if (fdef == nullptr) {
return errors::Internal("Can't find a function: node=", SummarizeNode(*n));
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(std::move(fdef), n->attrs(), &flib_def, &fbody));
if (flags::Global().enable_function_pruning_before_inlining.value()) {
VLOG(2) << "Pruning enabled before inlining";
PruneFunctionBody(
fbody->record->fdef(), fbody->graph,
absl::Span<Node*>(fbody->arg_nodes.data(), fbody->arg_nodes.size()));
} else {
VLOG(2) << "Pruning disabled before inlining";
}
Status can_inline_function_call =
ValidateInlining(n, fbody.get(), inline_options);
if (can_inline_function_call.ok()) {
TF_RETURN_IF_ERROR(
InlineFunctionBody(flib_def, g, n, fbody.get(), inline_options));
} else {
VLOG(2) << "Failed to inline function call node: "
<< can_inline_function_call.message();
}
return absl::OkStatus();
}
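// A minimal sketch of how a lowering pass might drive the rewrite above
// while walking a graph. `IsCallNode` is a hypothetical predicate standing
// in for whatever check the caller uses; only RewriteFunctionCallNode itself
// is real:
//
//   for (Node* n : graph->op_nodes()) {
//     if (IsCallNode(*n)) {
//       TF_RETURN_IF_ERROR(RewriteFunctionCallNode(
//           n, graph, *flib_def, /*keep_caller_fetchable=*/false));
//     }
//   }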
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncAttr(const string& name) {
AttrValue attr;
attr.mutable_func()->set_name(name);
return attr;
}
AttrValue FuncAttr(const string& name, const DataType type) {
AttrValue attr;
attr.mutable_func()->set_name(name);
(*attr.mutable_func()->mutable_attr())["T"].set_type(type);
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
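// Typical call pattern in the tests below: build the graph from a Scope,
// then funnel it through the real LowerFunctionalOpsPass via Rewrite().
// Note that Rewrite() copies the graph's function library first, since
// GraphOptimizationPassOptions::flib_def is a non-owning pointer:
//
//   std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
//   TF_ASSERT_OK(root.ToGraph(graph.get()));
//   TF_ASSERT_OK(Rewrite(&graph));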
TEST(LowerFunctionCallTest, InlineFunctionCall) {
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) =
FDH::Create("AddAndMul", {"i: int32"}, {"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
}
TEST(LowerFunctionCallTest, InlineFunctionCallAfterPruning) {
flags::Global().enable_function_pruning_before_inlining.reset(true);
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = FDH::Create(
"AddAndMul", {"i: int32", "j: int32", "k: int32", "r: resource"},
{"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"div"}, "FloorDiv", {"i", "i"}, {{"T", DT_INT32}}},
{{"gather"},
"ResourceGather",
{"r", "i"},
{{"Tindices", DT_INT32}, {"dtype", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Placeholder(root.WithOpName("X"), DT_INT32);
auto y = ops::Placeholder(root.WithOpName("Y"), DT_INT32);
auto z = ops::Placeholder(root.WithOpName("Z"), DT_INT32);
auto r = ops::Placeholder(root.WithOpName("R"), DT_RESOURCE);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(x.node()), NodeBuilder::NodeOut(y.node()),
NodeBuilder::NodeOut(z.node()), NodeBuilder::NodeOut(r.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
int floor_div_count = 0;
int resource_gather_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
if (op->type_string() == "FloorDiv") floor_div_count++;
if (op->type_string() == "ResourceGather") resource_gather_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ASSERT_EQ(floor_div_count, 0);
ASSERT_EQ(resource_gather_count, 0);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(x.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
flags::Global().enable_function_pruning_before_inlining.reset(false);
}
TEST(LowerFunctionCallTest, DoNotInlineTpuOrXlaFunctions) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef tpu_func = test::function::XTimesTwo();
tpu_func.mutable_signature()->set_name("TpuXTimesTwo");
(*tpu_func.mutable_attr())["_tpu_replicate"].set_b(true);
FunctionDef xla_func = test::function::XTimesTwo();
xla_func.mutable_signature()->set_name("XlaXTimesTwo");
(*xla_func.mutable_attr())["_xla_compile_id"].set_s("cluster_0");
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* tpu_call;
TF_ASSERT_OK(NodeBuilder("B", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_tpu_replicate", "cluster")
.Finalize(root.graph(), &tpu_call));
Node* xla_call;
TF_ASSERT_OK(NodeBuilder("C", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_xla_compile_id", "cluster")
.Finalize(root.graph(), &xla_call));
TF_ASSERT_OK(root.DoShapeInference(tpu_call));
TF_ASSERT_OK(root.DoShapeInference(xla_call));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
}
ASSERT_EQ(partitioned_call_count, 2);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(
session.Run(feeds, {Output(tpu_call), Output(xla_call)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_function_call_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_function_call_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bff43165-4287-4ce8-91f3-effe03e6f7a8 | cpp | tensorflow/tensorflow | graph_constructor | tensorflow/core/common_runtime/graph_constructor.cc | tensorflow/core/common_runtime/graph_constructor_test.cc | #include "tensorflow/core/common_runtime/graph_constructor.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
static constexpr bool kDoNotCheckDuplicates = true;
inline bool IsMerge(const NodeDef& node_def) {
return node_def.op() == "Merge" || node_def.op() == "RefMerge" ||
node_def.op() == "_XlaMerge";
}
inline bool IsNextIteration(const NodeDef& node_def) {
return node_def.op() == "NextIteration" ||
node_def.op() == "RefNextIteration";
}
bool IsValidNodeName(StringPiece s, bool allow_internal_ops) {
using ::tensorflow::strings::Scanner;
Scanner scanner(s);
scanner
.One(allow_internal_ops ? Scanner::LETTER_DIGIT_DOT_UNDERSCORE
: Scanner::LETTER_DIGIT_DOT)
.Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  while (true) {
    if (!scanner.GetResult())  // The previous segment failed to scan.
      return false;
    if (scanner.empty())  // The whole string was consumed: valid name.
      return true;
    // Absorb another '>'-separated segment; each segment must start with a
    // letter, digit, or dot, like the first one.
    scanner.One(Scanner::RANGLE)
        .One(Scanner::LETTER_DIGIT_DOT)
        .Any(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE);
  }
}
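// Illustrative inputs for the validator above, derived from the scanner
// rules rather than an external spec:
//
//   IsValidNodeName("scope/op_1", /*allow_internal_ops=*/false);   // true
//   IsValidNodeName("_recv", /*allow_internal_ops=*/false);        // false: leading '_'
//   IsValidNodeName("_recv", /*allow_internal_ops=*/true);         // true
//   IsValidNodeName("outer>inner", /*allow_internal_ops=*/false);  // true: '>'-joined segments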
class GraphConstructor {
public:
struct Options {
Options(const GraphConstructorOptions& in)
: allow_internal_ops(in.allow_internal_ops),
expect_device_spec(in.expect_device_spec),
propagate_device_spec(false),
uniquify_names(false),
uniquify_prefix(false),
skip_mapped_nodes(false),
importing(false),
validate_nodes(in.validate_nodes),
validate_colocation_constraints(false),
add_default_attributes(in.add_default_attributes) {}
Options(const ImportGraphDefOptions& in)
: allow_internal_ops(false),
expect_device_spec(false),
propagate_device_spec(in.propagate_device_spec),
prefix(in.prefix.empty() || absl::EndsWith(in.prefix, "/")
? in.prefix
: in.prefix + "/"),
uniquify_names(in.uniquify_names),
uniquify_prefix(in.uniquify_prefix),
input_map(in.input_map.begin(), in.input_map.end()),
skip_mapped_nodes(in.skip_mapped_nodes),
control_dependencies(in.control_dependencies),
return_tensors(in.return_tensors.begin(), in.return_tensors.end()),
return_nodes(in.return_nodes),
importing(true),
validate_nodes(true),
validate_colocation_constraints(in.validate_colocation_constraints),
validate_shape(in.validate_shape),
default_device(in.default_device) {}
bool allow_internal_ops;
bool expect_device_spec;
bool propagate_device_spec;
string prefix;
bool uniquify_names;
bool uniquify_prefix;
std::map<TensorId, TensorId> input_map;
bool skip_mapped_nodes;
std::vector<string> control_dependencies;
std::vector<TensorId> return_tensors;
std::vector<string> return_nodes;
bool importing;
bool validate_nodes;
bool validate_colocation_constraints;
bool validate_shape = true;
bool add_default_attributes = true;
string default_device;
};
typedef absl::Span<const NodeDef* const> NodeDefSlice;
static Status Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
static Status Construct(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys);
protected:
GraphConstructor(const Options& opts, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: opts_(opts),
g_(g),
original_versions_(g->versions()),
prefix_(opts.prefix),
refiner_(refiner),
return_tensors_(return_tensors),
return_nodes_(return_nodes),
missing_unused_input_map_keys_(missing_unused_input_map_keys) {}
virtual ~GraphConstructor() {}
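  // Runs every stage of the import in order; any failing stage aborts the
  // import, and Construct() then calls Undo() to roll the graph back.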
Status TryImport() {
TF_RETURN_IF_ERROR(EnsureNoNameCollisions());
TF_RETURN_IF_ERROR(ValidateInputMapAndControlDependencies());
TF_RETURN_IF_ERROR(BuildNodeIndex());
TF_RETURN_IF_ERROR(InitFromEdges());
TF_RETURN_IF_ERROR(Convert());
TF_RETURN_IF_ERROR(AddBackEdges());
TF_RETURN_IF_ERROR(UpdateVersionDef());
TF_RETURN_IF_ERROR(PopulateReturnTensors());
TF_RETURN_IF_ERROR(PopulateReturnNodes());
TF_RETURN_IF_ERROR(PopulateMissingUnusedInputMapKeys());
UpdateUniquifiedColocationNames();
FixupSourceAndSinkEdges(g_);
return absl::OkStatus();
}
private:
Status EnsureNoNameCollisions();
Status ValidateInputMapAndControlDependencies();
Status BuildNodeIndex();
Status InitFromEdges();
Status Convert();
Status AddBackEdges();
Status UpdateVersionDef();
Status PopulateReturnTensors();
Status PopulateReturnNodes();
Status PopulateMissingUnusedInputMapKeys();
FunctionDefLibraryStackTraces CreateStackTracesForFunctionDefLibrary(
const FunctionDefLibrary& library) const;
void Undo();
void PrintCycles();
void DFS(int cur_node, std::vector<int>* cur_branch,
std::vector<bool>* is_on_cur_branch,
absl::flat_hash_set<int>* unvisited,
const std::vector<absl::string_view>& node_names);
Status IsNodeFullyMapped(const NodeDef& node_def, bool* is_node_mapped);
Status ValidateColocationConstraints(const NodeDef& node_def);
Status MakeNode(NodeDef&& node_def, Node** node);
Status MakeEdge(Node* src, int output_index, Node* dst, int input_index);
Status ValidateShape(Node* node);
Status ModifyNodeDefForImport(NodeDef* node_def);
void RemapNodeDefInputs(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddControlDependencies(NodeDef* node_def,
std::vector<bool>* input_already_exists);
void AddPrefixToNodeDef(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UniquifyNames(const std::vector<bool>& input_already_exists,
NodeDef* node_def);
void UpdateUniquifiedColocationNames();
bool NameExistsInGraph(StringPiece name);
bool NameExistsInGraphDef(StringPiece name);
string FindUniqueName(StringPiece original_name);
void UpdatePendingCountAndReady(int processed, bool is_next_iteration);
virtual size_t node_def_count() const = 0;
virtual const NodeDef& get_node_def(int i) const = 0;
virtual NodeDef consume_node_def(int i) = 0;
virtual const VersionDef* versions() const = 0;
virtual std::optional<FunctionDefLibrary> consume_library() = 0;
virtual const GraphDebugInfo* debug_info() const = 0;
const Options opts_;
Graph* g_;
const VersionDef original_versions_;
string prefix_;
StackTracesMap traces_;
ShapeRefiner* refiner_;
std::vector<std::pair<Node*, int>>* return_tensors_;
std::vector<Node*>* return_nodes_;
std::vector<SafeTensorId>* missing_unused_input_map_keys_;
std::set<TensorId> used_input_map_keys_;
absl::flat_hash_set<int> merge_node_indices_;
struct NodeInfo {
explicit NodeInfo(int i) : gdef_index(i), node(nullptr) {}
NodeInfo() : NodeInfo(-1) {}
int gdef_index;
Node* node;
};
absl::flat_hash_map<std::string, NodeInfo> gdef_nodes_;
absl::flat_hash_set<StringPiece> gdef_prefixes_;
absl::flat_hash_map<StringPiece, Node*> existing_nodes_;
absl::flat_hash_set<StringPiece> existing_prefixes_;
gtl::FlatMap<string, string> uniquified_names_;
std::set<int> ready_;
std::vector<int> pending_count_;
std::vector<absl::InlinedVector<int, 4UL>> outputs_;
struct InputInfo {
explicit InputInfo(const string& node_name, Node* n, int i)
: name(node_name), node(n), index(i) {}
string name;
Node* node;
int index;
static bool IsControlInput(const InputInfo& input) {
return input.index == Graph::kControlSlot;
}
static int CompareName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name < rhs.name;
}
static bool IsSameName(const InputInfo& lhs, const InputInfo& rhs) {
return lhs.name == rhs.name;
}
};
struct EdgeInfo {
explicit EdgeInfo(const string& name, int i1, Node* n, int i2)
: src_name(name), src_index(i1), dst_node(n), dst_index(i2) {}
string src_name;
int src_index;
Node* dst_node;
int dst_index;
};
std::vector<EdgeInfo> back_edges_;
GraphConstructor(const GraphConstructor&) = delete;
void operator=(const GraphConstructor&) = delete;
};
class NodeDefCopyingGraphConstructor : public GraphConstructor {
public:
NodeDefCopyingGraphConstructor(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
node_defs_(node_defs),
versions_(versions),
library_(library),
debug_info_(debug_info) {}
private:
size_t node_def_count() const override { return node_defs_.size(); }
const NodeDef& get_node_def(int i) const override { return *node_defs_[i]; }
NodeDef consume_node_def(int i) override { return *node_defs_[i]; }
const VersionDef* versions() const override { return versions_; }
std::optional<FunctionDefLibrary> consume_library() override {
if (library_ == nullptr) {
return std::nullopt;
} else {
return *library_;
}
}
const GraphDebugInfo* debug_info() const override { return debug_info_; }
const NodeDefSlice node_defs_;
const VersionDef* const versions_;
const FunctionDefLibrary* const library_;
const GraphDebugInfo* const debug_info_;
};
class NodeDefMovingGraphConstructor : public GraphConstructor {
public:
NodeDefMovingGraphConstructor(
const Options& opts, GraphDef&& graph_def, Graph* g,
ShapeRefiner* refiner, std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys)
: GraphConstructor(opts, g, refiner, return_tensors, return_nodes,
missing_unused_input_map_keys),
graph_def_(std::move(graph_def)),
is_consumed_(graph_def_.node_size(), false) {}
private:
size_t node_def_count() const override { return graph_def_.node().size(); }
const NodeDef& get_node_def(int i) const override {
CHECK(!is_consumed_[i])
<< "NodeDef " << i << " accessed after it was consumed.";
return graph_def_.node(i);
}
NodeDef consume_node_def(int i) override {
CHECK(!is_consumed_[i]) << "NodeDef " << i << " consumed twice.";
is_consumed_[i] = true;
return std::move(*graph_def_.mutable_node(i));
}
const VersionDef* versions() const override { return &graph_def_.versions(); }
std::optional<FunctionDefLibrary> consume_library() override {
return std::move(*graph_def_.mutable_library());
}
const GraphDebugInfo* debug_info() const override {
return &graph_def_.debug_info();
}
GraphDef graph_def_;
std::vector<bool> is_consumed_;
};
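// True when the GraphDef's producer version is more than 21 versions ahead of
// this binary's TF_GRAPH_DEF_VERSION, i.e. outside the forward compatibility
// window; used below to append a hint to import errors.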
bool ForwardCompatibilityWindowPassed(const VersionDef& versions) {
return (versions.producer() - TF_GRAPH_DEF_VERSION) > 21;
}
Status MaybeAppendVersionWarning(const VersionDef* versions,
const Status& import_status) {
if (versions && ForwardCompatibilityWindowPassed(*versions)) {
return Status(
import_status.code(),
absl::StrCat(
"Converting GraphDef to Graph has failed with an error: '",
import_status.message(),
"' The binary trying to import the GraphDef was built when "
"GraphDef version was ",
TF_GRAPH_DEF_VERSION,
". The GraphDef was produced by a binary built when GraphDef "
"version was ",
versions->producer(),
". The difference between these versions is larger than "
"TensorFlow's forward compatibility guarantee, and might be the "
"root cause for failing to import the GraphDef."));
}
return import_status;
}
Status GraphConstructor::Construct(
const Options& opts, NodeDefSlice node_defs, const VersionDef* versions,
const FunctionDefLibrary* library, const GraphDebugInfo* debug_info,
Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
if (versions) {
TF_RETURN_IF_ERROR(CheckVersions(*versions, TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
}
NodeDefCopyingGraphConstructor c(opts, node_defs, versions, library,
debug_info, g, refiner, return_tensors,
return_nodes, missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(versions, s);
}
return s;
}
Status GraphConstructor::Construct(
const Options& opts, GraphDef&& graph_def, Graph* g, ShapeRefiner* refiner,
std::vector<std::pair<Node*, int>>* return_tensors,
std::vector<Node*>* return_nodes,
std::vector<SafeTensorId>* missing_unused_input_map_keys) {
TF_RETURN_IF_ERROR(CheckVersions(graph_def.versions(), TF_GRAPH_DEF_VERSION,
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
"GraphDef", "graph"));
VersionDef version_def = graph_def.versions();
NodeDefMovingGraphConstructor c(opts, std::move(graph_def), g, refiner,
return_tensors, return_nodes,
missing_unused_input_map_keys);
Status s = c.TryImport();
if (!s.ok()) {
c.Undo();
s = MaybeAppendVersionWarning(&version_def, s);
}
return s;
}
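// Decrements the pending-input count of each consumer of `processed`, moving
// consumers whose count reaches zero onto the ready queue. Edges from a
// NextIteration node into a Merge node are back edges and are skipped.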
void GraphConstructor::UpdatePendingCountAndReady(int processed,
bool is_next_iteration) {
for (size_t i = 0; i < outputs_[processed].size(); ++i) {
const int output = outputs_[processed][i];
bool is_next_iteration_to_merge_edge =
is_next_iteration && merge_node_indices_.count(output) == 1;
if (!is_next_iteration_to_merge_edge) {
int* current_pending_count = &pending_count_[output];
CHECK_GT(*current_pending_count, 0);
(*current_pending_count)--;
if (*current_pending_count == 0) {
ready_.insert(output);
}
}
}
}
bool NodeNameInValues(const std::map<TensorId, TensorId>& input_map,
const StringPiece& node_name) {
for (auto iter = input_map.begin(); iter != input_map.end(); ++iter) {
if (iter->second.first == node_name) return true;
}
return false;
}
bool NodeNameInValues(const std::vector<string>& control_dependencies,
const StringPiece& node_name) {
return std::find(control_dependencies.begin(), control_dependencies.end(),
node_name) != control_dependencies.end();
}
void AddPrefixes(StringPiece node_name,
absl::flat_hash_set<StringPiece>* prefixes) {
size_t idx = -1;
while ((idx = node_name.find('/', idx + 1)) != StringPiece::npos) {
prefixes->insert(node_name.substr(0, idx));
}
}
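// Indexes the names and name prefixes already present in the graph and checks
// that the import cannot collide with them, uniquifying the prefix if
// opts_.uniquify_prefix is set.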
Status GraphConstructor::EnsureNoNameCollisions() {
existing_nodes_.reserve(g_->num_nodes());
for (Node* n : g_->nodes()) {
bool already_exists = !existing_nodes_.insert({n->name(), n}).second;
if (already_exists) {
if (NodeNameInValues(opts_.input_map, n->name())) {
return errors::InvalidArgument(
"cannot resolve input_map because multiple nodes exist with name '",
n->name(), "'");
}
if (NodeNameInValues(opts_.control_dependencies, n->name())) {
return errors::InvalidArgument(
"cannot resolve control_dependencies because multiple nodes exist "
"with name '",
n->name(), "'");
}
}
AddPrefixes(n->name(), &existing_prefixes_);
}
if (prefix_.empty() && opts_.importing && !opts_.uniquify_names) {
for (size_t i = 0; i < node_def_count(); ++i) {
const string& name = get_node_def(i).name();
if (NameExistsInGraph(name)) {
return errors::InvalidArgument("Node name '", name,
"' already exists in the Graph");
}
}
} else if (!prefix_.empty()) {
StringPiece prefix_no_slash(prefix_);
prefix_no_slash.remove_suffix(1);
if (!IsValidNodeName(prefix_no_slash, false)) {
return errors::InvalidArgument("Imported node name prefix '", prefix_,
"' would lead to invalid node names");
}
if (NameExistsInGraph(prefix_no_slash) && opts_.uniquify_prefix) {
prefix_ = strings::StrCat(FindUniqueName(prefix_no_slash), "/");
}
}
return absl::OkStatus();
}
Status GraphConstructor::ValidateInputMapAndControlDependencies() {
for (const auto& mapping : opts_.input_map) {
TensorId src = mapping.first;
TensorId dst = mapping.second;
if (existing_nodes_.count(dst.first) == 0) {
return errors::InvalidArgument(
"node '", dst.first, "' in input_map does not exist in graph ",
"(input_map entry: ", src.ToString(), "->", dst.ToString(), ")");
}
if ((src.second == Graph::kControlSlot) !=
(dst.second == Graph::kControlSlot)) {
return errors::InvalidArgument("input_map entry ", src.ToString(), "->",
dst.ToString(), " between ",
"control edge and non-control edge");
}
}
for (const string& node : opts_.control_dependencies) {
if (existing_nodes_.count(node) == 0) {
return errors::InvalidArgument(
"node '", node,
"' in control_dependencies does not exist in "
"graph");
}
}
return absl::OkStatus();
}
Status GraphConstructor::BuildNodeIndex() {
for (int n = 0; n < node_def_count(); ++n) {
const NodeDef& node_def = get_node_def(n);
if (!IsValidNodeName(node_def.name(), opts_.allow_internal_ops)) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"': Node name contains invalid characters");
}
if (!gdef_nodes_.insert(std::make_pair(node_def.name(), NodeInfo(n)))
.second) {
return errors::InvalidArgument("Node '", node_def.name(),
"' is not unique");
}
if (node_def.op().empty()) {
return errors::InvalidArgument("Node '", node_def.name(),
"' does not specify an operation");
}
if (opts_.expect_device_spec && node_def.device().empty()) {
return errors::InvalidArgument("Node '", node_def.name(),
"' is missing a device specification");
}
if (IsMerge(node_def)) {
merge_node_indices_.insert(n);
}
bool in_control_dependence = false;
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name = node_def.input(i);
if (!input_name.empty() && absl::StartsWith(input_name, "^")) {
in_control_dependence = true;
} else if (in_control_dependence) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"': Control dependencies must come after regular dependencies");
}
}
AddPrefixes(node_def.name(), &gdef_prefixes_);
}
return absl::OkStatus();
}
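// Builds the pending-input counts and forward adjacency lists used for
// topological processing. A Merge node fed by a NextIteration loop-back edge
// initially waits only for its control inputs plus one data input.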
Status GraphConstructor::InitFromEdges() {
const int num_nodes = node_def_count();
pending_count_.reserve(num_nodes);
outputs_.resize(num_nodes);
gtl::FlatSet<string> next_iteration_nodes;
for (int n = 0; n < node_def_count(); ++n) {
const NodeDef& node_def = get_node_def(n);
if (IsNextIteration(node_def)) {
next_iteration_nodes.insert(node_def.name());
}
}
for (int n = 0; n < num_nodes; ++n) {
const NodeDef& node_def = get_node_def(n);
int pending_count = node_def.input_size();
if (IsMerge(node_def)) {
int32_t num_control_edges = 0;
bool has_loop_back_edge = false;
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name(node_def.input(i));
if (absl::StartsWith(input_name, "^")) {
num_control_edges++;
} else {
TensorId id(ParseTensorName(input_name));
if (next_iteration_nodes.find(string(id.first)) !=
next_iteration_nodes.end()) {
has_loop_back_edge = true;
}
}
}
if (has_loop_back_edge) {
pending_count = num_control_edges + 1;
}
}
for (int i = 0; i < node_def.input_size(); ++i) {
StringPiece input_name = node_def.input(i);
TensorId id(ParseTensorName(input_name));
if (opts_.input_map.count(id) == 0) {
auto iter = gdef_nodes_.find(id.first);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Node '", node_def.name(),
"': Unknown input node '",
node_def.input(i), "'");
}
outputs_[iter->second.gdef_index].push_back(n);
} else {
--pending_count;
DCHECK_GE(pending_count, 0);
}
}
if (pending_count == 0) {
ready_.insert(n);
}
pending_count_.push_back(pending_count);
}
return absl::OkStatus();
}
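// When importing with validation enabled, checks that every "loc:@..."
// colocation target in the node's "_class" attribute names a node defined in
// the same GraphDef.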
Status GraphConstructor::ValidateColocationConstraints(
const NodeDef& node_def) {
if (!opts_.validate_colocation_constraints || !opts_.importing)
return absl::OkStatus();
const auto iter = node_def.attr().find(kColocationAttrName);
if (iter == node_def.attr().end()) return absl::OkStatus();
for (const string& c : iter->second.list().s()) {
StringPiece s(c);
if (absl::ConsumePrefix(&s, kColocationGroupPrefix) &&
gdef_nodes_.find(s) == gdef_nodes_.end()) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"' expects to be colocated with unknown node '", s, "'");
}
}
return absl::OkStatus();
}
Status GraphConstructor::MakeNode(NodeDef&& node_def, Node** node) {
Status status;
*node = g_->AddNode(std::move(node_def), &status);
if (!status.ok()) return status;
if (opts_.expect_device_spec ||
(opts_.propagate_device_spec && !(*node)->def().device().empty())) {
(*node)->set_assigned_device_name((*node)->def().device());
}
return absl::OkStatus();
}
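// Runs shape inference on `node`; if the node carries an "_output_shapes"
// attribute, each stored shape is checked against the inferred shapes via the
// refiner, after which the attribute is cleared.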
Status GraphConstructor::ValidateShape(Node* node) {
if (!opts_.importing || !opts_.validate_shape) return absl::OkStatus();
TF_RETURN_IF_ERROR(refiner_->AddNode(node));
std::vector<const TensorShapeProto*> shape_attrs;
const char* kAttrName = "_output_shapes";
if (!TryGetNodeAttr(node->attrs(), kAttrName, &shape_attrs)) {
return absl::OkStatus();
}
auto* ic = refiner_->GetContext(node);
DCHECK(ic != nullptr)
<< "ShapeRefiner::AddNode() should have created the InferenceContext";
if (shape_attrs.size() < node->num_outputs()) {
return errors::InvalidArgument(
"Node '", node->name(), "' has ", node->num_outputs(),
" outputs but the ", kAttrName, " attribute specifies shapes for ",
shape_attrs.size(), " outputs");
}
if (shape_attrs.size() > node->num_outputs()) {
LOG(WARNING) << "Node '" << node->name() << "' has " << node->num_outputs()
<< " outputs but the " << kAttrName
<< " attribute specifies shapes for " << shape_attrs.size()
<< " outputs. Output shapes may be inaccurate.";
}
for (int i = 0; i < node->num_outputs(); ++i) {
const TensorShapeProto& p = *shape_attrs[i];
shape_inference::ShapeHandle h;
Status s = ic->MakeShapeFromShapeProto(p, &h);
if (!s.ok()) {
      return errors::InvalidArgument("Node '", node->name(),
                                     "' has an invalid ", kAttrName,
                                     " attribute (shape #", i, " error:'",
                                     s.message(), "')");
}
s = refiner_->SetShape(node, i, h);
if (!s.ok()) {
return errors::InvalidArgument(
"Node '", node->name(), "' has an ", kAttrName,
" attribute inconsistent with the GraphDef for output #", i, ": ",
s.message());
}
}
node->ClearAttr(kAttrName);
return absl::OkStatus();
}
Status GraphConstructor::ModifyNodeDefForImport(NodeDef* node_def) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def->op(), &op_def));
AddDefaultsToNodeDef(*op_def, node_def);
TF_RETURN_IF_ERROR(ValidateNodeDef(*node_def, *op_def));
if (versions()) {
TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, versions()->producer()));
}
return absl::OkStatus();
}
void RemoveInputs(const std::vector<int>& inputs_to_remove, NodeDef* node_def,
std::vector<bool>* input_already_exists) {
NodeDef copy;
copy.mutable_input()->Reserve(node_def->input_size() -
inputs_to_remove.size());
for (int i = 0, j = 0; i < node_def->input_size(); ++i) {
if (j < inputs_to_remove.size() && i == inputs_to_remove[j]) {
++j;
} else {
copy.add_input()->swap(*node_def->mutable_input(i));
}
}
node_def->mutable_input()->Swap(copy.mutable_input());
for (int idx : inputs_to_remove) {
input_already_exists->erase(input_already_exists->begin() + idx);
}
DCHECK_EQ(input_already_exists->size(), node_def->input_size());
}
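// Applies opts_.input_map to the inputs of `node_def`, marking remapped
// inputs as already existing in the graph and dropping control inputs that
// the remapping would duplicate.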
void GraphConstructor::RemapNodeDefInputs(
NodeDef* node_def, std::vector<bool>* input_already_exists) {
DCHECK_EQ(input_already_exists->size(), node_def->input_size());
std::set<TensorId> control_inputs;
std::vector<int> inputs_to_remove;
for (int i = 0; i < node_def->input_size(); ++i) {
auto iter = opts_.input_map.find(ParseTensorName(node_def->input(i)));
if (iter == opts_.input_map.end()) continue;
used_input_map_keys_.insert(iter->first);
TensorId new_input = iter->second;
if (new_input.second == Graph::kControlSlot) {
if (control_inputs.count(new_input) > 0) {
inputs_to_remove.push_back(i);
continue;
}
control_inputs.insert(new_input);
}
node_def->set_input(i, new_input.ToString());
(*input_already_exists)[i] = true;
}
if (!inputs_to_remove.empty()) {
RemoveInputs(inputs_to_remove, node_def, input_already_exists);
}
}
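// Adds opts_.control_dependencies to `node_def`, unless one of its inputs is
// an already-converted node from this import, in which case the dependencies
// are considered inherited through that input.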
void GraphConstructor::AddControlDependencies(
NodeDef* node_def, std::vector<bool>* input_already_exists) {
bool inherits_deps = false;
for (int i = 0; i < node_def->input_size(); ++i) {
if ((*input_already_exists)[i]) continue;
TensorId id(ParseTensorName(node_def->input(i)));
auto iter = gdef_nodes_.find(id.first);
DCHECK(iter != gdef_nodes_.end()) << id.first;
if (iter->second.node == nullptr) {
continue;
}
inherits_deps = true;
}
if (inherits_deps) return;
for (const string& control_dep : opts_.control_dependencies) {
string input = TensorId(control_dep, Graph::kControlSlot).ToString();
bool found = false;
for (int i = node_def->input_size() - 1; i >= 0; --i) {
const string& node_input = node_def->input(i);
if (node_input[0] != '^') {
break;
}
if (node_input == input) {
found = true;
break;
}
}
if (found) {
continue;
}
node_def->add_input(input);
input_already_exists->push_back(true);
}
}
void GraphConstructor::AddPrefixToNodeDef(
const std::vector<bool>& input_already_exists, NodeDef* node_def) {
if (prefix_.empty()) return;
node_def->set_name(strings::StrCat(prefix_, node_def->name()));
for (int i = 0; i < node_def->input_size(); ++i) {
if (input_already_exists[i]) continue;
StringPiece input(node_def->input(i));
if (absl::ConsumePrefix(&input, "^")) {
node_def->set_input(i, strings::StrCat("^", prefix_, input));
} else {
node_def->set_input(i, strings::StrCat(prefix_, input));
}
}
if (node_def->attr().find(kColocationAttrName) != node_def->attr().end()) {
auto* list =
node_def->mutable_attr()->at(kColocationAttrName).mutable_list();
for (int i = 0; i < list->s_size(); ++i) {
StringPiece v(list->s(i));
if (absl::ConsumePrefix(&v, kColocationGroupPrefix)) {
list->set_s(i, strings::StrCat(kColocationGroupPrefix, prefix_, v));
}
}
}
}
void GraphConstructor::UniquifyNames(
const std::vector<bool>& input_already_exists, NodeDef* node_def) {
if (NameExistsInGraph(node_def->name())) {
string old_name = node_def->name();
node_def->set_name(FindUniqueName(node_def->name()));
uniquified_names_[old_name] = node_def->name();
}
for (int i = 0; i < node_def->input_size(); ++i) {
if (input_already_exists[i]) continue;
TensorId id = ParseTensorName(node_def->input(i));
auto iter = uniquified_names_.find(string(id.first));
if (iter == uniquified_names_.end()) continue;
id.first = iter->second;
node_def->set_input(i, id.ToString());
}
}
void GraphConstructor::UpdateUniquifiedColocationNames() {
for (const auto& pair : gdef_nodes_) {
Node* node = pair.second.node;
if (node == nullptr) continue;
std::vector<string> coloc_values;
if (!TryGetNodeAttr(node->attrs(), kColocationAttrName, &coloc_values))
continue;
bool updated = false;
for (size_t i = 0; i < coloc_values.size(); ++i) {
StringPiece val(coloc_values[i]);
if (absl::ConsumePrefix(&val, kColocationGroupPrefix)) {
auto name_pair = uniquified_names_.find(string(val));
if (name_pair == uniquified_names_.end()) continue;
updated = true;
coloc_values[i] =
strings::StrCat(kColocationGroupPrefix, name_pair->second);
}
}
if (updated) {
node->AddAttr(kColocationAttrName, std::move(coloc_values));
}
}
}
bool GraphConstructor::NameExistsInGraph(StringPiece name) {
if (existing_nodes_.find(name) != existing_nodes_.end()) return true;
if (existing_prefixes_.find(name) != existing_prefixes_.end()) return true;
return false;
}
bool GraphConstructor::NameExistsInGraphDef(StringPiece name) {
if (gdef_nodes_.find(name) != gdef_nodes_.end()) return true;
if (gdef_prefixes_.find(name) != gdef_prefixes_.end()) return true;
return false;
}
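// Appends "_<n>" with increasing n to `original_name` until the candidate
// collides with neither the existing graph nor the GraphDef being imported.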
string GraphConstructor::FindUniqueName(StringPiece original_name) {
string name(original_name);
int count = 0;
while (NameExistsInGraph(name) || (count > 0 && NameExistsInGraphDef(name))) {
name = strings::StrCat(original_name, "_", ++count);
}
return name;
}
Status GraphConstructor::IsNodeFullyMapped(const NodeDef& node_def,
bool* is_node_mapped) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
for (int i = 0; i < op_def->output_arg_size(); ++i) {
if (opts_.input_map.find({node_def.name(), i}) == opts_.input_map.end()) {
*is_node_mapped = false;
return absl::OkStatus();
}
}
*is_node_mapped = true;
return absl::OkStatus();
}
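// Depth-first traversal over the nodes left unconverted, logging the node ids
// and names of any cycle found on the current branch; used purely for
// diagnostics when Convert() detects a cycle.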
void GraphConstructor::DFS(int cur_node, std::vector<int>* cur_branch,
std::vector<bool>* is_on_cur_branch,
absl::flat_hash_set<int>* unvisited,
const std::vector<absl::string_view>& node_names) {
cur_branch->push_back(cur_node);
is_on_cur_branch->at(cur_node) = true;
for (auto next_node : outputs_[cur_node]) {
if (unvisited->find(next_node) != unvisited->end()) {
if (is_on_cur_branch->at(next_node)) {
auto iter =
std::find(cur_branch->begin(), cur_branch->end(), next_node);
LOG(WARNING) << "Cycle detected:";
while (iter != cur_branch->end()) {
const absl::string_view name = node_names[*iter];
DCHECK(!name.empty());
LOG(WARNING) << "node id=" << *iter << ", name=" << name;
++iter;
}
LOG(WARNING) << "End of cycle";
} else {
DFS(next_node, cur_branch, is_on_cur_branch, unvisited, node_names);
}
}
}
cur_branch->pop_back();
is_on_cur_branch->at(cur_node) = false;
unvisited->erase(cur_node);
}
void GraphConstructor::PrintCycles() {
int num_nodes = outputs_.size();
std::vector<absl::string_view> node_names;
node_names.resize(num_nodes);
for (const auto& named_node : gdef_nodes_) {
DCHECK_GE(named_node.second.gdef_index, 0);
DCHECK_LT(named_node.second.gdef_index, num_nodes);
node_names[named_node.second.gdef_index] = named_node.first;
}
absl::flat_hash_set<int> unvisited;
for (int i = 0; i < num_nodes; i++) {
unvisited.insert(i);
}
while (!unvisited.empty()) {
int cur_node = *unvisited.begin();
std::vector<int> cur_branch;
std::vector<bool> is_on_cur_branch(num_nodes, false);
DFS(cur_node, &cur_branch, &is_on_cur_branch, &unvisited, node_names);
}
}
FunctionDefLibraryStackTraces
GraphConstructor::CreateStackTracesForFunctionDefLibrary(
const FunctionDefLibrary& library) const {
if (debug_info() == nullptr) {
FunctionDefLibraryStackTraces library_traces;
return library_traces;
} else {
return FunctionLibraryDefinition::CreateStackTracesForFunctionDefLibrary(
library, *debug_info());
}
}
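// The core conversion loop: installs the function library (with per-function
// stack traces), then repeatedly pops a ready node, applies the import
// rewrites (input map, control dependencies, prefix, uniquification), builds
// the Graph node, and connects its edges. Data back edges into Merge nodes
// are recorded and wired later by AddBackEdges(). Leftover nodes indicate a
// cycle, which is reported as an error.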
Status GraphConstructor::Convert() {
if (debug_info() != nullptr) {
traces_ = LoadTracesFromDebugInfo(*debug_info());
}
if (auto library = consume_library(); library.has_value()) {
FunctionDefLibraryStackTraces library_traces;
for (const FunctionDef& fdef : library->function()) {
const std::string& function_name = fdef.signature().name();
StackTracesMap& function_traces = library_traces[function_name];
std::string key_suffix = absl::StrCat("@", function_name);
for (const auto& [traces_key, stack_trace] : traces_) {
if (!absl::EndsWith(traces_key, key_suffix)) continue;
std::string node_key =
std::string(absl::StripSuffix(traces_key, key_suffix));
function_traces[node_key] = stack_trace;
}
}
TF_RETURN_IF_ERROR(
g_->AddFunctionLibrary(*std::move(library), library_traces));
}
std::vector<InputInfo> inputs;
int processed = 0;
std::vector<bool> input_already_exists;
while (!ready_.empty()) {
int o = *ready_.begin();
ready_.erase(ready_.begin());
++processed;
inputs.clear();
bool has_data_back_edge = false;
NodeDef node_def = consume_node_def(o);
input_already_exists.clear();
input_already_exists.resize(node_def.input_size(), false);
std::string node_name = node_def.name();
if (opts_.importing) {
if (opts_.skip_mapped_nodes) {
bool is_node_mapped = false;
TF_RETURN_IF_ERROR(IsNodeFullyMapped(node_def, &is_node_mapped));
if (is_node_mapped) {
UpdatePendingCountAndReady(o, IsNextIteration(node_def));
continue;
}
}
if (!opts_.input_map.empty()) {
RemapNodeDefInputs(&node_def, &input_already_exists);
}
if (!opts_.control_dependencies.empty()) {
AddControlDependencies(&node_def, &input_already_exists);
}
if (!opts_.default_device.empty() && node_def.device().empty()) {
node_def.set_device(opts_.default_device);
}
}
DCHECK_EQ(node_def.input_size(), input_already_exists.size());
TF_RETURN_IF_ERROR(ValidateColocationConstraints(node_def));
for (int i = 0; i < node_def.input_size(); ++i) {
TensorId tensor_id = ParseTensorName(node_def.input(i));
Node* src_node;
int src_index;
if (!input_already_exists[i]) {
auto iter = gdef_nodes_.find(tensor_id.node());
DCHECK(iter != gdef_nodes_.end()) << tensor_id.node();
src_node = iter->second.node;
src_index = tensor_id.index();
if (src_node == nullptr) has_data_back_edge = true;
} else {
auto iter = existing_nodes_.find(tensor_id.node());
DCHECK(iter != existing_nodes_.end()) << tensor_id.node();
src_node = iter->second;
src_index = tensor_id.index();
}
if (src_node != nullptr && src_index >= src_node->num_outputs()) {
std::ostringstream out;
out << "Node '" << node_def.name() << "': Connecting to invalid output "
<< tensor_id.index() << " of source node " << tensor_id.node()
<< " which has " << src_node->num_outputs() << " outputs.";
if (src_node->type_string() == "If" ||
src_node->type_string() == "StatelessIf" ||
src_node->type_string() == "While" ||
src_node->type_string() == "StatelessWhile") {
out << " Try using "
<< "tf.compat.v1.experimental.output_all_intermediates(True).";
}
return errors::InvalidArgument(out.str());
}
inputs.emplace_back(string(tensor_id.node()), src_node, src_index);
}
if (has_data_back_edge && !IsMerge(node_def)) {
return errors::InvalidArgument(
"Node '", node_def.name(),
"' had a back edge, but only Merge nodes can have back edges.");
}
Node* node;
if (opts_.importing) {
if (!prefix_.empty()) {
AddPrefixToNodeDef(input_already_exists, &node_def);
}
if (opts_.uniquify_names && (prefix_.empty() || !opts_.uniquify_prefix)) {
UniquifyNames(input_already_exists, &node_def);
}
}
if (opts_.importing) {
TF_RETURN_IF_ERROR(ModifyNodeDefForImport(&node_def));
} else {
const OpDef* op_def;
TF_RETURN_IF_ERROR(
g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
if (opts_.add_default_attributes) {
AddDefaultsToNodeDef(*op_def, &node_def);
}
if (opts_.validate_nodes) {
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));
}
}
TF_RETURN_IF_ERROR(MakeNode(std::move(node_def), &node));
if (node != nullptr) {
if (traces_.contains(node_name)) {
node->SetStackTrace(traces_[node_name]);
}
}
gdef_nodes_[node_name].node = node;
auto first_control = absl::c_find_if(inputs, &InputInfo::IsControlInput);
auto first_control_copy = first_control;
std::sort(first_control, inputs.end(), &InputInfo::CompareName);
inputs.erase(
std::unique(first_control_copy, inputs.end(), &InputInfo::IsSameName),
inputs.end());
for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i].node == nullptr) {
back_edges_.emplace_back(inputs[i].name, inputs[i].index, node, i);
} else if (inputs[i].index == Graph::kControlSlot) {
g_->AddControlEdge(inputs[i].node, node, kDoNotCheckDuplicates);
} else {
TF_RETURN_IF_ERROR(MakeEdge(inputs[i].node, inputs[i].index, node, i));
}
}
TF_RETURN_IF_ERROR(ValidateShape(node));
UpdatePendingCountAndReady(o, node->IsNextIteration());
}
if (processed < node_def_count()) {
LOG(WARNING) << "IN " << __func__ << " " << (node_def_count() - processed)
<< " NODES IN A CYCLE";
for (int64_t i = 0; i < node_def_count(); i++) {
if (pending_count_[i] != 0) {
LOG(WARNING) << "PENDING: " << SummarizeNodeDef(get_node_def(i))
<< " WITH PENDING COUNT = " << pending_count_[i];
}
}
PrintCycles();
return errors::InvalidArgument(node_def_count() - processed,
" nodes in a cycle");
}
return absl::OkStatus();
}
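// Connects the back edges (NextIteration -> Merge) recorded during Convert().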
Status GraphConstructor::AddBackEdges() {
for (const auto& e : back_edges_) {
Node* src_node = gdef_nodes_[e.src_name].node;
if (e.src_index == Graph::kControlSlot) {
g_->AddControlEdge(src_node, e.dst_node, kDoNotCheckDuplicates);
} else {
TF_RETURN_IF_ERROR(
MakeEdge(src_node, e.src_index, e.dst_node, e.dst_index));
}
VLOG(2) << "Add back edge: " << src_node->name() << " -> "
<< e.dst_node->name();
}
return absl::OkStatus();
}
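// Merges the imported VersionDef into the graph's versions when importing:
// producer takes the minimum, min_consumer the maximum, and bad_consumers the
// union. A plain (non-import) conversion overwrites the versions instead.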
Status GraphConstructor::UpdateVersionDef() {
if (versions() == nullptr) return absl::OkStatus();
if (!opts_.importing) {
g_->set_versions(*versions());
return absl::OkStatus();
}
VersionDef g_versions = g_->versions();
g_versions.set_producer(
std::min(g_versions.producer(), versions()->producer()));
g_versions.set_min_consumer(
std::max(g_versions.min_consumer(), versions()->min_consumer()));
if (versions()->bad_consumers_size() > 0) {
std::set<int> bad(g_versions.bad_consumers().begin(),
g_versions.bad_consumers().end());
bad.insert(versions()->bad_consumers().begin(),
versions()->bad_consumers().end());
g_versions.clear_bad_consumers();
for (int v : bad) {
g_versions.add_bad_consumers(v);
}
}
g_->set_versions(g_versions);
return absl::OkStatus();
}
Status GraphConstructor::PopulateReturnTensors() {
if (opts_.return_tensors.empty()) return absl::OkStatus();
for (const TensorId& id : opts_.return_tensors) {
auto iter = opts_.input_map.find(id);
if (iter == opts_.input_map.end()) {
auto iter = gdef_nodes_.find(id.first);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Requested return tensor '",
id.ToString(),
"' not found in graph def");
}
int num_outputs = iter->second.node->num_outputs();
if ((id.second < 0 || id.second >= num_outputs) &&
id.second != Graph::kControlSlot) {
return errors::InvalidArgument("Invalid return output ", id.second,
" of node '", id.first, "', which has ",
num_outputs, " output(s)");
}
return_tensors_->push_back({iter->second.node, id.second});
} else {
TensorId remapped_id = iter->second;
DCHECK_GT(existing_nodes_.count(remapped_id.first), 0);
Node* node = existing_nodes_[remapped_id.first];
return_tensors_->push_back({node, remapped_id.second});
}
}
return absl::OkStatus();
}
Status GraphConstructor::PopulateReturnNodes() {
if (opts_.return_nodes.empty()) return absl::OkStatus();
for (StringPiece name : opts_.return_nodes) {
auto iter = gdef_nodes_.find(name);
if (iter == gdef_nodes_.end()) {
return errors::InvalidArgument("Requested return node '", name,
"' not found in graph def");
}
return_nodes_->push_back(iter->second.node);
}
return absl::OkStatus();
}
Status GraphConstructor::PopulateMissingUnusedInputMapKeys() {
if (missing_unused_input_map_keys_ == nullptr) return absl::OkStatus();
for (const auto& input_map_pair : opts_.input_map) {
TensorId key = input_map_pair.first;
if (used_input_map_keys_.count(key) > 0) continue;
auto pair = gdef_nodes_.find(key.first);
if (pair == gdef_nodes_.end()) {
missing_unused_input_map_keys_->push_back(key);
continue;
}
const NodeDef& node_def = get_node_def(pair->second.gdef_index);
const OpDef* op_def;
TF_RETURN_IF_ERROR(g_->op_registry()->LookUpOpDef(node_def.op(), &op_def));
int num_outputs;
TF_RETURN_IF_ERROR(NumOutputsForNode(node_def, *op_def, &num_outputs));
if (key.second >= num_outputs) {
missing_unused_input_map_keys_->push_back(key);
}
}
return absl::OkStatus();
}
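// Removes every node added by this constructor and restores the graph's
// original VersionDef.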
void GraphConstructor::Undo() {
for (const auto& iter : gdef_nodes_) {
if (iter.second.node != nullptr) {
g_->RemoveNode(iter.second.node);
}
}
g_->set_versions(original_versions_);
}
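// Adds a data edge from src:output_index to dst:input_index after checking
// that both slots exist and that their dtypes are compatible.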
Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst,
int input_index) {
if (output_index >= src->num_outputs()) {
return errors::InvalidArgument(
"Output ", output_index, " of node ", src->name(),
" does not exist. Node only has ", src->num_outputs(), " outputs.");
}
if (input_index >= dst->num_inputs()) {
return errors::InvalidArgument(
"Input ", input_index, " of node ", dst->name(),
" does not exist. Node only has ", dst->num_inputs(), " inputs.");
}
DataType src_out = src->output_type(output_index);
DataType dst_in = dst->input_type(input_index);
if (!TypesCompatible(dst_in, src_out)) {
return errors::InvalidArgument(
"Input ", input_index, " of node ", dst->name(), " was passed ",
DataTypeString(src_out), " from ", src->name(), ":", output_index,
" incompatible with expected ", DataTypeString(dst_in), ".");
}
g_->AddEdge(src, output_index, dst, input_index);
return absl::OkStatus();
}
}
Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
const GraphDef& gdef, Graph* g) {
ShapeRefiner refiner(gdef.versions().producer(), g->op_registry());
return GraphConstructor::Construct(
opts, gdef.node(), &gdef.versions(), &gdef.library(), &gdef.debug_info(),
g, &refiner, nullptr, nullptr,
nullptr);
}
Status ConvertGraphDefToGraph(const GraphConstructorOptions& opts,
GraphDef&& gdef, Graph* g) {
ShapeRefiner refiner(gdef.versions().producer(), g->op_registry());
return GraphConstructor::Construct(opts, std::move(gdef), g, &refiner,
nullptr,
nullptr,
nullptr);
}
Status ConvertNodeDefsToGraph(const GraphConstructorOptions& opts,
absl::Span<const NodeDef> nodes, Graph* g,
const GraphDebugInfo* debug_info) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, g->op_registry());
std::vector<const NodeDef*> node_defs;
node_defs.reserve(nodes.size());
for (const auto& n : nodes) {
node_defs.push_back(&n);
}
return GraphConstructor::Construct(opts, node_defs, nullptr, nullptr,
debug_info, g, &refiner,
nullptr,
nullptr,
nullptr);
}
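// Validates the `results` contract implied by `opts`, supplies a default
// ShapeRefiner when the caller passes none, and warns when graphs with
// different producer versions share one refiner, before delegating to
// GraphConstructor::Construct().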
Status ImportGraphDef(const ImportGraphDefOptions& opts, const GraphDef& gdef,
Graph* g, ShapeRefiner* refiner,
ImportGraphDefResults* results) {
if (!opts.return_tensors.empty()) {
if (results == nullptr) {
return errors::InvalidArgument(
"results argument to ImportGraphDef() must be non-null if "
"opts.return_tensors is non-empty");
}
}
if (!opts.return_nodes.empty()) {
if (opts.skip_mapped_nodes) {
return errors::InvalidArgument(
"Requesting return_nodes with skip_mapped_nodes set is not currently "
"supported");
}
if (results == nullptr) {
return errors::InvalidArgument(
"results argument to ImportGraphDef() must be non-null if "
"opts.return_nodes is non-empty");
}
}
if (results != nullptr) {
if (!results->return_tensors.empty() || !results->return_nodes.empty() ||
!results->missing_unused_input_map_keys.empty()) {
return errors::InvalidArgument(
"All fields in results argument to ImportGraphDef() must be empty.");
}
}
ShapeRefiner default_refiner(gdef.versions().producer(), g->op_registry());
if (refiner == nullptr) {
refiner = &default_refiner;
} else {
if (gdef.versions().producer() > 0 &&
gdef.versions().producer() < refiner->graph_def_version() &&
g->num_nodes() > 2) {
LOG(WARNING) << "Importing a graph with a lower producer version "
<< gdef.versions().producer()
<< " into an existing graph with producer version "
<< refiner->graph_def_version() << ". Shape inference will "
<< "have run different parts of the graph with different "
<< "producer versions.";
}
}
refiner->set_graph_def_version(
std::min(refiner->graph_def_version(), gdef.versions().producer()));
if (results == nullptr) {
return GraphConstructor::Construct(opts, gdef.node(), &gdef.versions(),
&gdef.library(), &gdef.debug_info(), g,
refiner, nullptr, nullptr, nullptr);
} else {
return GraphConstructor::Construct(
opts, gdef.node(), &gdef.versions(), &gdef.library(),
&gdef.debug_info(), g, refiner, &results->return_tensors,
&results->return_nodes, &results->missing_unused_input_map_keys);
}
}
void CopyGraph(const Graph& src, Graph* dest) { dest->Copy(src); }
} | #include "tensorflow/core/common_runtime/graph_constructor.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
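// Test fixture: parses textual GraphDefs, runs ConvertGraphDefToGraph /
// ImportGraphDef, and offers helpers for asserting on the resulting nodes,
// edges, versions, and colocation groups.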
class GraphConstructorTest : public ::testing::Test {
protected:
GraphConstructorTest() : graph_(OpRegistry::Global()) {}
void Convert(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef_));
}
void ExpectError(const string& gdef_ascii,
const std::vector<string>& expected_error_strs,
string not_expected_error_str = "") {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
GraphConstructorOptions opts;
Status status = ConvertGraphDefToGraph(opts, gdef_, &graph_);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
if (!not_expected_error_str.empty()) {
EXPECT_TRUE(!absl::StrContains(status.message(), not_expected_error_str))
<< "Expected not to find '" << not_expected_error_str << "' in "
<< status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectError(const string& gdef_ascii, const ImportGraphDefOptions& opts,
const std::vector<string>& expected_error_strs,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
const string original_graph_description = GraphDebugString();
Convert(gdef_ascii);
Status status = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_FALSE(status.ok());
for (const string& error : expected_error_strs) {
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Expected to find '" << error << "' in " << status;
}
EXPECT_EQ(original_graph_description, GraphDebugString());
}
void ExpectOK(const string& gdef_ascii) {
Convert(gdef_ascii);
GraphConstructorOptions opts;
TF_CHECK_OK(ConvertGraphDefToGraph(opts, gdef_, &graph_));
}
void ExpectOK(const string& gdef_ascii, const ImportGraphDefOptions& opts,
ShapeRefiner* refiner = nullptr,
ImportGraphDefResults* results = nullptr) {
Convert(gdef_ascii);
Status s = ImportGraphDef(opts, gdef_, &graph_, refiner, results);
EXPECT_EQ(absl::OkStatus(), s) << s;
}
void ExpectVersions(int min_consumer, int producer) {
EXPECT_EQ(min_consumer, graph_.versions().min_consumer())
<< "Expected min consumer " << min_consumer << ", got "
<< graph_.versions().min_consumer();
EXPECT_EQ(producer, graph_.versions().producer())
<< "Expected producer " << producer << ", got "
<< graph_.versions().producer();
}
Node* FindNode(const string& name) {
for (Node* n : graph_.nodes()) {
if (n->name() == name) return n;
}
return nullptr;
}
bool HasNode(const string& name) { return FindNode(name) != nullptr; }
bool HasEdge(const string& src, int src_out, const string& dst, int dst_in) {
for (const Edge* e : graph_.edges()) {
if (e->src()->name() == src && e->src_output() == src_out &&
e->dst()->name() == dst && e->dst_input() == dst_in) {
return true;
}
}
return false;
}
bool HasControlEdge(const string& src, const string& dst) {
return HasEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
string ColocationGroup(const string& node) {
Node* n = nullptr;
for (Node* ni : graph_.nodes()) {
if (ni->name() == node) {
n = ni;
break;
}
}
if (n == nullptr) {
return "";
}
std::vector<string> value;
Status s = GetNodeAttr(n->attrs(), kColocationAttrName, &value);
if (!s.ok()) {
return "";
}
if (value.size() != 1) {
ADD_FAILURE()
<< "ColocationGroup was written with the assumption of at most 1 "
"value for the _class attribute. Update it and its callers";
return "";
}
StringPiece loc(value[0]);
return absl::ConsumePrefix(&loc, kColocationGroupPrefix) ? string(loc) : "";
}
string GraphDebugString() const {
return graph_.ToGraphDefDebug().DebugString();
}
Graph graph_;
private:
GraphDef gdef_;
};
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("ABC");
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestInt").Input("a: int32");
REGISTER_OP("TestOneInputTwoOutputs")
.Input("x: float")
.Output("y: float")
.Output("z: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestOneInputOneOutput")
.Input("x: T")
.Output("y: T")
.Attr("T: {float, int64}")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("TestVariadicOutput")
.Output("outputs: N * int32")
.Attr("N: int >= 0")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("TestDefaultAttr")
.Attr("default_int: int=31415")
.SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("RequiresCurrentGraphVersion")
.Output("version: int32")
.SetIsStateful()
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->graph_def_version() != TF_GRAPH_DEF_VERSION) {
return errors::InvalidArgument("Wrong graph version for shape");
}
return shape_inference::ScalarShape(c);
});
TEST_F(GraphConstructorTest, InvalidNodeName) {
auto expect_invalid_name = [this](const char* name) {
ExpectError(strings::StrCat("node { name: '", name, "' op: 'ABC' }"),
{"Node name contains invalid characters"});
};
expect_invalid_name("a:b");
expect_invalid_name("_abc");
expect_invalid_name(R"(a\\b)");
expect_invalid_name("/a");
expect_invalid_name("-a");
ExpectOK("node { name: 'a-bc_' op: 'ABC' }");
ExpectOK("node { name: 'a-B.0/.c_' op: 'ABC' }");
ExpectOK("node { name: '0123' op: 'ABC' }");
ExpectOK("node { name: '.0123' op: 'ABC' }");
}
TEST_F(GraphConstructorTest, InvalidSourceNodeName) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: 'W999' input: 'input' }",
{"Unknown input node", "W999"});
}
TEST_F(GraphConstructorTest, InvalidSourceNodeIndex) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1:1', 'input:1' ] }",
{"Connecting to invalid output 1 of source node W1"});
}
TEST_F(GraphConstructorTest, GraphWithCycle) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }",
{"cycle"});
}
TEST_F(GraphConstructorTest, GraphWithOKCycle) {
ExpectOK(R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF");
}
TEST_F(GraphConstructorTest, ImportGraphThatUsesConstantValueFromInsideLoop) {
const string pb_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "Const_1"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 1
}
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "Const_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/transpose"
op: "Transpose"
input: "while/Identity_1"
input: "while/Identity_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tperm"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Identity"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/transpose"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 21
}
)EOF";
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(pb_ascii, &def));
ImportGraphDefOptions opts;
auto s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
}
TEST_F(GraphConstructorTest, TypeMismatch) {
ExpectError(
"node { name: 'input' op: 'TestInput' }"
"node { name: 'int' op: 'TestInt' input: [ 'input' ] }",
{"Input 0 of node int was passed float from input:0 incompatible with "
"expected int32."});
}
TEST_F(GraphConstructorTest, EmptyGraph) {
ExpectOK("");
ExpectVersions(0, 0);
}
TEST_F(GraphConstructorTest, VersionGraph) {
ExpectOK(strings::StrCat("versions { producer: ", TF_GRAPH_DEF_VERSION,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER,
"}"));
ExpectVersions(TF_GRAPH_DEF_VERSION_MIN_CONSUMER, TF_GRAPH_DEF_VERSION);
}
TEST_F(GraphConstructorTest, ForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 22,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"forward compatibility guarantee"});
}
TEST_F(GraphConstructorTest, NoForwardCompatError) {
ExpectError(
strings::StrCat(
"node { name: 'a:b' op: 'ABC' }\n"
"versions { producer: ",
TF_GRAPH_DEF_VERSION + 21,
" min_consumer: ", TF_GRAPH_DEF_VERSION_MIN_CONSUMER, "}"),
{"Node name contains invalid characters"},
"forward compatibility guarantee");
}
TEST_F(GraphConstructorTest, LowVersion) {
ExpectError(strings::StrCat("versions { producer: ", -1, " }"),
{strings::StrCat("GraphDef producer version -1 below min "
"producer ",
TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
" supported by TensorFlow ", TF_VERSION_STRING,
". Please regenerate your graph.")});
}
TEST_F(GraphConstructorTest, HighVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
ExpectError(strings::StrCat("versions { min_consumer: ", version, " }"),
{strings::StrCat("GraphDef min consumer version ", version,
" above current version ", TF_GRAPH_DEF_VERSION,
" for TensorFlow ", TF_VERSION_STRING,
". Please upgrade TensorFlow.")});
}
TEST_F(GraphConstructorTest, BadVersion) {
const int version = TF_GRAPH_DEF_VERSION + 1;
const int bad = TF_GRAPH_DEF_VERSION;
ExpectError(
strings::StrCat("versions { producer: ", version, " bad_consumers: ", bad,
" }"),
{strings::StrCat(
"GraphDef disallows consumer version ", bad,
". Please upgrade TensorFlow: this version is likely buggy.")});
}
TEST_F(GraphConstructorTest, SimpleModel) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
}
TEST_F(GraphConstructorTest, SimpleModelWithControlEdges) {
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', 'input:1', '^t1' ] }");
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasEdge("W1", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
EXPECT_TRUE(HasEdge("W1", 0, "t2", 0));
EXPECT_TRUE(HasEdge("input", 1, "t2", 1));
EXPECT_TRUE(HasControlEdge("W1", "input"));
EXPECT_TRUE(HasControlEdge("t1", "t2"));
}
TEST_F(GraphConstructorTest, Error_ControlEdgeBeforeRealInput) {
ExpectError(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' input: [ '^W1' ] }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'W1', '^t1', 'input:1' ] }",
{"Node 't2': Control dependencies must come after regular dependencies"});
}
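// Imports an empty GraphDef, then a real one, then the same one twice more to
// exercise duplicate-name failures and prefixed re-import.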
TEST_F(GraphConstructorTest, ImportGraphDef) {
GraphDef def;
ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(2, graph_.num_nodes());
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_EQ(1, graph_.num_edges());
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node { name: "A" op: "TestParams" }
node { name: "X" op: "TestParams" }
node {
name: "B"
op: "TestOneInputTwoOutputs"
input: "A"
attr {
key: "_class"
value { list { s: "loc:@A" } }
}
}
node {
name: "C"
op: "TestOneInputTwoOutputs"
input: "B:1"
input: "^X"
}
node {
name: "D"
op: "TestMul"
input: "B:0"
input: "C:0"
})EOF",
&def);
ASSERT_TRUE(parsed);
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(5 + 2, graph_.num_nodes()); // 5 imported nodes plus source and sink.
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_EQ(9, graph_.num_edges());
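// Re-importing the same GraphDef without a prefix must fail: the node names
// already exist in the graph.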
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
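// With a prefix, the same GraphDef imports cleanly under "import/".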
opts.prefix = "import";
s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(10 + 2, graph_.num_nodes()); // Both imports plus source and sink.
EXPECT_EQ("A", ColocationGroup("B"));
EXPECT_EQ("import/A", ColocationGroup("import/B"));
EXPECT_TRUE(HasEdge("A", 0, "B", 0));
EXPECT_TRUE(HasEdge("B", 1, "C", 0));
EXPECT_TRUE(HasEdge("B", 0, "D", 0));
EXPECT_TRUE(HasEdge("C", 0, "D", 1));
EXPECT_TRUE(HasControlEdge("X", "C"));
EXPECT_TRUE(HasEdge("import/A", 0, "import/B", 0));
EXPECT_TRUE(HasEdge("import/B", 1, "import/C", 0));
EXPECT_TRUE(HasEdge("import/B", 0, "import/D", 0));
EXPECT_TRUE(HasEdge("import/C", 0, "import/D", 1));
EXPECT_TRUE(HasControlEdge("import/X", "import/C"));
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "A"));
EXPECT_TRUE(HasControlEdge(source, "X"));
EXPECT_TRUE(HasControlEdge("D", sink));
EXPECT_TRUE(HasControlEdge(source, "import/A"));
EXPECT_TRUE(HasControlEdge(source, "import/X"));
EXPECT_TRUE(HasControlEdge("import/D", sink));
EXPECT_EQ(17, graph_.num_edges());
}
TEST_F(GraphConstructorTest, ImportGraphDef_DefaultAttrs) {
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{ name:'A' op:'TestDefaultAttr'}", &def));
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
Node* a = nullptr;
for (Node* n : graph_.nodes()) {
if (n->name() == "A") {
a = n;
break;
}
}
ASSERT_TRUE(a != nullptr);
int value = 0;
s = GetNodeAttr(a->attrs(), "default_int", &value);
ASSERT_EQ(absl::OkStatus(), s) << s << " -- " << a->def().DebugString();
EXPECT_EQ(31415, value);
}
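// Producer below the minimum, min_consumer above the current version, and a
// bad_consumers entry equal to the current version are each rejected; valid
// version fields are merged into the graph's versions.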
TEST_F(GraphConstructorTest, ImportGraphDef_Versioning) {
GraphDef def;
const ImportGraphDefOptions opts;
def.mutable_versions()->set_producer(TF_GRAPH_DEF_VERSION_MIN_PRODUCER - 1);
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->set_min_consumer(TF_GRAPH_DEF_VERSION + 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
def.mutable_versions()->Clear();
graph_.ToGraphDef(&def);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
def.Clear();
const int original_min_consumer = graph_.versions().min_consumer();
def.mutable_versions()->set_min_consumer(original_min_consumer + 2);
def.mutable_versions()->add_bad_consumers(TF_GRAPH_DEF_VERSION - 1);
s = ImportGraphDef(opts, def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(original_min_consumer + 2, graph_.versions().min_consumer());
ASSERT_EQ(1, graph_.versions().bad_consumers_size());
EXPECT_EQ(TF_GRAPH_DEF_VERSION - 1, graph_.versions().bad_consumers(0));
}
TEST_F(GraphConstructorTest, ImportGraphDef_DeprecatedOps) {
GraphDef def;
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node {
name: "zeros"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
dim {
size: 149
}
dim {
size: 149
}
dim {
size: 32
}
}
float_val: 0.0
}
}
}
}
node {
name: "m_v_beta_gamma"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 32
}
}
tensor_content: "\265\374\010=S\250\t\276\206\371>;Z\306y>\217]@\276\347\206\202\275\3747\241\275+1\227=J1\352\275\353?H;`\253\000>\023Y\014\276\341\310L;\301\030\314;\032Kw\275\273fQ;\036\252\200=\257o/\273\377\241\247\275\307,\332\274L\255\247\274\023\331R=r\271\225<\016/\204<\364\340\375\272t\030J=\220\306}\276\276x\003\275\231\013}\276\212\034\224\276\257\020\216>A\223\217\276"
}
}
}
}
node {
name: "batchnorm"
op: "BatchNormWithGlobalNormalization"
input: "zeros"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
input: "m_v_beta_gamma"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "scale_after_normalization"
value {
b: false
}
}
attr {
key: "variance_epsilon"
value {
f: 0.0010000000475
}
}
}
)EOF",
&def);
ASSERT_TRUE(parsed);
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
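// With producer version 10 the deprecated BatchNormWithGlobalNormalization
// op is no longer available, so the import must fail.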
Graph g2(OpRegistry::Global());
def.mutable_versions()->set_producer(10);
s = ImportGraphDef(ImportGraphDefOptions(), def, &g2, nullptr);
EXPECT_EQ(error::UNIMPLEMENTED, s.code());
EXPECT_TRUE(absl::StrContains(s.message(),
"BatchNormWithGlobalNormalization is not "
"available in GraphDef version 10"))
<< s;
}
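// input_map rewires inputs of the imported nodes to tensors that already
// exist in the graph; "new_input" outputs are replaced by "input" outputs.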
TEST_F(GraphConstructorTest, ImportGraphDef_InputMap) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 0));
EXPECT_FALSE(HasEdge("new_input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithPrefix) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'input' op: 'TestInput' } "
"node { name: 'unmapped_input' op: 'TestInput'}",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("input", 1)] = TensorId("input", 0);
opts.prefix = "import";
ExpectOK(
R"EOF(
node { name: 'input' op: 'TestInput' }
node { name: 'unmapped_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'input:0', 'input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
node { name: 't3' op: 'TestMul' input: [ 'unmapped_input:0',
'unmapped_input:1' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("unmapped_input"));
EXPECT_TRUE(HasNode("import/unmapped_input"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasNode("import/t2"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "import/t1", 1));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_FALSE(HasEdge("import/input", 0, "import/t1", 1));
EXPECT_TRUE(HasEdge("import/t1", 0, "import/t2", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 0, "import/t3", 0));
EXPECT_TRUE(HasEdge("import/unmapped_input", 1, "import/t3", 1));
Node* t1 = FindNode("import/t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "input:0");
EXPECT_EQ(t1->requested_inputs()[1], "input:0");
Node* t2 = FindNode("import/t2");
ASSERT_EQ(t2->requested_inputs().size(), 2);
EXPECT_EQ(t2->requested_inputs()[0], "import/t1:0");
EXPECT_EQ(t2->requested_inputs()[1], "import/t1:0");
Node* t3 = FindNode("import/t3");
ASSERT_EQ(t3->requested_inputs().size(), 2);
EXPECT_EQ(t3->requested_inputs()[0], "import/unmapped_input:0");
EXPECT_EQ(t3->requested_inputs()[1], "import/unmapped_input:1");
}
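// input_map entries on Graph::kControlSlot remap control dependencies to
// pre-existing nodes.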
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithControlEdges) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
const int kControlSlot = Graph::kControlSlot;
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("W1", kControlSlot);
opts.input_map[TensorId("W3", kControlSlot)] = TensorId("W1", kControlSlot);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'W3' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestOneInputTwoOutputs' input: [ 'W2' ] }
node { name: 't2' op: 'TestOneInputTwoOutputs'
input: [ 'input', '^W2', '^W3' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("W1"));
EXPECT_TRUE(HasNode("W2"));
EXPECT_TRUE(HasNode("W3"));
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasControlEdge("W1", "input"));
EXPECT_FALSE(HasControlEdge("W2", "input"));
EXPECT_TRUE(HasEdge("W2", 0, "t1", 0));
EXPECT_TRUE(HasControlEdge("W1", "t2"));
EXPECT_FALSE(HasControlEdge("W2", "t2"));
EXPECT_TRUE(HasEdge("input", 0, "t2", 0));
Node* t2 = FindNode("t2");
EXPECT_EQ(t2->in_edges().size(), 2);
opts.prefix = "import";
opts.input_map.clear();
opts.input_map[TensorId("W1", kControlSlot)] = TensorId("W1", kControlSlot);
ExpectOK(
R"EOF(
node { name: 'W1' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W1' ] }
node { name: 't1' op: 'TestOneInputTwoOutputs' input: [ 'W1' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("import/W1"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasControlEdge("W1", "import/input"));
EXPECT_FALSE(HasControlEdge("import/W1", "import/input"));
EXPECT_TRUE(HasEdge("import/W1", 0, "import/t1", 0));
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithBadControlEdge) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("W2", Graph::kControlSlot)] = TensorId("W1", 0);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"input_map entry ^W2->W1:0 between control edge and non-control edge"},
&refiner);
opts.input_map.clear();
opts.input_map[TensorId("W2", 0)] = TensorId("W1", Graph::kControlSlot);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"input_map entry W2:0->^W1 between control edge and non-control edge"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithInvalidNodeIndex) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input1' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("input2", 0)] = TensorId("input1", 3);
ExpectError(
R"EOF(
node { name: 'input2' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'input2:0', 'input2:1' ] }
)EOF",
opts,
{"Node 't1': Connecting to invalid output 3 of source node input1 which "
"has 2 outputs"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithMissingEntries) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'W1' op: 'TestParams' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
const int kControlSlot = Graph::kControlSlot;
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("DNE", kControlSlot);
ExpectError(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'input' op: 'TestInput' input: [ '^W2' ] }
)EOF",
opts,
{"node 'DNE' in input_map does not exist in graph (input_map entry: "
"^W2->^DNE)"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapDuplicateNodeNames) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
Node* node;
TF_CHECK_OK(NodeBuilder("dup", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph_, &node));
TF_CHECK_OK(NodeBuilder("dup", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(&graph_, &node));
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("dup", 0);
ExpectError(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts,
{"cannot resolve input_map because multiple nodes exist with name 'dup'"},
&refiner);
}
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapMissingUnusedKeys) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
ImportGraphDefResults results;
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }",
opts, &refiner, &results);
EXPECT_TRUE(results.missing_unused_input_map_keys.empty());
results.missing_unused_input_map_keys.push_back(TensorId());
ExpectError(
"node { name: 'W2' op: 'TestParams' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
&refiner, &results);
const int kControlSlot = Graph::kControlSlot;
results.missing_unused_input_map_keys.clear();
opts.input_map[TensorId("W2", kControlSlot)] = TensorId("W1", kControlSlot);
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 3)] = TensorId("input", 0);
opts.input_map[TensorId("DNE", 0)] = TensorId("input", 0);
opts.input_map[TensorId("t1", 0)] = TensorId("W1", 0);
opts.input_map[TensorId("variadic", 4)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'new_input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 'variadic' op: 'TestVariadicOutput'
attr { key: "N" value { i: 5 } } }
)EOF",
opts, &refiner, &results);
std::set<TensorId> expected_unused_keys = {TensorId("new_input", 3),
TensorId("DNE", 0)};
ASSERT_EQ(results.missing_unused_input_map_keys.size(),
expected_unused_keys.size());
std::set<TensorId> actual_unused_keys(
results.missing_unused_input_map_keys.begin(),
results.missing_unused_input_map_keys.end());
EXPECT_EQ(actual_unused_keys, expected_unused_keys);
opts = ImportGraphDefOptions();
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 2)] = TensorId("input", 1);
opts.skip_mapped_nodes = true;
opts.prefix = "import";
results = ImportGraphDefResults();
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'new_input' op: 'TestInput' input: [ '^W2' ] }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts, &refiner, &results);
ASSERT_EQ(results.missing_unused_input_map_keys.size(), 1);
EXPECT_EQ(results.missing_unused_input_map_keys[0],
SafeTensorId("new_input", 2));
}
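// The mapped node "new_input" is absent from the GraphDef itself; its tensors
// are supplied entirely through input_map.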
TEST_F(GraphConstructorTest, ImportGraphDef_InputMapWithUnboundInput) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_FALSE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
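// skip_mapped_nodes elides an imported node when all of its outputs are
// remapped through input_map.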
TEST_F(GraphConstructorTest, ImportGraphDef_SkipMappedNodes_FullyMapped) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.skip_mapped_nodes = true;
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 1);
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_FALSE(HasNode("new_input"));
EXPECT_TRUE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "input:1");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
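// Only output 1 is mapped here, so "new_input" must still be imported.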
TEST_F(GraphConstructorTest, ImportGraphDef_SkipMappedNodes_NotFullyMapped) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
ImportGraphDefOptions opts;
opts.skip_mapped_nodes = true;
opts.input_map[TensorId("new_input", 1)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
node { name: 't2' op: 'TestMul' input: [ 't1:0', 't1:0' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("t2"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_FALSE(HasEdge("input", 1, "t1", 0));
EXPECT_TRUE(HasEdge("input", 0, "t1", 1));
EXPECT_TRUE(HasEdge("new_input", 0, "t1", 0));
EXPECT_FALSE(HasEdge("new_input", 1, "t1", 1));
EXPECT_TRUE(HasEdge("t1", 0, "t2", 0));
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
ASSERT_EQ(t1->requested_inputs()[0], "new_input:0");
ASSERT_EQ(t1->requested_inputs()[1], "input:0");
}
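// return_tensors yields post-import (Node*, index) pairs, reflecting any
// prefixing and input_map substitutions.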
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnTensors) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
opts.return_tensors.push_back({"input", 1});
opts.return_tensors.push_back({"t1", 0});
opts.return_tensors.push_back({"input", 0});
ImportGraphDefResults results;
ExpectOK(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: ['input:0', 'input:1'] }",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input", 1, "t1", 1));
ASSERT_EQ(results.return_tensors.size(), 3);
EXPECT_EQ(results.return_tensors[0].first->name(), "input");
EXPECT_EQ(results.return_tensors[0].second, 1);
EXPECT_EQ(results.return_tensors[1].first->name(), "t1");
EXPECT_EQ(results.return_tensors[1].second, 0);
EXPECT_EQ(results.return_tensors[2].first->name(), "input");
EXPECT_EQ(results.return_tensors[2].second, 0);
opts.return_tensors.clear();
results = ImportGraphDefResults();
opts.prefix = "import";
opts.input_map[{"new_input", 1}] = {"input", 0};
opts.return_tensors.push_back({"new_input", 0});
opts.return_tensors.push_back({"new_input", 1});
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("import/new_input"));
ASSERT_EQ(results.return_tensors.size(), 2);
EXPECT_EQ(results.return_tensors[0].first->name(), "import/new_input");
EXPECT_EQ(results.return_tensors[0].second, 0);
EXPECT_EQ(results.return_tensors[1].first->name(), "input");
EXPECT_EQ(results.return_tensors[1].second, 0);
opts.prefix.clear();
opts.input_map.clear();
opts.return_tensors.clear();
results = ImportGraphDefResults();
opts.input_map[{"new_input", 0}] = {"_SOURCE", 0};
opts.return_tensors.push_back({"new_input", 0});
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("new_input"));
ASSERT_EQ(results.return_tensors.size(), 1);
EXPECT_EQ(results.return_tensors[0].first->name(), "_SOURCE");
EXPECT_EQ(results.return_tensors[0].second, 0);
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnTensorsErrors) {
ImportGraphDefOptions opts;
opts.return_tensors.push_back({"new_input", 0});
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"results argument to ImportGraphDef() must be non-null if "
"opts.return_tensors is non-empty"});
ImportGraphDefResults results;
results.return_tensors.push_back({nullptr, 0});
ExpectError(
"node { name: 'new_input' op: 'TestInput' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
nullptr, &results);
results.return_tensors.clear();
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"Requested return tensor 'new_input:0' not found in graph def"},
nullptr, &results);
opts.return_tensors.clear();
opts.return_tensors.push_back({"new_input", 2});
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"Invalid return output 2 of node 'new_input', which has 2 "
"output(s)"},
nullptr, &results);
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnNodes) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ImportGraphDefOptions opts;
opts.return_nodes.push_back("input");
opts.return_nodes.push_back("t1");
ImportGraphDefResults results;
ExpectOK(
"node { name: 'input' op: 'TestInput' }"
"node { name: 'input2' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: ['input:0', 'input2:1'] }",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("input2"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("input2", 1, "t1", 1));
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_tensors.size(), 0);
EXPECT_EQ(results.missing_unused_input_map_keys.size(), 0);
EXPECT_EQ(results.return_nodes[0]->name(), "input");
EXPECT_EQ(results.return_nodes[1]->name(), "t1");
opts = ImportGraphDefOptions();
results = ImportGraphDefResults();
opts.prefix = "import";
opts.return_nodes.push_back("input");
ExpectOK("node { name: 'input' op: 'TestInput' }", opts, &refiner, &results);
EXPECT_TRUE(HasNode("import/input"));
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "import/input");
opts = ImportGraphDefOptions();
results = ImportGraphDefResults();
opts.input_map[{"new_input", 0}] = {"input", 0};
opts.return_nodes.push_back("new_input");
ExpectOK("node { name: 'new_input' op: 'TestInput' }", opts, &refiner,
&results);
EXPECT_TRUE(HasNode("new_input"));
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "new_input");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ReturnNodesErrors) {
ImportGraphDefOptions opts;
opts.return_nodes.push_back("new_input");
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"results argument to ImportGraphDef() must be non-null if "
"opts.return_nodes is non-empty"});
ImportGraphDefResults results;
results.return_nodes.push_back(nullptr);
ExpectError(
"node { name: 'new_input' op: 'TestInput' }", opts,
{"All fields in results argument to ImportGraphDef() must be empty."},
nullptr, &results);
results.return_nodes.clear();
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"Requested return node 'new_input' not found in graph def"},
nullptr, &results);
opts.skip_mapped_nodes = true;
ExpectError("node { name: 'new_input' op: 'TestInput' }", opts,
{"Requesting return_nodes with skip_mapped_nodes set is not "
"currently supported"});
}
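// uniquify_names resolves name collisions by appending "_<N>" suffixes and
// rewriting inputs to match; uniquify_prefix does the same for the prefix.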
TEST_F(GraphConstructorTest, ImportGraphDef_UniquifyNames) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
const char* graph_def_str =
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }";
ImportGraphDefOptions opts;
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
ImportGraphDefResults results;
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A");
EXPECT_EQ(results.return_nodes[1]->name(), "B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1:0");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_2");
EXPECT_EQ(results.return_nodes[1]->name(), "B_2");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_2:0");
opts.prefix = "A";
opts.uniquify_prefix = true;
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_3/A");
EXPECT_EQ(results.return_nodes[1]->name(), "A_3/B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_3/A");
ExpectOK("node { name: 'B_3' op: 'TestInput' }");
opts.uniquify_prefix = false;
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A/A");
EXPECT_EQ(results.return_nodes[1]->name(), "A/B");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A/A");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A/A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "A/B_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A/A_1:0");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("A_1");
opts.return_nodes.push_back("B_1");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A_1' op: 'TestInput' }"
"node { name: 'B_1' op: 'TestOneInputTwoOutputs' input: ['A_1:0'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1_1");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A_1_1:0");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("A_4");
opts.return_nodes.push_back("B");
opts.return_nodes.push_back("B_4/B");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'A_4' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }"
"node { name: 'B_4/B' op: 'TestOneInputTwoOutputs' input: ['A_4'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 4);
EXPECT_EQ(results.return_nodes[0]->name(), "A_5");
EXPECT_EQ(results.return_nodes[1]->name(), "A_4");
EXPECT_EQ(results.return_nodes[2]->name(), "B_5");
EXPECT_EQ(results.return_nodes[2]->def().input(0), "A_5:0");
EXPECT_EQ(results.return_nodes[3]->name(), "B_4/B");
EXPECT_EQ(results.return_nodes[3]->def().input(0), "A_4");
ExpectOK("node { name: 'foo/abc' op: 'ABC' }");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("foo");
results = ImportGraphDefResults();
ExpectOK("node { name: 'foo' op: 'TestInput' }", opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 1);
EXPECT_EQ(results.return_nodes[0]->name(), "foo_1");
ExpectOK("node { name: 'outer/inner/abc' op: 'ABC' }");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.return_nodes.push_back("outer");
opts.return_nodes.push_back("inner");
opts.return_nodes.push_back("abc");
opts.return_nodes.push_back("outer/inner");
opts.return_nodes.push_back("outer/inner/abc");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'outer' op: 'TestInput' }"
"node { name: 'inner' op: 'TestInput' }"
"node { name: 'abc' op: 'TestInput' }"
"node { name: 'outer/inner' op: 'TestInput' }"
"node { name: 'outer/inner/abc' op: 'TestInput' }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 5);
EXPECT_EQ(results.return_nodes[0]->name(), "outer_1");
EXPECT_EQ(results.return_nodes[1]->name(), "inner");
EXPECT_EQ(results.return_nodes[2]->name(), "abc");
EXPECT_EQ(results.return_nodes[3]->name(), "outer/inner_1");
EXPECT_EQ(results.return_nodes[4]->name(), "outer/inner/abc_1");
opts = ImportGraphDefOptions();
opts.uniquify_names = true;
opts.input_map[TensorId("A", 0)] = TensorId("A", 0);
opts.input_map[TensorId("B", 0)] = TensorId("B", 0);
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
results = ImportGraphDefResults();
ExpectOK(graph_def_str, opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_6");
EXPECT_EQ(results.return_nodes[1]->name(), "B_6");
EXPECT_EQ(results.return_nodes[1]->def().input(0), "A:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_UniquifyNames_ColocationGroups) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A'] }");
ImportGraphDefOptions opts;
opts.uniquify_names = true;
opts.return_nodes.push_back("A");
opts.return_nodes.push_back("B");
ImportGraphDefResults results;
ExpectOK(
"node { name: 'A' op: 'TestInput' }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] "
" attr { key: '_class' value { list { s:'loc:@A' } } } }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_1");
EXPECT_EQ(results.return_nodes[1]->name(), "B_1");
const AttrValue* class_attr =
results.return_nodes[1]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@A_1");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' "
" attr { key: '_class' value { list { s:'loc:@B' } } } }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_2");
EXPECT_EQ(results.return_nodes[1]->name(), "B_2");
class_attr = results.return_nodes[0]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_2");
results = ImportGraphDefResults();
ExpectOK(
"node { name: 'A' op: 'TestInput' "
" attr { key: '_class' value { list { s:'loc:@B' } } } }"
"node { name: 'B' op: 'TestOneInputTwoOutputs' input: ['A:0'] "
" attr { key: '_class' value { list { s:'loc:@B' } } } }",
opts, &refiner, &results);
ASSERT_EQ(results.return_nodes.size(), 2);
EXPECT_EQ(results.return_nodes[0]->name(), "A_3");
EXPECT_EQ(results.return_nodes[1]->name(), "B_3");
class_attr = results.return_nodes[0]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_3");
class_attr = results.return_nodes[1]->attrs().Find(kColocationAttrName);
ASSERT_TRUE(class_attr != nullptr);
ASSERT_EQ(class_attr->list().s_size(), 1);
EXPECT_EQ(class_attr->list().s(0), "loc:@B_3");
}
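// A well-formed while loop (Enter/Merge/Switch/NextIteration/Exit) imports
// cleanly despite the back edge from NextIteration to Merge.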
TEST_F(GraphConstructorTest, ImportGraphDef_WithCycle) {
GraphDef def;
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF",
&def);
ASSERT_TRUE(parsed);
Status s = ImportGraphDef(ImportGraphDefOptions(), def, &graph_, nullptr);
EXPECT_EQ(absl::OkStatus(), s) << s;
}
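// opts.control_dependencies are attached to imported nodes that do not
// already depend on them through other imported edges.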
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDeps) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'W2' op: 'TestParams' }",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.control_dependencies = {"W1", "W2"};
opts.prefix = "import";
opts.input_map[TensorId("W2", -1)] = TensorId("W2", -1);
opts.input_map[TensorId("W3", -1)] = TensorId("W2", -1);
ExpectOK(
R"EOF(
node { name: 'W2' op: 'TestParams' }
node { name: 'W3' op: 'TestParams' }
node { name: 'input' op: 'TestInput' }
node { name: 'input2' op: 'TestInput' input: [ '^W2' ] }
node { name: 'input3' op: 'TestInput' input: [ '^W2', '^W3' ] }
node { name: 't1' op: 'TestMul' input: [ 'input:0', 'input:1' ] }
node { name: 't2' op: 'TestMul'
input: [ 'input:0', 'input:1', '^W2', '^W3' ] }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("import/W2"));
EXPECT_TRUE(HasNode("import/W3"));
EXPECT_TRUE(HasNode("import/input"));
EXPECT_TRUE(HasNode("import/input2"));
EXPECT_TRUE(HasNode("import/input3"));
EXPECT_TRUE(HasNode("import/t1"));
EXPECT_TRUE(HasNode("import/t2"));
EXPECT_TRUE(HasControlEdge("W1", "import/W2"));
EXPECT_TRUE(HasControlEdge("W2", "import/W2"));
EXPECT_TRUE(HasControlEdge("W1", "import/W3"));
EXPECT_TRUE(HasControlEdge("W2", "import/W3"));
EXPECT_TRUE(HasControlEdge("W1", "import/input"));
EXPECT_TRUE(HasControlEdge("W2", "import/input"));
EXPECT_FALSE(HasControlEdge("W1", "import/t1"));
EXPECT_FALSE(HasControlEdge("W2", "import/t1"));
EXPECT_TRUE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("import/input", 1, "import/t1", 1));
EXPECT_TRUE(HasControlEdge("W2", "import/t2"));
EXPECT_FALSE(HasControlEdge("W1", "import/t2"));
EXPECT_TRUE(HasEdge("import/input", 0, "import/t1", 0));
EXPECT_TRUE(HasEdge("import/input", 1, "import/t1", 1));
EXPECT_TRUE(HasControlEdge("W1", "import/input2"));
EXPECT_TRUE(HasControlEdge("W2", "import/input2"));
EXPECT_FALSE(HasControlEdge("import/W2", "import/input2"));
EXPECT_TRUE(HasControlEdge("W1", "import/input3"));
EXPECT_TRUE(HasControlEdge("W2", "import/input3"));
Node* w2 = FindNode("import/W2");
ASSERT_EQ(w2->requested_inputs().size(), 2);
EXPECT_EQ(w2->requested_inputs()[0], "^W1");
EXPECT_EQ(w2->requested_inputs()[1], "^W2");
Node* w3 = FindNode("import/W3");
ASSERT_EQ(w3->requested_inputs().size(), 2);
EXPECT_EQ(w3->requested_inputs()[0], "^W1");
EXPECT_EQ(w3->requested_inputs()[1], "^W2");
Node* input = FindNode("import/input");
ASSERT_EQ(input->requested_inputs().size(), 2);
EXPECT_EQ(input->requested_inputs()[0], "^W1");
EXPECT_EQ(input->requested_inputs()[1], "^W2");
Node* input2 = FindNode("import/input2");
ASSERT_EQ(input2->requested_inputs().size(), 2);
EXPECT_EQ(input2->requested_inputs()[0], "^W2");
EXPECT_EQ(input2->requested_inputs()[1], "^W1");
Node* input3 = FindNode("import/input3");
ASSERT_EQ(input3->requested_inputs().size(), 2);
EXPECT_EQ(input3->requested_inputs()[0], "^W2");
EXPECT_EQ(input3->requested_inputs()[1], "^W1");
Node* t1 = FindNode("import/t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "import/input:0");
EXPECT_EQ(t1->requested_inputs()[1], "import/input:1");
Node* t2 = FindNode("import/t2");
ASSERT_EQ(t2->requested_inputs().size(), 3);
EXPECT_EQ(t2->requested_inputs()[0], "import/input:0");
EXPECT_EQ(t2->requested_inputs()[1], "import/input:1");
EXPECT_EQ(t2->requested_inputs()[2], "^W2");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDepsWithCycle) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }",
ImportGraphDefOptions(), &refiner);
ImportGraphDefOptions opts;
opts.control_dependencies.push_back("W1");
opts.input_map[TensorId("new_input", 0)] = TensorId("input", 0);
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 'merge' op: 'Merge' input: [ 'new_input:0', 'next:0' ]
attr { key: "N" value: { i: 2 } }
attr { key: "T" value: { type: DT_FLOAT } } }
node { name: 't1' op: 'TestMul' input: [ 'merge:0', 'merge:0' ] }
node { name: 'next' op: 'NextIteration' input: ['t1:0']
attr { key: "T" value: { type: DT_FLOAT } } }
)EOF",
opts, &refiner);
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasNode("merge"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasNode("next"));
EXPECT_TRUE(HasEdge("merge", 0, "t1", 0));
EXPECT_TRUE(HasEdge("t1", 0, "next", 0));
EXPECT_TRUE(HasEdge("next", 0, "merge", 1));
EXPECT_TRUE(HasControlEdge("W1", "merge"));
EXPECT_FALSE(HasControlEdge("W1", "t1"));
Node* merge = FindNode("merge");
ASSERT_EQ(merge->requested_inputs().size(), 3);
EXPECT_EQ(merge->requested_inputs()[0], "input:0");
EXPECT_EQ(merge->requested_inputs()[1], "next:0");
EXPECT_EQ(merge->requested_inputs()[2], "^W1");
Node* t1 = FindNode("t1");
ASSERT_EQ(t1->requested_inputs().size(), 2);
EXPECT_EQ(t1->requested_inputs()[0], "merge:0");
EXPECT_EQ(t1->requested_inputs()[1], "merge:0");
Node* next = FindNode("next");
ASSERT_EQ(next->requested_inputs().size(), 1);
EXPECT_EQ(next->requested_inputs()[0], "t1:0");
}
TEST_F(GraphConstructorTest, ImportGraphDef_ControlDepsErrors) {
ImportGraphDefOptions opts;
opts.control_dependencies.push_back("W1");
ExpectError("node { name: 'W1' op: 'TestParams' }", opts,
{"node 'W1' in control_dependencies does not exist in graph"});
}
TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNotChangeTheGraph) {
GraphDef def;
TF_EXPECT_OK(
NodeDefBuilder("scope/A", "TestParams").Finalize(def.add_node()));
ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();
Status s = ImportGraphDef(opts, def, &graph_, nullptr);
ASSERT_EQ(absl::OkStatus(), s) << s;
EXPECT_EQ(3, graph_.num_nodes());
EXPECT_TRUE(HasControlEdge(source, sink));
EXPECT_TRUE(HasControlEdge(source, "scope/A"));
EXPECT_TRUE(HasControlEdge("scope/A", sink));
EXPECT_EQ(3, graph_.num_edges());
const string original_graph_description = GraphDebugString();
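// Attempts the import, expects the given error substring, and verifies the
// graph is left exactly as it was before the failed import.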
#define EXPECT_IMPORT_FAILURE(graph_def, options, expected_err) \
do { \
Status s = ImportGraphDef(options, graph_def, &graph_, nullptr); \
EXPECT_NE(absl::OkStatus(), s) << s; \
EXPECT_TRUE(s.message().find(expected_err) != string::npos) << s; \
const string graph_description = GraphDebugString(); \
EXPECT_EQ(original_graph_description, graph_description); \
EXPECT_EQ(3, graph_.num_nodes()); \
EXPECT_TRUE(HasControlEdge(source, sink)); \
EXPECT_TRUE(HasControlEdge(source, "scope/A")); \
EXPECT_TRUE(HasControlEdge("scope/A", sink)); \
EXPECT_EQ(3, graph_.num_edges()); \
} while (0)
EXPECT_IMPORT_FAILURE(def, opts,
"Node name 'scope/A' already exists in the Graph");
GraphDef bad_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'!B' op:'TestParams'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node '!B': Node name contains invalid characters");
opts.prefix = "!bad_prefix";
EXPECT_IMPORT_FAILURE(def, opts,
"Imported node name prefix '!bad_prefix/' would lead "
"to invalid node names");
opts.prefix = "import";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'B' op:'SomeUnknownOp'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Op type not registered 'SomeUnknownOp'");
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'B' op:'TestOneInputTwoOutputs' input:'C'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts, "Node 'B': Unknown input node 'C'");
bool parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"Root" op:"TestParams" } # TestParams produces a float
node{
name:"Integer"
op:"TestOneInputOneOutput"
attr{ key:"T" value{ type:DT_INT64 } }
input: "Root"
}
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Input 0 of node import/Integer was passed float from "
"import/Root:0 incompatible with expected int64");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"A" op:"TestParams" }
node{ name:"B" op:"TestOneInputTwoOutputs" input:"A:1" }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node 'B': Connecting to invalid output 1 of source "
"node A which has 1 outputs");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{ name:"A" op:"TestParams" }
node{ name:"B" op:"TestParams" }
node{ name:"C" op:"TestOneInputTwoOutputs" input:"A" input:"B" }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts, "do not match 2 inputs specified");
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{ name:'A' op:'TestOneInputTwoOutputs' }", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts, "do not match 0 inputs specified");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node{
name:"A"
op:"TestParams"
attr{
key:"_class"
value{ list{ s:"loc:@B" } }
}
})EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(
bad_def, opts, "Node 'A' expects to be colocated with unknown node 'B'");
opts.prefix = "";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node{name:'scope/A' op:'TestParams'}", &bad_def));
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node name 'scope/A' already exists in the Graph");
parsed = protobuf::TextFormat::ParseFromString(
R"EOF(
node { name: "A" op: "TestParams" }
node { name: "B" op: "L2Loss"
input: "A:0"
attr { key: "T" value { type: DT_FLOAT } }
attr { key: "_output_shapes"
value { list { shape { dim { size: 43 } } } } } }
)EOF",
&bad_def);
ASSERT_TRUE(parsed);
EXPECT_IMPORT_FAILURE(bad_def, opts,
"Node 'B' has an _output_shapes attribute inconsistent "
"with the GraphDef for output #0");
#undef EXPECT_IMPORT_FAILURE
}
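// Imports a GraphDef carrying a function library (a function plus its
// registered gradient) and runs the resulting graph end to end.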
TEST_F(GraphConstructorTest, ImportGraphDef_FunctionDefs) {
ImportGraphDefOptions opts;
ExpectOK(
R"EOF(
node {
name: "Placeholder" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Placeholder_1" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Foo_d03c39a3" op: "Foo_d03c39a3"
input: "Placeholder" input: "Placeholder_1"
}
library {
function {
signature {
name: "Foo_d03c39a3"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "add" type: DT_FLOAT }
}
node_def {
name: "add" op: "Add" input: "x" input: "y"
attr { key: "T" value { type: DT_FLOAT } }
}
ret { key: "add" value: "add:z:0" }
}
function {
signature {
name: "FooGrad_dc60abc8"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
input_arg { name: "dz" type: DT_FLOAT }
output_arg { name: "dz" type: DT_FLOAT }
output_arg { name: "dz_U0" type: DT_FLOAT }
}
ret { key: "dz" value: "dz:0" }
ret { key: "dz_U0" value: "dz:0" }
}
gradient {
function_name: "Foo_d03c39a3" gradient_func: "FooGrad_dc60abc8"
}
}
versions { producer: 21 min_consumer: 12 }
)EOF",
opts);
EXPECT_TRUE(HasNode("Placeholder"));
EXPECT_TRUE(HasNode("Placeholder_1"));
EXPECT_TRUE(HasNode("Foo_d03c39a3"));
const OpDef* op_def;
TF_ASSERT_OK(graph_.op_registry()->LookUpOpDef("Foo_d03c39a3", &op_def));
TF_ASSERT_OK(graph_.op_registry()->LookUpOpDef("FooGrad_dc60abc8", &op_def));
GraphDef gdef;
graph_.ToGraphDef(&gdef);
EXPECT_EQ(gdef.library().function_size(), 2);
EXPECT_EQ(gdef.library().gradient_size(), 1);
EXPECT_EQ(gdef.library().gradient()[0].function_name(), "Foo_d03c39a3");
EXPECT_EQ(gdef.library().gradient()[0].gradient_func(), "FooGrad_dc60abc8");
std::unique_ptr<Session> sess(NewSession(SessionOptions()));
TF_ASSERT_OK(sess->Create(gdef));
Tensor p1(DT_FLOAT, TensorShape({1}));
p1.scalar<float>()() = 1.0;
Tensor p2(DT_FLOAT, TensorShape({1}));
p2.scalar<float>()() = 2.0;
std::vector<std::pair<string, Tensor>> inputs = {{"Placeholder", p1},
{"Placeholder_1", p2}};
std::vector<string> output_names = {"Foo_d03c39a3"};
std::vector<string> target_names;
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run(inputs, output_names, target_names, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].scalar<float>()(), 3.0);
}
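// Like the previous test, but the imported function calls another function
// from the same library.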
TEST_F(GraphConstructorTest, ImportGraphDef_NestedFunctionDefs) {
ImportGraphDefOptions opts;
ExpectOK(
R"EOF(
node {
name: "Placeholder" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Placeholder_1" op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { } } }
}
node {
name: "Outer_966fa13d" op: "Outer_966fa13d"
input: "Placeholder" input: "Placeholder_1"
}
library {
function {
signature {
name: "Outer_966fa13d"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "Inner_d03c39a3" type: DT_FLOAT }
}
node_def {
name: "Inner_d03c39a3" op: "Inner_d03c39a3" input: "x" input: "y"
}
ret { key: "Inner_d03c39a3" value: "Inner_d03c39a3:add:0" }
}
function {
signature {
name: "Inner_d03c39a3"
input_arg { name: "x" type: DT_FLOAT }
input_arg { name: "y" type: DT_FLOAT }
output_arg { name: "add" type: DT_FLOAT }
}
node_def {
name: "add" op: "Add" input: "x" input: "y"
attr { key: "T" value { type: DT_FLOAT } }
}
ret { key: "add" value: "add:z:0" }
}
}
versions { producer: 21 min_consumer: 12 }
)EOF",
opts);
EXPECT_TRUE(HasNode("Placeholder"));
EXPECT_TRUE(HasNode("Placeholder_1"));
EXPECT_TRUE(HasNode("Outer_966fa13d"));
const OpDef* op_def;
Status s = graph_.op_registry()->LookUpOpDef("Inner_d03c39a3", &op_def);
ASSERT_TRUE(s.ok()) << s.message();
s = graph_.op_registry()->LookUpOpDef("Outer_966fa13d", &op_def);
ASSERT_TRUE(s.ok()) << s.message();
GraphDef gdef;
graph_.ToGraphDef(&gdef);
std::unique_ptr<Session> sess(NewSession(SessionOptions()));
s = sess->Create(gdef);
ASSERT_TRUE(s.ok()) << s.message();
Tensor p1(DT_FLOAT, TensorShape({1}));
p1.scalar<float>()() = 1.0;
Tensor p2(DT_FLOAT, TensorShape({1}));
p2.scalar<float>()() = 2.0;
std::vector<std::pair<string, Tensor>> inputs = {{"Placeholder", p1},
{"Placeholder_1", p2}};
std::vector<string> output_names = {"Outer_966fa13d"};
std::vector<string> target_names;
std::vector<Tensor> outputs;
s = sess->Run(inputs, output_names, target_names, &outputs);
ASSERT_TRUE(s.ok()) << s.message();
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0].scalar<float>()(), 3.0);
}
TEST_F(GraphConstructorTest, ImportGraphDef_OptionsMemMgmt) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK("node { name: 'input' op: 'TestInput' }", ImportGraphDefOptions(),
&refiner);
char buf1[100];
char buf2[100];
char buf3[100];
snprintf(buf1, sizeof(buf1), "input");
snprintf(buf2, sizeof(buf2), "new_input");
snprintf(buf3, sizeof(buf3), "t1");
ImportGraphDefOptions opts;
opts.input_map[TensorId(buf2, 0)] = TensorId(buf1, 0);
opts.return_tensors.push_back(TensorId(buf3, 0));
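// Clobber the buffers to verify that ImportGraphDefOptions made its own
// copies of the strings.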
snprintf(buf1, sizeof(buf1), "xxxxxxxxxxxxxxxxxxxx");
snprintf(buf2, sizeof(buf2), "xxxxxxxxxxxxxxxxxxxx");
snprintf(buf3, sizeof(buf3), "xxxxxxxxxxxxxxxxxxxx");
ImportGraphDefResults results;
ExpectOK(
R"EOF(
node { name: 'new_input' op: 'TestInput' }
node { name: 't1' op: 'TestMul' input: [ 'new_input:0', 'new_input:1' ] }
)EOF",
opts, &refiner, &results);
EXPECT_TRUE(HasNode("input"));
EXPECT_TRUE(HasNode("new_input"));
EXPECT_TRUE(HasNode("t1"));
EXPECT_TRUE(HasEdge("input", 0, "t1", 0));
EXPECT_TRUE(HasEdge("new_input", 1, "t1", 1));
ASSERT_EQ(results.return_tensors.size(), 1);
EXPECT_EQ(results.return_tensors[0].first->name(), "t1");
}
TEST_F(GraphConstructorTest, CopyGraph) {
const int v = TF_GRAPH_DEF_VERSION;
const int bad = v + 17;
VersionDef versions;
versions.set_producer(v - 1);
versions.set_min_consumer(v - 2);
versions.add_bad_consumers(bad);
Graph src(OpRegistry::Global());
src.set_versions(versions);
Graph dst(OpRegistry::Global());
CopyGraph(src, &dst);
EXPECT_EQ(dst.versions().producer(), versions.producer());
EXPECT_EQ(dst.versions().min_consumer(), versions.min_consumer());
EXPECT_EQ(dst.versions().bad_consumers_size(), 1);
EXPECT_EQ(dst.versions().bad_consumers(0), bad);
}
TEST_F(GraphConstructorTest, GraphDefVersionUsedForShapeInference) {
string gdef_ascii = strings::StrCat(R"EOF(
node{ name:"A" op:"RequiresCurrentGraphVersion" }
versions { producer: )EOF",
TF_GRAPH_DEF_VERSION - 1, "}");
ImportGraphDefOptions opts;
ExpectError(gdef_ascii, opts, {"Wrong graph version for shape"});
gdef_ascii = strings::StrCat(R"EOF(
node{ name:"A" op:"RequiresCurrentGraphVersion" }
versions { producer: )EOF",
TF_GRAPH_DEF_VERSION, "}");
ExpectOK(gdef_ascii, opts);
}
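// Merging takes the minimum producer, the maximum min_consumer, and the
// union of bad_consumers across imports.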
TEST_F(GraphConstructorTest, GraphDefVersionMergingDuringImport) {
ImportGraphDefOptions opts;
ExpectOK(
"versions { producer: 15 min_consumer: 5 bad_consumers: 2 bad_consumers: "
"3 "
"}",
opts);
EXPECT_EQ(15, graph_.versions().producer());
EXPECT_EQ(5, graph_.versions().min_consumer());
ASSERT_EQ(2, graph_.versions().bad_consumers_size());
EXPECT_EQ(2, graph_.versions().bad_consumers(0));
EXPECT_EQ(3, graph_.versions().bad_consumers(1));
ExpectOK(
"versions { producer: 10 min_consumer: 8 bad_consumers: 1 bad_consumers: "
"3 "
"}",
opts);
EXPECT_EQ(10, graph_.versions().producer());
EXPECT_EQ(8, graph_.versions().min_consumer());
ASSERT_EQ(3, graph_.versions().bad_consumers_size());
EXPECT_EQ(1, graph_.versions().bad_consumers(0));
EXPECT_EQ(2, graph_.versions().bad_consumers(1));
EXPECT_EQ(3, graph_.versions().bad_consumers(2));
ExpectOK("versions { producer: 20 min_consumer: 7 }", opts);
EXPECT_EQ(10, graph_.versions().producer());
EXPECT_EQ(8, graph_.versions().min_consumer());
ASSERT_EQ(3, graph_.versions().bad_consumers_size());
EXPECT_EQ(1, graph_.versions().bad_consumers(0));
EXPECT_EQ(2, graph_.versions().bad_consumers(1));
EXPECT_EQ(3, graph_.versions().bad_consumers(2));
}
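// A caller-provided ShapeRefiner tracks the lowest producer version seen
// across imports (20 after the first two imports, then 17).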
TEST_F(GraphConstructorTest, ImportGraphDefProvidedShapeRefinerVersions) {
ImportGraphDefOptions opts;
string gdef_ascii;
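// tensor_content stores raw bytes, so the expected constants differ by host
// byte order.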
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "Sum/input"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
node {
name: "Sum/reduction_indices"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\000\000\000\000\001"
}
}
}
}
node {
name: "Sum"
op: "Sum"
input: "Sum/input"
input: "Sum/reduction_indices"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "keep_dims"
value {
b: false
}
}
}
versions {
producer: 20
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "Sum/input"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
node {
name: "Sum/reduction_indices"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\000\001\000\000\000"
}
}
}
}
node {
name: "Sum"
op: "Sum"
input: "Sum/input"
input: "Sum/reduction_indices"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "keep_dims"
value {
b: false
}
}
}
versions {
producer: 20
})EOF");
#endif
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, graph_.op_registry());
ExpectOK(gdef_ascii, opts, &refiner);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
versions {
producer: 21
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
versions {
producer: 21
})EOF");
#endif
ExpectOK(gdef_ascii, opts, &refiner);
EXPECT_EQ(20, refiner.graph_def_version());
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\000\000\000\001\000\000\000\002"
}
}
}
}
versions {
producer: 17
})EOF");
#else
gdef_ascii = strings::StrCat(R"EOF(
node {
name: "RandomConst2"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 1
}
}
tensor_content: "\001\000\000\000\002\000\000\000"
}
}
}
}
versions {
producer: 17
})EOF");
#endif
ExpectOK(gdef_ascii, opts, &refiner);
EXPECT_EQ(17, refiner.graph_def_version());
}
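// Colocation with a node that does not exist fails unless
// validate_colocation_constraints is disabled.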
TEST_F(GraphConstructorTest, ImportGraphDef_ValidateColocationConstraints) {
GraphDef def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"node { name: 'A' op: 'TestInput' attr { key: '_class' value { list { "
"s:'loc:@missing' } } } }",
&def));
ImportGraphDefOptions options;
Status s = ImportGraphDef(options, def, &graph_, nullptr);
EXPECT_TRUE(errors::IsInvalidArgument(s)) << s;
options.validate_colocation_constraints = false;
TF_EXPECT_OK(ImportGraphDef(options, def, &graph_, nullptr));
}
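// default_device applies only to nodes that do not already request a device.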
TEST_F(GraphConstructorTest, ImportGraphDef_ValidateDefaultDevice) {
std::string gdef_ascii(
R"EOF(
node { name: 'test_input' op: 'TestInput' }
node { name: 'test_input_with_dev' op: 'TestInput' device: 'some dev'}
node { name: 'test_op' op: 'TestMul' input: [ 'test_input:0', 'test_input:1' ] }
node { name: 'test_op_with_dev' op: 'TestMul' input: [ 'test_input:0', 'test_input:1' ] device: 'some dev'}
)EOF");
GraphDef gdef;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(gdef_ascii, &gdef));
ImportGraphDefOptions options;
options.default_device = "/gpu:13";
ImportGraphDefResults res;
TF_ASSERT_OK(ImportGraphDef(options, gdef, &graph_, nullptr, &res));
std::map<string, string> node2dev;
for (Node* n : graph_.nodes()) {
node2dev[n->name()] = n->requested_device();
}
EXPECT_EQ(node2dev["test_input"], "/gpu:13");
EXPECT_EQ(node2dev["test_op"], "/gpu:13");
EXPECT_EQ(node2dev["test_input_with_dev"], "some dev");
EXPECT_EQ(node2dev["test_op_with_dev"], "some dev");
}
TEST_F(GraphConstructorTest, ImportGraphDef_UnknownOps) {
const string pb_ascii = "node { name: 'op_from_contrib' op: 'OpFromContrib'}";
ExpectError(pb_ascii, {"Op type not registered 'OpFromContrib'"});
ExpectError(
pb_ascii,
{"Make sure the Op and Kernel are registered in the "
"binary running in this process. Note that if you "
"are loading a saved graph which used ops from "
"tf.contrib (e.g. `tf.contrib.resampler`), accessing should be done "
"before importing the graph, as contrib ops are lazily registered "
"when the module is first accessed."});
}
TEST_F(GraphConstructorTest, GraphDebugInfo_Node_StackTrace_Deserialize) {
ExpectOK(R"(
node {
name: "w1"
op: "TestParams"
}
node {
name: "input"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "w1"
input: "input:1"
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
traces {
key: "w1"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 1
line: 30
func: "bar"
}
}
}
traces {
key: "input"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 2
line: 35
func: "tree"
}
}
}
traces {
key: "a1@foo"
value {
file_line_cols {
file_index: 0
line: 20
func: "foo"
}
file_line_cols {
file_index: 1
line: 30
func: "bar"
}
}
}
})");
Node* w1 = FindNode("w1");
EXPECT_NE(w1, nullptr);
const std::shared_ptr<AbstractStackTrace>& w1_stack = w1->GetStackTrace();
EXPECT_NE(w1_stack, nullptr);
EXPECT_EQ(w1_stack->ToString({}),
"File \"alpha.cc\", line 20, in foo\n"
"File \"beta.cc\", line 30, in bar");
Node* input = FindNode("input");
EXPECT_NE(input, nullptr);
const std::shared_ptr<AbstractStackTrace>& input_stack =
input->GetStackTrace();
EXPECT_NE(input_stack, nullptr);
EXPECT_EQ(input_stack->ToString({}),
"File \"alpha.cc\", line 20, in foo\n"
"File \"gamma.cc\", line 35, in tree");
}
TEST_F(GraphConstructorTest,
GraphDebugInfo_Node_StackTrace_Deserialize_InvalidFileIndex) {
ExpectOK(R"(
node {
name: "w1"
op: "TestParams"
}
node {
name: "input"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "w1"
input: "input:1"
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
traces {
key: "w1"
value {
file_line_cols {
file_index: 2
line: 20
func: "foo"
}
file_line_cols {
file_index: -1
line: 30
func: "negative_index"
}
file_line_cols {
file_index: 3
line: 40
func: "index_ge_length"
}
}
}
})");
Node* w1 = FindNode("w1");
EXPECT_NE(w1, nullptr);
const std::shared_ptr<AbstractStackTrace>& w1_stack = w1->GetStackTrace();
EXPECT_NE(w1_stack, nullptr);
EXPECT_EQ(w1_stack->ToString({}),
"File \"gamma.cc\", line 20, in foo\n"
"File \"<UNKNOWN_FILE_NAME>\", line 30, in negative_index\n"
"File \"<UNKNOWN_FILE_NAME>\", line 40, in index_ge_length");
}
TEST_F(GraphConstructorTest,
GraphDebugInfo_FunctionLibrary_StackTrace_Deserialize) {
ExpectOK(R"(
node {
name: "a"
op: "TestParams"
}
node {
name: "b"
op: "TestInput"
}
node {
name: "t1"
op: "TestMul"
input: "a"
input: "b:1"
}
library {
function {
signature { name: "foo" }
node_def { name: "a1" }
node_def { name: "a2" }
}
function {
signature { name: "bar" }
node_def { name: "b1" }
node_def { name: "b2" }
}
}
debug_info {
files: "alpha.cc"
files: "beta.cc"
files: "gamma.cc"
files: "delta.cc"
traces {
key: "input"
value {
file_line_cols { file_index: 0 line: 20 func: "foo" }
file_line_cols { file_index: 2 line: 35 func: "tree" }
}
}
traces {
key: "a1@foo"
value {
file_line_cols { file_index: 0 line: 20 func: "jazz" }
file_line_cols { file_index: 1 line: 30 func: "buzz" }
}
}
traces {
key: "a2@foo"
value {
file_line_cols { file_index: 1 line: 25 func: "fuzz" }
file_line_cols { file_index: 2 line: 35 func: "fizz" }
}
}
traces {
key: "b1@bar"
value {
file_line_cols { file_index: 0 line: 23 func: "chez" }
file_line_cols { file_index: 3 line: 33 func: "whiz" }
}
}
traces {
key: "b2@bar"
value {
file_line_cols { file_index: 1 line: 24 func: "quip" }
file_line_cols { file_index: 3 line: 34 func: "jape" }
}
}
})");
const FunctionLibraryDefinition& flib_def = graph_.flib_def();
core::RefCountPtr<FunctionRecord> foo_function_record =
flib_def.FindRecord("foo");
EXPECT_NE(foo_function_record.get(), nullptr);
const StackTracesMap& foo_stack_traces = foo_function_record->stack_traces();
auto a1_iter = foo_stack_traces.find("a1");
EXPECT_NE(a1_iter, foo_stack_traces.end());
std::shared_ptr<AbstractStackTrace> a1_stack_trace = a1_iter->second;
EXPECT_NE(a1_stack_trace.get(), nullptr);
EXPECT_EQ(a1_stack_trace->ToString({}),
"File \"alpha.cc\", line 20, in jazz\n"
"File \"beta.cc\", line 30, in buzz");
auto a2_iter = foo_stack_traces.find("a2");
EXPECT_NE(a2_iter, foo_stack_traces.end());
std::shared_ptr<AbstractStackTrace> a2_stack_trace = a2_iter->second;
EXPECT_NE(a2_stack_trace.get(), nullptr);
EXPECT_EQ(a2_stack_trace->ToString({}),
"File \"beta.cc\", line 25, in fuzz\n"
"File \"gamma.cc\", line 35, in fizz");
core::RefCountPtr<FunctionRecord> bar_function_record =
flib_def.FindRecord("bar");
EXPECT_NE(bar_function_record.get(), nullptr);
const StackTracesMap& bar_stack_traces = bar_function_record->stack_traces();
auto b1_iter = bar_stack_traces.find("b1");
EXPECT_NE(b1_iter, bar_stack_traces.end());
std::shared_ptr<AbstractStackTrace> b1_stack_trace = b1_iter->second;
EXPECT_NE(b1_stack_trace.get(), nullptr);
EXPECT_EQ(b1_stack_trace->ToString({}),
"File \"alpha.cc\", line 23, in chez\n"
"File \"delta.cc\", line 33, in whiz");
auto b2_iter = bar_stack_traces.find("b2");
EXPECT_NE(b2_iter, bar_stack_traces.end());
std::shared_ptr<AbstractStackTrace> b2_stack_trace = b2_iter->second;
EXPECT_NE(b2_stack_trace.get(), nullptr);
EXPECT_EQ(b2_stack_trace->ToString({}),
"File \"beta.cc\", line 24, in quip\n"
"File \"delta.cc\", line 34, in jape");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_constructor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/graph_constructor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0198949a-e919-4cf3-b597-d178b5b5c6e9 | cpp | tensorflow/tensorflow | session | tensorflow/core/common_runtime/session.cc | tensorflow/core/common_runtime/session_test.cc | #include "tensorflow/core/public/session.h"
#include <string>
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
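// Process-wide gauge recording whether any session has been created.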
auto* session_created = monitoring::Gauge<bool, 0>::New(
"/tensorflow/core/session_created", "True if a session was created.");
}
void SetSessionCreatedMetric() { session_created->GetCell()->Set(true); }
Session::Session() {}
Session::~Session() {}
Status Session::Run(const RunOptions& run_options,
const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_tensor_names,
const std::vector<string>& target_tensor_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata) {
return errors::Unimplemented(
"Run with options is not supported for this session.");
}
Status Session::PRunSetup(const std::vector<string>& input_names,
const std::vector<string>& output_names,
const std::vector<string>& target_nodes,
string* handle) {
return errors::Unimplemented(
"Partial run is not supported for this session.");
}
Status Session::PRun(const string& handle,
const std::vector<std::pair<string, Tensor> >& inputs,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
return errors::Unimplemented(
"Partial run is not supported for this session.");
}
Session* NewSession(const SessionOptions& options) {
SetSessionCreatedMetric();
Session* out_session;
Status s = NewSession(options, &out_session);
if (!s.ok()) {
LOG(ERROR) << "Failed to create session: " << s;
return nullptr;
}
return out_session;
}
Status NewSession(const SessionOptions& options, Session** out_session) {
SessionFactory* factory;
Status s = SessionFactory::GetFactory(options, &factory);
if (!s.ok()) {
*out_session = nullptr;
LOG(ERROR) << "Failed to get session factory: " << s;
return s;
}
SetSessionCreatedMetric();
s = factory->NewSession(options, out_session);
if (!s.ok()) {
*out_session = nullptr;
LOG(ERROR) << "Failed to create session: " << s;
}
return s;
}
Status Reset(const SessionOptions& options,
const std::vector<string>& containers) {
SessionFactory* factory;
TF_RETURN_IF_ERROR(SessionFactory::GetFactory(options, &factory));
return factory->Reset(options, containers);
}
} | #include "tensorflow/core/public/session.h"
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(SessionTest, InvalidTargetReturnsNull) {
SessionOptions options;
options.target = "invalid target";
EXPECT_EQ(nullptr, tensorflow::NewSession(options));
Session* session;
Status s = tensorflow::NewSession(options, &session);
EXPECT_EQ(s.code(), error::NOT_FOUND);
EXPECT_TRUE(absl::StrContains(
s.message(),
"No session factory registered for the given session options"));
}
class FakeSessionFactory : public SessionFactory {
public:
FakeSessionFactory() {}
bool AcceptsOptions(const SessionOptions& options) override {
return absl::StartsWith(options.target, "fake");
}
Status NewSession(const SessionOptions& options,
Session** out_session) override {
*out_session = nullptr;
return absl::OkStatus();
}
};
class FakeSessionRegistrar {
public:
FakeSessionRegistrar() {
SessionFactory::Register("FAKE_SESSION_1", new FakeSessionFactory());
SessionFactory::Register("FAKE_SESSION_2", new FakeSessionFactory());
}
};
static FakeSessionRegistrar registrar;
TEST(SessionTest, MultipleFactoriesForTarget) {
SessionOptions options;
options.target = "fakesession";
Session* session;
Status s = tensorflow::NewSession(options, &session);
EXPECT_EQ(s.code(), error::INTERNAL);
EXPECT_TRUE(absl::StrContains(s.message(), "Multiple session factories"));
EXPECT_TRUE(absl::StrContains(s.message(), "FAKE_SESSION_1"));
EXPECT_TRUE(absl::StrContains(s.message(), "FAKE_SESSION_2"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4247af5c-3ab6-4144-ac73-c851293eeb69 | cpp | tensorflow/tensorflow | arg_ret_placement | tensorflow/core/common_runtime/arg_ret_placement.cc | tensorflow/core/common_runtime/arg_ret_placement_test.cc | #include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow::full_type {
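// Tensors typed TFT_SHAPE_TENSOR must live in host memory; every other full
// type maps to device memory.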
MemoryType MemoryTypeFromFullTypeId(FullTypeId id) {
if (id == TFT_SHAPE_TENSOR) {
return HOST_MEMORY;
}
return DEVICE_MEMORY;
}
bool LogMemoryTypeMismatch(bool use_host_memory, const FullTypeDef& ft) {
FullTypeId id = ft.type_id();
if (id == TFT_PRODUCT) {
LOG(ERROR) << "Unexpected full type information for tensor, which should "
"not start with TFT_PRODUCT\n"
<< ft.DebugString();
return false;
}
MemoryType mt_from_ft = MemoryTypeFromFullTypeId(id);
if (use_host_memory != (mt_from_ft == HOST_MEMORY)) {
VLOG(1) << "use_host_memory=" << use_host_memory
<< "but full type information is\n"
<< ft.DebugString();
return false;
}
return true;
}
Status CheckMemoryType(bool use_host_memory, const FullTypeDef& ft) {
FullTypeId id = ft.type_id();
MemoryType mt_from_ft = MemoryTypeFromFullTypeId(id);
if (id == TFT_PRODUCT) {
return errors::Internal(
"Unexpected full type information for tensor, which should not start "
"with TFT_PRODUCT\n",
ft.DebugString());
}
if (use_host_memory != (mt_from_ft == HOST_MEMORY)) {
return errors::Internal("use_host_memory=", use_host_memory,
" but full type information is\n",
ft.DebugString());
}
return absl::OkStatus();
}
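// Determines the memory type of one _Arg or _Retval node (for retvals, the
// node producing input 0) from its dtype. For DT_INT32, where placement is
// ambiguous, the result is cross-checked against full type information. The
// memory type is appended to memory_types and/or alloc_attrs when provided.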
static Status SetMemoryTypeForNode(
const Node* node, const DataType dtype, bool is_arg, bool weak_flag,
bool ints_on_device, MemoryTypeVector* memory_types,
std::vector<AllocatorAttributes>* alloc_attrs) {
const Node* n;
int output_idx;
if (is_arg) {
DCHECK(node->op_def().name() == "_Arg" ||
node->op_def().name() == "_DeviceArg");
output_idx = 0;
n = node;
} else {
DCHECK(node->op_def().name() == "_Retval" ||
node->op_def().name() == "_DeviceRetval");
const Edge* edge;
TF_RETURN_IF_ERROR(node->input_edge(0, &edge));
n = edge->src();
output_idx = edge->src_output();
}
MemoryType mt_from_dtype = ints_on_device ? MTypeFromDTypeIntsOnDevice(dtype)
: MTypeFromDType(dtype);
if (dtype == DT_INT32) {
if (n->def().has_experimental_type()) {
bool valid_full_type_information = false;
auto ft = n->def().experimental_type();
if (ft.type_id() == TFT_PRODUCT) {
FullTypeId id = GetArgDefaultUnset(ft, output_idx).type_id();
MemoryType mt_from_ft = MemoryTypeFromFullTypeId(id);
if ((id == TFT_TENSOR) || (id == TFT_SHAPE_TENSOR)) {
valid_full_type_information = mt_from_dtype == mt_from_ft;
} else if (id == TFT_UNSET) {
valid_full_type_information = mt_from_dtype != HOST_MEMORY;
}
}
if (!valid_full_type_information) {
if (weak_flag) {
VLOG(1) << "node=" << n->name() << " (op=" << n->def().op()
<< ") has an int32 output with unexpected full type "
<< "information with ints_on_device=" << ints_on_device
<< "\n"
<< n->def().DebugString();
} else {
return errors::Internal(
"node=", n->name(), " (op=", n->def().op(),
") has an int32 output with unexpected full type information ",
"with ints_on_device=", ints_on_device, "\n",
n->def().DebugString());
}
}
} else if (mt_from_dtype == HOST_MEMORY) {
if (weak_flag) {
VLOG(1) << "node=" << n->name() << " (op=" << n->def().op()
<< ") has a HOST_MEMORY int32 output but does not have "
<< "(TFT_SHAPE_TENSOR) full type information.";
} else {
return errors::Internal(
"node=", n->name(), " (op=", n->def().op(),
") has a HOST_MEMORY int32 output but does not have "
"(TFT_SHAPE_TENSOR) full type information.");
}
}
}
if (memory_types != nullptr) {
memory_types->push_back(mt_from_dtype);
}
if (alloc_attrs != nullptr) {
AllocatorAttributes aa;
aa.set_on_host(mt_from_dtype == HOST_MEMORY);
alloc_attrs->push_back(aa);
}
return absl::OkStatus();
}
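// Shared implementation behind the (Weak)SetMemoryTypeFor* and
// (Weak)SetAllocAttrsFor* entry points below.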
static Status SetMemoryTypeHelper(
const absl::InlinedVector<Node*, 4UL>& nodes, const DataTypeVector& dtypes,
bool is_arg, bool weak_flag, MemoryTypeVector* memory_types,
std::vector<AllocatorAttributes>* alloc_attrs) {
DCHECK_EQ(nodes.size(), dtypes.size());
if (alloc_attrs != nullptr) {
alloc_attrs->reserve(nodes.size());
}
for (int i = 0; i < nodes.size(); ++i) {
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(nodes[i], dtypes[i], is_arg,
                                            weak_flag, /*ints_on_device=*/false,
                                            memory_types, alloc_attrs));
}
return absl::OkStatus();
}
static Status SetMemoryTypeHelper(
const std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes,
bool weak_flag, bool ints_on_device,
std::vector<AllocatorAttributes>* alloc_attrs) {
DCHECK(alloc_attrs != nullptr);
alloc_attrs->reserve(arg_nodes.size());
for (const auto& arg : arg_nodes) {
const AttrValue* attr_value = arg.first->attrs().Find("T");
if (attr_value == nullptr) {
return errors::Internal("Arg node missing T attribute");
}
DataType dtype = attr_value->type();
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(
        arg.first, dtype, /*is_arg=*/true, weak_flag, ints_on_device,
        /*memory_types=*/nullptr, alloc_attrs));
}
return absl::OkStatus();
}
static Status SetMemoryTypeHelper(
const std::vector<std::pair<Node*, int>> ret_nodes, bool weak_flag,
bool ints_on_device, std::vector<AllocatorAttributes>* alloc_attrs) {
DCHECK(alloc_attrs != nullptr);
alloc_attrs->reserve(ret_nodes.size());
for (const auto& ret : ret_nodes) {
const AttrValue* attr_value = ret.first->attrs().Find("T");
if (attr_value == nullptr) {
return errors::Internal("Ret node missing T attribute");
}
DataType dtype = attr_value->type();
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(
        ret.first, dtype, /*is_arg=*/false, weak_flag, ints_on_device,
        /*memory_types=*/nullptr, alloc_attrs));
}
return absl::OkStatus();
}
Status SetMemoryTypeForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/false, &memory_types, nullptr);
}
Status WeakSetMemoryTypeForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/true, &memory_types, nullptr);
}
Status SetMemoryTypeForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/false, &memory_types, nullptr);
}
Status WeakSetMemoryTypeForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/true, &memory_types, nullptr);
}
Status SetAllocAttrsForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/false, nullptr, &alloc_attrs);
}
Status WeakSetAllocAttrsForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/true, nullptr, &alloc_attrs);
}
Status SetAllocAttrsForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/false, nullptr, &alloc_attrs);
}
Status WeakSetAllocAttrsForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
const DataTypeVector& dtypes,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/true, nullptr, &alloc_attrs);
}
Status SingleDeviceSetAllocAttrsForArgs(
std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes,
bool ints_on_device, std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(arg_nodes, /*weak_flag=*/false, ints_on_device,
                             &alloc_attrs);
}
Status WeakSingleDeviceSetAllocAttrsForArgs(
std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes,
bool ints_on_device, std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(arg_nodes, /*weak_flag=*/true, ints_on_device,
                             &alloc_attrs);
}
Status SingleDeviceSetAllocAttrsForRets(
const std::vector<std::pair<Node*, int>> ret_nodes, bool ints_on_device,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(ret_nodes, /*weak_flag=*/false, ints_on_device,
                             &alloc_attrs);
}
Status WeakSingleDeviceSetAllocAttrsForRets(
const std::vector<std::pair<Node*, int>> ret_nodes, bool ints_on_device,
std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(ret_nodes, /*weak_flag=*/true, ints_on_device,
                             &alloc_attrs);
}
} | #include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
class FullTypeGraphUtilsTest : public ::testing::Test {
protected:
FullTypeGraphUtilsTest()
: graph_(OpRegistry::Global()),
root_(Scope::NewRootScope().ExitOnError()) {}
Status MakeArg(Node **arg, DataType dtype) {
return NodeBuilder("arg", "_Arg", &root_.graph()->flib_def())
.Attr("T", dtype)
.Attr("index", 0)
.Finalize(root_.graph(), arg);
}
Status MakeRet(Node *src, Node **ret, DataType dtype) {
return NodeBuilder("ret", "_Retval", &root_.graph()->flib_def())
.Input(src, 0)
.Attr("T", dtype)
.Attr("index", 0)
.Finalize(root_.graph(), ret);
}
public:
Status MakeArgRet(Node **arg, Node **ret, DataType dtype) {
TF_RETURN_IF_ERROR(MakeArg(arg, dtype));
return MakeRet(*arg, ret, dtype);
}
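  // Attaches TFT_PRODUCT[out_id[data_id]] full type information to the node's
  // single output; pass data_id == TFT_UNSET to omit the type argument.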
void AddArgFullType(Node *arg, FullTypeId out_id, FullTypeId data_id) {
FullTypeDef *t = arg->mutable_def()->mutable_experimental_type();
t->set_type_id(TFT_PRODUCT);
FullTypeDef out_t;
out_t.set_type_id(out_id);
if (data_id != TFT_UNSET) {
FullTypeDef data_t;
data_t.set_type_id(data_id);
(*out_t.add_args()) = data_t;
}
(*t->add_args()) = out_t;
}
private:
Graph graph_;
Scope root_;
};
TEST_F(FullTypeGraphUtilsTest, MemoryTypesArgNoFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
nodes.push_back(arg);
dtypes.push_back(DT_INT64);
TF_ASSERT_OK(
full_type::WeakSetMemoryTypeForArgs(nodes, dtypes, memory_types));
ASSERT_EQ(memory_types.size(), 1);
ASSERT_EQ(memory_types[0], MemoryType::DEVICE_MEMORY);
}
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgNoFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
nodes.push_back(arg);
dtypes.push_back(DT_INT64);
TF_ASSERT_OK(full_type::WeakSetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_FALSE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, MemoryTypesArgWithFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
nodes.push_back(arg);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::SetMemoryTypeForArgs(nodes, dtypes, memory_types));
ASSERT_EQ(memory_types.size(), 1);
ASSERT_EQ(memory_types[0], MemoryType::HOST_MEMORY);
}
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
nodes.push_back(arg);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::SetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, ArgError) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
nodes.push_back(arg);
dtypes.push_back(DT_INT32);
Status status = full_type::SetMemoryTypeForArgs(nodes, dtypes, memory_types);
EXPECT_FALSE(status.ok());
}
TEST_F(FullTypeGraphUtilsTest, WeakAllocAttrsArgIgnore) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
nodes.push_back(arg);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::WeakSetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, RetNoFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
nodes.push_back(ret);
dtypes.push_back(DT_INT64);
TF_ASSERT_OK(
full_type::WeakSetMemoryTypeForRets(nodes, dtypes, memory_types));
ASSERT_EQ(memory_types.size(), 1);
ASSERT_EQ(memory_types[0], MemoryType::DEVICE_MEMORY);
}
TEST_F(FullTypeGraphUtilsTest, MemoryTypeRetWithFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
nodes.push_back(ret);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::SetMemoryTypeForRets(nodes, dtypes, memory_types));
ASSERT_EQ(memory_types.size(), 1);
ASSERT_EQ(memory_types[0], MemoryType::HOST_MEMORY);
}
TEST_F(FullTypeGraphUtilsTest, AllowAttrRetWithFT) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
nodes.push_back(ret);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::SetAllocAttrsForRets(nodes, dtypes, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, RetError) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
MemoryTypeVector memory_types;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
nodes.push_back(ret);
dtypes.push_back(DT_INT32);
Status status = full_type::SetMemoryTypeForRets(nodes, dtypes, memory_types);
EXPECT_FALSE(status.ok());
}
TEST_F(FullTypeGraphUtilsTest, WeakAllocAttrsRetIgnore) {
absl::InlinedVector<Node *, 4UL> nodes;
DataTypeVector dtypes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
nodes.push_back(ret);
dtypes.push_back(DT_INT32);
TF_ASSERT_OK(full_type::WeakSetAllocAttrsForRets(nodes, dtypes, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithFTSingleDevice) {
std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForArgs(
      arg_nodes, /*ints_on_device=*/true, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_FALSE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithUnsetFTSingleDevice) {
std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_UNSET, TFT_UNSET);
arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForArgs(
      arg_nodes, /*ints_on_device=*/true, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_FALSE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, WeakAllocatorAttrsArgWithFTSingleDevice) {
std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::WeakSingleDeviceSetAllocAttrsForArgs(
      arg_nodes, /*ints_on_device=*/false, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, SingleDeviceAllocAttrsRetError) {
std::vector<std::pair<Node *, int>> ret_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
ret_nodes.push_back(std::make_pair(ret, 0));
  Status status = full_type::SingleDeviceSetAllocAttrsForRets(
      ret_nodes, /*ints_on_device=*/true, alloc_attrs);
EXPECT_FALSE(status.ok());
}
TEST_F(FullTypeGraphUtilsTest, SingleDeviceAllocAttrsNotInt32) {
std::vector<std::pair<Node *, int>> ret_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_STRING));
ret_nodes.push_back(std::make_pair(ret, 0));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForRets(
      ret_nodes, /*ints_on_device=*/false, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_TRUE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, SingleDeviceWeakAllocAttrsRetIgnore) {
std::vector<std::pair<Node *, int>> ret_nodes;
std::vector<AllocatorAttributes> alloc_attrs;
Node *arg, *ret;
TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
ret_nodes.push_back(std::make_pair(ret, 0));
  TF_ASSERT_OK(full_type::WeakSingleDeviceSetAllocAttrsForRets(
      ret_nodes, /*ints_on_device=*/true, alloc_attrs));
ASSERT_EQ(alloc_attrs.size(), 1);
ASSERT_FALSE(alloc_attrs[0].on_host());
}
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeOK) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type().args()[0];
  TF_ASSERT_OK(full_type::CheckMemoryType(/*use_host_memory=*/true, ft));
}
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeBadFT) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type();
  Status status = full_type::CheckMemoryType(/*use_host_memory=*/true, ft);
EXPECT_FALSE(status.ok());
}
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeWrongFT) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type().args()[0];
  Status status = full_type::CheckMemoryType(/*use_host_memory=*/false, ft);
EXPECT_FALSE(status.ok());
}
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchOK) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type().args()[0];
  EXPECT_TRUE(full_type::LogMemoryTypeMismatch(/*use_host_memory=*/true, ft));
}
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchBadFT) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type();
  EXPECT_FALSE(full_type::LogMemoryTypeMismatch(/*use_host_memory=*/true, ft));
}
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchWrongFT) {
Node *node;
TF_ASSERT_OK(MakeArg(&node, DT_INT32));
AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
const FullTypeDef &ft = node->def().experimental_type().args()[0];
  EXPECT_FALSE(full_type::LogMemoryTypeMismatch(/*use_host_memory=*/false, ft));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/arg_ret_placement.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/arg_ret_placement_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
78c82e59-e383-40f5-be5e-ae074095bd05 | cpp | tensorflow/tensorflow | type_inference | tensorflow/core/common_runtime/type_inference.cc | tensorflow/core/common_runtime/type_inference_test.cc | #include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <list>
#include <queue>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
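// Upper bound on how many times a single node may be revisited before it is
// forcibly closed.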
int MAX_VISITS_PER_NODE = 3;
typedef absl::flat_hash_map<int, std::reference_wrapper<TypeInferenceFn const>>
ForwardInferMap;
typedef absl::flat_hash_map<
int, std::pair<int, std::reference_wrapper<TypeInferenceFn const>>>
ReverseInferMap;
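// True when no neighbor can still push new type information into `n`: every
// consumer with a reverse inference function is closed and, if `n` itself has
// a forward inference function, every producer is closed as well.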
bool all_sources_closed(const Node& n, const absl::flat_hash_set<int>& closed,
const ForwardInferMap& forward,
const ReverseInferMap& reverse) {
for (const auto& e : n.out_edges()) {
if (e->IsControlEdge()) {
continue;
}
int dst_id = e->dst()->id();
if (reverse.contains(dst_id) && !closed.contains(dst_id)) {
return false;
}
}
if (forward.contains(n.id())) {
for (const auto& e : n.in_edges()) {
if (e->IsControlEdge()) {
continue;
}
if (!closed.contains(e->src()->id())) {
return false;
}
}
}
return true;
}
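// Returns the current inferred type of each data input, indexed by dst_input;
// inputs whose producers carry no type information default to an unset
// FullTypeDef.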
std::vector<std::reference_wrapper<const FullTypeDef>> input_types(
const Node& n) {
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
input_types.push_back(*no_type);
}
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
VLOG(5) << " in edge: " << in_edge->DebugString();
NodeDef* ndef = in_edge->src()->mutable_def();
if (ndef->has_experimental_type()) {
const auto& t = ndef->experimental_type();
if (t.type_id() != TFT_UNSET) {
DCHECK(t.type_id() == TFT_PRODUCT) << ndef->DebugString();
DCHECK(t.args_size() > in_edge->src_output()) << ndef->DebugString();
input_types.at(in_edge->dst_input()) = t.args(in_edge->src_output());
}
}
}
return input_types;
}
Status update_inferred_type(Node* target, const FullTypeDef& t, bool& updated) {
if (t.type_id() == TFT_UNSET) {
VLOG(3) << " " << target->name() << " no inferred type";
return absl::OkStatus();
}
if (target->def().has_experimental_type()) {
const auto existing = target->def().experimental_type();
if (full_type::IsSubtype(existing, t)) {
VLOG(3) << " " << target->name() << " no new type info";
return absl::OkStatus();
} else if (!full_type::IsSubtype(t, existing)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("type mismatch for node '", target->name(),
"': expected a subtype of:\n", existing.DebugString(),
"\n got:\n", t.DebugString(), "\n "));
}
}
*(target->mutable_def()->mutable_experimental_type()) = t;
updated = true;
VLOG(3) << " " << target->name() << " updated";
return absl::OkStatus();
}
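// Placeholder for inferring the type of a nested function call; the actual
// inference is not wired in yet.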
absl::StatusOr<FullTypeDef> run_inference(const string& fn_name,
const TypeRefVector& in_types) {
return absl::OkStatus();
}
}
Status TypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "TypeInferencePass::Run";
DCHECK(options.graph != nullptr);
Graph* g = options.graph->get();
DCHECK(g != nullptr);
FunctionLibraryDefinition* flib_def = options.flib_def;
DCHECK(flib_def != nullptr);
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_before", *g, flib_def);
}
for (Node* n : g->nodes()) {
n->UpdateProperties();
}
ForwardInferMap forward;
ReverseInferMap reverse;
for (Node* n : g->nodes()) {
VLOG(4) << "\n node: " << n->def().DebugString()
<< "\n op def: " << n->op_def().DebugString();
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_def->LookUp(n->op_def().name(), ®));
if (reg->fwd_type_fn != nullptr) {
forward.emplace(n->id(), reg->fwd_type_fn);
}
if (reg->rev_type_fn != nullptr) {
reverse.emplace(n->id(), std::make_pair(reg->rev_type_input,
std::cref(reg->rev_type_fn)));
}
}
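  // Applies the node's registered forward inference function to its current
  // input types and records any newly inferred output type on the node.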
auto infer_forward = [&forward](Node* n, bool& updated) {
if (!forward.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has forward function";
auto in_types = input_types(*n);
const auto& infer_ret = forward.at(n->id())(in_types, run_inference);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(n, *infer_ret, updated),
"while updating its output type.");
return absl::OkStatus();
};
auto infer_reverse = [&reverse](Node* n, bool& updated) {
if (!reverse.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has reverse function";
auto in_types = input_types(*n);
auto rev_idx_and_fn = reverse.at(n->id());
const auto& infer_ret = rev_idx_and_fn.second(in_types, run_inference);
const Edge* e;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
n->input_edge(rev_idx_and_fn.first, &e),
absl::StrCat("while querying input ", rev_idx_and_fn.first, " of '",
n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", e->src()->name(),
"' via '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(e->src(), *infer_ret, updated),
absl::StrCat("while updating its output type inferred from '",
n->name(), ","));
return absl::OkStatus();
};
std::list<int> queue;
absl::flat_hash_set<int> in_queue;
absl::flat_hash_map<int, int> visit_count;
absl::flat_hash_set<int> open;
absl::flat_hash_set<int> closed;
int max_passes = g->num_nodes();
int visits = 0;
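  // Seed the worklist with niladic nodes (no data inputs); every node starts
  // open and is closed once it hits the visit limit or all of its sources are
  // closed.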
for (Node* n : g->nodes()) {
const int nid = n->id();
bool niladic = true;
for (const auto& e : n->in_edges()) {
if (!e->IsControlEdge()) {
niladic = false;
break;
}
}
if (niladic) {
queue.emplace_back(nid);
in_queue.emplace(nid);
}
open.emplace(nid);
visit_count.emplace(nid, 0);
}
for (int i = 0; i < max_passes; i++) {
VLOG(2) << "Iteration " << i << ", " << queue.size() << " nodes in queue";
while (!queue.empty()) {
int nid = queue.front();
Node* n = g->FindNodeId(nid);
VLOG(3) << " visiting " << n->name();
visits++;
visit_count[nid]++;
DCHECK(!closed.contains(nid));
bool updated = false;
TF_RETURN_IF_ERROR(infer_forward(n, updated));
TF_RETURN_IF_ERROR(infer_reverse(n, updated));
VLOG(4) << " done " << n->def().DebugString();
queue.pop_front();
in_queue.erase(nid);
open.erase(nid);
if (visit_count.at(nid) >= MAX_VISITS_PER_NODE) {
VLOG(3) << " closing " << n->name() << " - visit limit reached";
closed.emplace(nid);
} else if (all_sources_closed(*n, closed, forward, reverse)) {
VLOG(3) << " closing " << n->name() << " - all sources closed";
closed.emplace(nid);
}
for (const auto& out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* c = out_edge->dst();
int cid = c->id();
if (closed.contains(cid) || in_queue.contains(cid)) {
continue;
}
if (updated || all_sources_closed(*c, closed, forward, reverse)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
if (updated && reverse.contains(nid)) {
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(reverse.at(nid).first, &e));
Node* c = e->src();
int cid = c->id();
if (!closed.contains(cid) && !in_queue.contains(cid)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
}
VLOG(2) << "Done iteration " << i << ", " << closed.size()
<< " nodes closed";
if (open.empty()) {
VLOG(1) << "Finished after " << i + 1 << " iterations; done "
<< closed.size() << " of " << g->num_nodes() << " nodes in "
<< visits << " visits";
break;
} else {
queue.emplace_back(*(open.begin()));
}
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_after", *g, flib_def);
}
return absl::OkStatus();
}
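// Best-effort variant of TypeInferencePass: failures are downgraded to a
// logged warning so that graphs which escaped earlier type checking still
// load.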
Status WeakTypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
TypeInferencePass pass;
const auto& pass_status = pass.Run(options);
if (!pass_status.ok()) {
LOG_FIRST_N(WARNING, 1)
<< "Type inference failed. This indicates an "
"invalid graph that escaped type checking. Error message: "
<< pass_status.ToString();
}
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 99999,
WeakTypeInferencePass);
} | #include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <string>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(TypeInferenceTest, BasicStraightline) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("T", DT_VARIANT)
.Finalize(root.graph(), &id));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || ((node->name() == "id"))) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, CyclicGraphWithV1ControlFlow) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &id));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &next));
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || (node->name() == "id") ||
(node->name() == "enter") || (node->name() == "exit") ||
(node->name() == "sw") || (node->name() == "merge") ||
(node->name() == "next")) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
REGISTER_OP("TestSourceOp").Output("o: variant");
REGISTER_OP("TestTensorUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
return t;
});
REGISTER_OP("TestArrayUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_ARRAY);
return t;
});
REGISTER_OP("TestMergeOp")
.Input("i1: variant")
.Input("i2: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
EXPECT_EQ(input_types.size(), 2);
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
if ((input_types[0].get().type_id() == TFT_TENSOR) &&
(input_types[1].get().type_id() == TFT_ARRAY)) {
t.add_args()->set_type_id(TFT_ARRAY);
} else {
t.add_args()->set_type_id(TFT_ANY);
}
return t;
});
TEST(TypeInferenceTest, TernaryNodeWithIgnoredInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &id));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Input({NodeBuilder::NodeOut(an)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithUnorderedInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
Node* m_copy = nullptr;
Node* tn_copy = nullptr;
Node* an_copy = nullptr;
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
m_copy = node;
} else if (node->name() == "tn") {
tn_copy = node;
} else if (node->name() == "an") {
an_copy = node;
}
}
TF_ASSERT_OK(graph->UpdateEdge(an_copy, 0, m_copy, 1));
TF_ASSERT_OK(graph->UpdateEdge(tn_copy, 0, m_copy, 0));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithCycleInput) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &tn));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &next));
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
const auto& status = Rewrite(&graph);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("expected compatible input types"));
}
TEST(WeakTypeInferenceTest, AlwaysSucceeds) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* merge;
TF_ASSERT_OK(NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an), NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &merge));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
WeakTypeInferencePass pass;
TF_ASSERT_OK(pass.Run(opt_options));
}
TEST(ReverseTypeInferenceTest, BasicVDependency) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* it;
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ITERATOR) << node->def().DebugString();
}
}
}
TEST(ReverseTypeInferenceTest, FromUnsetType) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* it;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
ASSERT_FALSE(node->def().has_experimental_type());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/type_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/type_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c8d261a-8b13-453f-97e9-ad253db6ca01 | cpp | tensorflow/tensorflow | process_function_library_runtime | tensorflow/core/common_runtime/process_function_library_runtime.cc | tensorflow/core/common_runtime/process_function_library_runtime_test.cc | #include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/build_graph_options.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include "tensorflow/core/common_runtime/single_threaded_executor.h"
#include "tensorflow/core/common_runtime/stats_publisher_interface.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tensorflow/core/util/reffed_status_callback.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
#endif
namespace tensorflow {
namespace {
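// Returns the subgraph-count threshold above which component functions are
// instantiated in parallel on the default thread pool. Reads the
// TF_PFLR_PARALLEL_INSTANTIATE_THRESHOLD environment variable once
// (default 8) and caches the result for subsequent calls.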
int64_t GetParallelSubgraphThreshold() {
static int64_t parallel_subgraph_threshold = []() {
int64_t result;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar(
"TF_PFLR_PARALLEL_INSTANTIATE_THRESHOLD", 8, &result));
return result;
}();
return parallel_subgraph_threshold;
}
}
const char ProcessFunctionLibraryRuntime::kDefaultFLRDevice[] = "null";
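// Initializes a cross-process function handle through the distributed
// runtime. Only the first caller performs the instantiation; concurrent
// callers block until initialization completes and are then invoked with the
// cached result.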
void ProcessFunctionLibraryRuntime::FunctionData::DistributedInit(
DistributedFunctionLibraryRuntime* parent, const string& function_name,
const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::DoneCallback done) {
{
mutex_lock l(mu_);
is_cross_process_ = true;
if (init_started_) {
init_done_.WaitForNotification();
done(init_result_);
return;
}
init_started_ = true;
}
parent->Instantiate(function_name, lib_def, attrs, options, &local_handle_,
[this, done](const Status& s) {
init_done_.Notify();
done(s);
});
}
ProcessFunctionLibraryRuntime::ProcessFunctionLibraryRuntime(
const DeviceMgr* device_mgr, Env* env, const ConfigProto* config,
int graph_def_version, const FunctionLibraryDefinition* lib_def,
const OptimizerOptions& optimizer_options,
thread::ThreadPool* default_thread_pool,
DistributedFunctionLibraryRuntime* parent,
const SessionMetadata* session_metadata,
Rendezvous::Factory rendezvous_factory,
StatsPublisherFactory stats_publisher_factory)
: parent_(parent),
env_(env),
config_(config ? std::make_optional(*config) : std::nullopt),
device_mgr_(device_mgr),
lib_def_(lib_def),
default_thread_pool_(default_thread_pool),
flr_map_(
new std::unordered_map<Device*,
core::RefCountPtr<FunctionLibraryRuntime>>),
next_handle_(0),
session_metadata_(session_metadata),
rendezvous_factory_(std::move(rendezvous_factory)),
optimizer_options_(optimizer_options),
graph_def_version_(graph_def_version),
stats_publisher_factory_(std::move(stats_publisher_factory)) {
if (device_mgr == nullptr) {
(*flr_map_)[nullptr] = NewFunctionLibraryRuntime(
nullptr, env, config_ ? &(*config_) : nullptr, nullptr,
graph_def_version, lib_def_, default_thread_pool, optimizer_options,
session_metadata_, this);
return;
}
InitializeDeviceAndFlr();
}
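// Sends `tensors_to_send` from `source_device` to `target_device` through
// `rendezvous`, using one rendezvous key per tensor derived from `key_prefix`
// and the tensor's index.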
Status ProcessFunctionLibraryRuntime::SendTensors(
const string& source_device, const string& target_device,
const string& key_prefix, int64_t src_incarnation,
absl::Span<const Tensor> tensors_to_send, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
RendezvousInterface* rendezvous) {
std::vector<string> keys;
for (int i = 0; i < tensors_to_send.size(); ++i) {
string name = strings::StrCat(key_prefix, i);
string key = Rendezvous::CreateKey(source_device, src_incarnation,
target_device, name, FrameAndIter(0, 0));
keys.push_back(key);
}
TF_RETURN_IF_ERROR(SendTensorsToRendezvous(
rendezvous, device_context, alloc_attrs, keys, tensors_to_send));
return absl::OkStatus();
}
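// Asynchronously receives `num_tensors` tensors keyed by `key_prefix` plus
// index from `rendezvous`, appending them to `received_tensors` and invoking
// `done` once all receives have completed.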
void ProcessFunctionLibraryRuntime::ReceiveTensorsAsync(
const string& source_device, const string& target_device,
const string& key_prefix, int64_t src_incarnation, int64_t num_tensors,
DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
RendezvousInterface* rendezvous, std::vector<Tensor>* received_tensors,
StatusCallback done) {
std::vector<string> keys;
for (int64_t i = 0; i < num_tensors; ++i) {
string name = strings::StrCat(key_prefix, i);
string key = Rendezvous::CreateKey(source_device, src_incarnation,
target_device, name, FrameAndIter(0, 0));
keys.push_back(key);
}
RecvOutputsFromRendezvousAsync(rendezvous, device_context, alloc_attrs, keys,
received_tensors, std::move(done));
}
Status ProcessFunctionLibraryRuntime::GetRetTypes(
FunctionLibraryRuntime::Handle h, DataTypeVector* ret_types) {
FunctionLibraryRuntime* flr = nullptr;
{
tf_shared_lock l(mu_);
auto miter = mdevice_data_.find(h);
if (miter != mdevice_data_.end()) {
*ret_types = miter->second->ret_types_;
return absl::OkStatus();
}
auto fiter = function_data_.find(h);
if (fiter != function_data_.end()) {
flr = GetFLR(fiter->second->target_device());
}
}
if (flr != nullptr) {
return flr->GetRetTypes(h, ret_types);
}
return errors::InvalidArgument("Handle ", h, " not found.");
}
Status ProcessFunctionLibraryRuntime::GetDeviceIncarnation(
const string& device_name, int64_t* incarnation) const {
FunctionLibraryRuntime* flr = GetFLR(device_name);
if (flr == nullptr) {
return errors::InvalidArgument("Device name: ", device_name, " not found.");
}
*incarnation = flr->device()->attributes().incarnation();
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::GetDeviceContext(
const string& device_name, DeviceContext** device_context) const {
*device_context = nullptr;
FunctionLibraryRuntime* flr = GetFLR(device_name);
if (flr == nullptr) {
return errors::InvalidArgument("Device name: ", device_name, " not found.");
}
Device* device = flr->device();
string device_type = device->parsed_name().type;
if (device_type == "CPU" || device_type == "TPU_SYSTEM") {
return absl::OkStatus();
}
if (device->IsRemoteCallAllowed()) {
auto* dev_info = flr->device()->tensorflow_accelerator_device_info();
if (dev_info) {
*device_context = dev_info->default_context;
return absl::OkStatus();
}
}
return errors::Internal("Device type: ", device_type,
" is currently unsupported for remote ",
"function executions");
}
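// Rebuilds the device set from the local (and, when present, remote) device
// managers and lazily creates a FunctionLibraryRuntime for every local device
// that does not already have one.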
void ProcessFunctionLibraryRuntime::InitializeDeviceAndFlr() {
mutex_lock l(mu_);
device_set_ = std::make_shared<DeviceSet>();
if (parent_ != nullptr && parent_->remote_device_mgr() != nullptr) {
for (auto d : parent_->remote_device_mgr()->ListDevices()) {
Device* device = nullptr;
if (device_mgr_->LookupDevice(d->name(), &device) == absl::OkStatus()) {
device_set_->AddDevice(device);
} else {
device_set_->AddDevice(d);
}
}
} else {
for (auto d : device_mgr_->ListDevices()) {
device_set_->AddDevice(d);
}
}
for (Device* d : device_mgr_->ListDevices()) {
if ((*flr_map_)[d] == nullptr) {
(*flr_map_)[d] = NewFunctionLibraryRuntime(
device_mgr_, env_, config_ ? &(*config_) : nullptr, d,
graph_def_version_, lib_def_, default_thread_pool_,
optimizer_options_, session_metadata_, this);
}
}
}
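// Looks up the FunctionLibraryRuntime for `device_name`, or the process-wide
// FLR when `device_name` is kDefaultFLRDevice. Returns nullptr if the device
// is not known to this process.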
FunctionLibraryRuntime* ProcessFunctionLibraryRuntime::GetFLR(
const string& device_name) const {
Device* device = nullptr;
if (device_name != kDefaultFLRDevice) {
if (!device_mgr_->LookupDevice(device_name, &device).ok()) {
VLOG(4) << "Could not find device: " << device_name;
return nullptr;
}
}
const auto& iter = flr_map_->find(device);
if (iter == flr_map_->end()) {
VLOG(1) << "Could not find device: " << device_name
<< "in the local process.";
return nullptr;
}
return iter->second.get();
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::AddHandle(
const string& function_key, const string& device_name,
FunctionLibraryRuntime::LocalHandle local_handle) {
mutex_lock l(mu_);
return AddHandleLocked(function_key, device_name, local_handle);
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::AddHandleLocked(
const string& function_key, const string& device_name,
FunctionLibraryRuntime::LocalHandle local_handle) {
auto h = next_handle_;
function_data_[h] =
std::make_unique<FunctionData>(device_name, local_handle, function_key);
table_[function_key] = h;
next_handle_++;
return h;
}
FunctionLibraryRuntime::Handle
ProcessFunctionLibraryRuntime::AddMultiDeviceHandle(
std::unique_ptr<MultiDeviceFunctionData> data, const string& function_key) {
mutex_lock l(mu_);
auto h = next_handle_;
mdevice_data_[h] = std::move(data);
table_[function_key] = h;
next_handle_++;
return h;
}
bool ProcessFunctionLibraryRuntime::HasMultiDeviceHandle(
FunctionLibraryRuntime::Handle handle) const {
bool multi_device;
{
tf_shared_lock l(mu_);
multi_device = mdevice_data_.find(handle) != mdevice_data_.end();
}
return multi_device;
}
FunctionLibraryRuntime::Handle ProcessFunctionLibraryRuntime::GetHandle(
const string& function_key) const {
tf_shared_lock l(mu_);
return gtl::FindWithDefault(table_, function_key, kInvalidHandle);
}
FunctionLibraryRuntime::LocalHandle
ProcessFunctionLibraryRuntime::GetHandleOnDevice(
const string& device_name, FunctionLibraryRuntime::Handle handle,
bool include_multi_device) const {
tf_shared_lock l(mu_);
auto miter = mdevice_data_.find(handle);
if (miter != mdevice_data_.end()) {
if (!include_multi_device) return kInvalidLocalHandle;
const MultiDeviceFunctionData& data = *miter->second;
if (data.glue_.size() != 1) return kInvalidLocalHandle;
const auto& pair = *data.glue_.begin();
const string& func_device_name = pair.first;
const ComponentFunctionData& component_data = pair.second;
if (func_device_name != device_name) return kInvalidLocalHandle;
handle = component_data.handle;
}
auto iter = function_data_.find(handle);
if (iter == function_data_.end()) {
return kInvalidLocalHandle;
}
FunctionData* function_data = iter->second.get();
if (function_data->target_device() != device_name) {
return kInvalidLocalHandle;
}
return function_data->local_handle();
}
string ProcessFunctionLibraryRuntime::GetDeviceName(
FunctionLibraryRuntime::Handle handle) const {
tf_shared_lock l(mu_);
auto iter = function_data_.find(handle);
CHECK(iter != function_data_.end());
FunctionData* function_data = iter->second.get();
return function_data->target_device();
}
ProcessFunctionLibraryRuntime::MultiDeviceFunctionData*
ProcessFunctionLibraryRuntime::IsMultiDevice(
FunctionLibraryRuntime::Handle handle) const {
tf_shared_lock l(mu_);
const auto& it = mdevice_data_.find(handle);
if (it != mdevice_data_.end()) {
return it->second.get();
}
return nullptr;
}
namespace {
std::vector<Tensor> GetLocalArgs(absl::Span<const FunctionArg> args) {
std::vector<Tensor> tensors;
for (const auto& arg : args) {
if (arg.index() == 0) {
tensors.push_back(absl::get<Tensor>(arg));
}
}
return tensors;
}
FunctionLibraryRuntime::DoneCallback TensorsToFunctionRetsDoneCallback(
std::vector<FunctionRet>* rets, std::vector<Tensor>* tensors,
FunctionLibraryRuntime::DoneCallback done) {
return [rets, tensors, done = std::move(done)](const Status& s) {
if (s.ok()) {
for (const auto& t : *tensors) {
rets->push_back(t);
}
}
delete tensors;
done(s);
};
}
Status FunctionRetsToTensors(const std::vector<FunctionRet>* function_rets,
std::vector<Tensor>* tensors) {
for (const auto& ret : *function_rets) {
if (ret.index() != 0) {
return errors::Internal(
"Expect a Tensor as a function output but got a TensorShape.");
}
tensors->push_back(absl::get<Tensor>(ret));
}
return absl::OkStatus();
}
}
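// Classifies a component subgraph for the sync-execution optimization:
// subgraphs containing ops that are unsafe to run synchronously, or both send
// and recv ops, require async execution; send-only, recv-only, and
// send/recv-free subgraphs can be scheduled more cheaply.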
ProcessFunctionLibraryRuntime::AsyncAttributes::Summary
ProcessFunctionLibraryRuntime::AsyncAttributes::Summarize(const Graph* graph) {
bool has_send_op = false;
bool has_recv_op = false;
bool has_unsafe_op = false;
for (const Node* node : graph->nodes()) {
if (node->IsSend() || node->IsHostSend()) {
has_send_op = true;
}
if (node->IsRecv() || node->IsHostRecv()) {
has_recv_op = true;
}
if (!ValidateOpIsSafeForSyncExecution(*node,
allow_control_flow_sync_execution())
.ok()) {
has_unsafe_op = true;
}
}
if (has_unsafe_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "unsafe_op");
return AsyncAttributes::kAsyncRequired;
}
if (!has_send_op && !has_recv_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "safe_for_sync");
return AsyncAttributes::kSafeForSync;
}
if (has_send_op && !has_recv_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "send_only");
return AsyncAttributes::kSendOnly;
}
if (has_recv_op && !has_send_op) {
metrics::IncrementTestCounter("subgraph_async_summary", "recv_only");
return AsyncAttributes::kRecvOnly;
}
metrics::IncrementTestCounter("subgraph_async_summary", "other");
return AsyncAttributes::kAsyncRequired;
}
void ProcessFunctionLibraryRuntime::PublishSubgraphs(
const std::string& function_name,
std::vector<core::RefCountPtr<FunctionRecord>>&& function_records) {
std::unique_ptr<StatsPublisherInterface> stats_publisher =
stats_publisher_factory_(function_name, BuildGraphOptions(),
SessionOptions());
stats_publisher->PublishGraphProto(std::move(function_records));
mutex_lock l(mu_);
stats_publishers_.push_back(std::move(stats_publisher));
}
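// Instantiates a multi-device function: optimizes and partitions the function
// graph (reusing an ahead-of-time optimized graph when one is available),
// converts each per-device partition into a component FunctionDef, and
// instantiates every component on its target device, in parallel when the
// number of subgraphs exceeds the configured threshold.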
Status ProcessFunctionLibraryRuntime::InstantiateMultiDevice(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
const string& function_key = Canonicalize(function_name, attrs, options);
{
mutex_lock l(mu_);
const auto& it = table_.find(function_key);
if (it != table_.end()) {
*handle = it->second;
++mdevice_data_[*handle]->instantiation_counter_;
return absl::OkStatus();
}
}
VLOG(1) << "Instantiating MultiDevice function \"" << function_name
<< "\" on default device \"" << options.target << "\"";
if (VLOG_IS_ON(3)) {
int index = 0;
VLOG(3) << "Requested input devices:";
for (const string& device : options.input_devices) {
VLOG(3) << " [input " << index++ << "] " << device;
}
index = 0;
VLOG(3) << "Requested output devices:";
for (const string& device : options.output_devices) {
VLOG(3) << " [output " << index++ << "] " << device;
}
}
const std::shared_ptr<DeviceSet> dev_set = device_set();
Device* default_device = nullptr;
if (options.default_device_to_target && !options.target.empty()) {
FunctionLibraryRuntime* flr = GetFLR(options.target);
if (flr == nullptr) {
return errors::InvalidArgument(
"Cannot instantiate multi-device function with target device ",
options.target);
}
default_device = flr->device();
}
std::vector<CompositeDevice*> composite_devices;
{
tf_shared_lock l(mu_);
for (auto* d : composite_devices_) composite_devices.push_back(d);
}
Device* cpu_device;
TF_RETURN_IF_ERROR(device_mgr_->LookupDevice("CPU:0", &cpu_device));
const uint64 optimization_start_time_usecs = Env::Default()->NowMicros();
std::optional<absl::StatusOr<OptimizedFunctionGraph>> optimized_graph_proto =
options.lib_def != nullptr
? options.lib_def->FindOptimizedFunctionGraph(function_name)
: lib_def_->FindOptimizedFunctionGraph(function_name);
if (optimized_graph_proto.has_value()) {
if (optimized_graph_proto->ok()) {
LOG(INFO) << "Found AOT'd graph for function: " << function_name;
metrics::UpdateFunctionGraphOptimizationSavingTime(
optimized_graph_proto->value().optimization_time_usecs(),
metrics::GraphOptimizationSource::kAot);
metrics::IncrementFunctionGraphOptimizationCacheHitCount(
1, metrics::GraphOptimizationSource::kAot);
} else {
LOG(WARNING) << "Failed to create AOT'd graph for function: "
<< function_name
<< " with status: " << optimized_graph_proto->status();
}
}
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_graph_info =
(!optimized_graph_proto.has_value() ||
!optimized_graph_proto.value().ok())
? OptimizeFunctionGraphOrReadFromFileCache(
function_name, attrs, options, *dev_set, lib_def_,
composite_devices, cpu_device, default_device, env_)
: OptimizedFunctionGraphInfo::FromProto(
std::move(optimized_graph_proto.value().value()));
if (!optimized_graph_info.ok()) return optimized_graph_info.status();
optimized_graph_info->function_graph->mutable_flib_def()
->set_default_registry(&(optimized_graph_info->lib_def));
TF_ASSIGN_OR_RETURN(auto subgraphs, PreprocessAndPartitionGraph(
function_name, *optimized_graph_info,
options, *dev_set, lib_def_,
composite_devices, cpu_device, env_));
const uint64 optimization_end_time_usecs = Env::Default()->NowMicros();
const uint64 graph_optimization_duration =
optimization_end_time_usecs - optimization_start_time_usecs;
metrics::UpdateFunctionGraphOptimizationTime(graph_optimization_duration);
VLOG(1) << "Finished graph optimizations for MultiDevice function \""
<< function_name << "\" with target device \"" << options.target
<< "\". Took " << graph_optimization_duration / 1000000 << " secs.";
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? lib_def_ : options.lib_def;
if (options.graph_collector != nullptr) {
for (const auto& pair : *subgraphs) {
GraphDef def;
pair.second->ToGraphDef(&def);
*def.mutable_library() = lib_def->ReachableDefinitions(def).ToProto();
options.graph_collector->CollectPartitionedGraph(def);
}
}
const auto& node_name_to_control_ret =
optimized_graph_info->node_name_to_control_ret;
const auto control_ret =
[&node_name_to_control_ret](const Node* n) -> std::optional<string> {
const auto it = node_name_to_control_ret.find(n->name());
return it != node_name_to_control_ret.end()
? absl::make_optional<string>(it->second)
: absl::nullopt;
};
auto data = std::make_unique<MultiDeviceFunctionData>(
function_name, function_key, optimized_graph_info->num_return_nodes,
std::move(optimized_graph_info->ret_types));
int i = 0;
FunctionLibraryDefinition data_lib_def =
std::move(optimized_graph_info->lib_def);
FunctionNameGenerator name_generator(
&data_lib_def,
absl::StrCat(function_name, "_partitioned_", random::New64()));
const int num_subgraphs = subgraphs->size();
absl::InlinedVector<Status, 4UL> instantiate_status(num_subgraphs);
data->enable_sync_execution = false;
if (options.allow_small_function_optimizations) {
data->enable_sync_execution = true;
for (const auto& pair : *subgraphs) {
ComponentFunctionData* comp_data = &data->glue_[pair.first];
const Graph* subgraph = pair.second.get();
comp_data->async_attributes =
AsyncAttributes(subgraph, options.allow_control_flow_sync_execution);
if (comp_data->async_attributes.summary() ==
AsyncAttributes::kAsyncRequired) {
data->enable_sync_execution = false;
}
}
}
auto instantiate_component = [this, dev_set, &data_lib_def, &control_ret,
&options,
&data](const string& target,
std::unique_ptr<Graph> subgraph,
ComponentFunctionData* comp_data,
std::function<void(Status)> done) {
const string& device_type =
dev_set->FindDeviceByName(target)->device_type();
bool ints_on_device =
(device_type == "TPU" || device_type == "XLA_CPU" ||
device_type == "XLA_GPU" || options.int_args_and_retvals_on_device);
Int32FulltypePass int32_fulltype(
"ProcessFunctionLibraryRuntime::InstantiateMultiDevice");
Status s = int32_fulltype.ProcessGraph(subgraph.get(), ints_on_device);
if (!s.ok()) {
done(s);
return;
}
s = UpdateArgAndRetvalMetadata(subgraph.get(), &comp_data->arg_indices,
&comp_data->ret_indices,
&comp_data->arg_alloc_attrs,
&comp_data->ret_alloc_attrs, ints_on_device);
if (!s.ok()) {
done(s);
return;
}
FunctionDef shard;
s = GraphToFunctionDef(std::move(subgraph), comp_data->name, control_ret,
&shard);
if (!s.ok()) {
done(s);
return;
}
subgraph.reset();
AttrValueMap attrs(shard.attr());
s = data_lib_def.AddFunctionDef(std::move(shard));
if (!s.ok()) {
done(s);
return;
}
FunctionLibraryRuntime::InstantiateOptions opts;
opts.executor_type = options.executor_type;
opts.target = target;
opts.lib_def = &data_lib_def;
opts.create_kernels_eagerly = options.create_kernels_eagerly;
opts.state_handle = options.state_handle;
opts.allow_small_function_optimizations = data->enable_sync_execution;
opts.allow_control_flow_sync_execution =
options.allow_control_flow_sync_execution;
AttrValue ints_on_device_attr;
ints_on_device_attr.set_b(options.int_args_and_retvals_on_device);
attrs.insert(
{FunctionLibraryDefinition::kIntsOnDeviceAttr, ints_on_device_attr});
VLOG(1) << "Start instantiating component function " << comp_data->name
<< " on device " << target;
auto* component_handle = new FunctionLibraryRuntime::Handle;
auto wrapped_done = [this, comp_data, component_handle, &data,
done = std::move(done)](const Status& s) {
VLOG(1) << "Finished instantiating component function " << comp_data->name
<< " with handle " << *component_handle << " status: " << s;
if (s.ok()) {
{
mutex_lock l(mu_);
if (function_data_[*component_handle]->is_cross_process()) {
data->is_cross_process_ = true;
}
}
comp_data->handle = *component_handle;
}
delete component_handle;
done(s);
};
FunctionLibraryRuntime* flr = GetFLR(opts.target);
if (flr != nullptr) {
Status s = flr->Instantiate(comp_data->name, AttrSlice(&attrs), opts,
component_handle);
wrapped_done(s);
} else {
opts.ret_indices = comp_data->ret_indices;
InstantiateRemote(comp_data->name, AttrSlice(&attrs), opts,
component_handle, std::move(wrapped_done));
}
};
if (default_thread_pool_ != nullptr &&
num_subgraphs > GetParallelSubgraphThreshold()) {
BlockingCounter counter(static_cast<int>(num_subgraphs));
for (auto& pair : *subgraphs) {
Status* status = &instantiate_status[i];
ComponentFunctionData* comp_data = &data->glue_[pair.first];
comp_data->name = name_generator.GetName();
default_thread_pool_->Schedule(
[&instantiate_component, &pair, comp_data, &counter, status]() {
instantiate_component(pair.first, std::move(pair.second), comp_data,
[&counter, status](Status s) {
status->Update(s);
counter.DecrementCount();
});
});
i += 1;
}
counter.Wait();
} else {
for (auto& pair : *subgraphs) {
Notification n;
Status* status = &instantiate_status[i];
ComponentFunctionData* comp_data = &data->glue_[pair.first];
comp_data->name = name_generator.GetName();
instantiate_component(pair.first, std::move(pair.second), comp_data,
[&n, status](Status s) {
status->Update(s);
n.Notify();
});
n.WaitForNotification();
i += 1;
}
}
StatusGroup group;
for (auto& status : instantiate_status) {
group.Update(status);
}
TF_RETURN_IF_ERROR(group.as_summary_status());
std::vector<core::RefCountPtr<FunctionRecord>> function_records;
const bool should_publish_function_graphs =
flags::Global().publish_function_graphs.value();
if (should_publish_function_graphs) {
for (const auto& pair : *subgraphs) {
ComponentFunctionData* comp_data = &data->glue_[pair.first];
function_records.push_back(data_lib_def.FindRecord(comp_data->name));
}
}
*handle = AddMultiDeviceHandle(std::move(data), function_key);
VLOG(1) << "Instantiated MultiDevice function \"" << function_name
<< "\" with handle " << *handle;
if (should_publish_function_graphs) {
PublishSubgraphs(function_name, std::move(function_records));
}
return absl::OkStatus();
}
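// Fills `output_devices` with the device that produces each function output.
// Resource outputs are reported on their target device; other outputs are
// reported on the host when their allocator attributes place them in host
// memory.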
Status ProcessFunctionLibraryRuntime::GetOutputDevices(
FunctionLibraryRuntime::Handle handle,
std::vector<Device*>* output_devices) const {
MultiDeviceFunctionData* data = IsMultiDevice(handle);
if (data == nullptr) {
    return errors::InvalidArgument(
        "Failed to find multi-device function handle ", handle);
}
for (const auto& pair : data->glue_) {
const ComponentFunctionData& comp_data = pair.second;
DCHECK(comp_data.ret_alloc_attrs.size() == comp_data.ret_indices.size());
if (comp_data.ret_indices.empty()) {
continue;
}
const string& target = pair.first;
FunctionLibraryRuntime* target_flr = GetFLR(target);
Device* target_device = nullptr;
Device* host = nullptr;
if (target_flr == nullptr) {
if (!data->has_remote_outputs) {
data->has_remote_outputs = true;
}
target_device = device_set()->FindDeviceByName(target);
string remote_host;
TF_RETURN_IF_ERROR(
DeviceNameUtils::DeviceNameToCpuDeviceName(target, &remote_host));
host = device_set()->FindDeviceByName(remote_host);
} else {
target_device = target_flr->device();
}
output_devices->resize(data->num_outputs_);
for (int j = 0; j < comp_data.ret_indices.size(); ++j) {
int ret_index = comp_data.ret_indices[j];
if (data->ret_types_[ret_index] == DT_RESOURCE) {
(*output_devices)[ret_index] = target_device;
} else {
(*output_devices)[ret_index] =
comp_data.ret_alloc_attrs[j].on_host() ? host : target_device;
}
}
}
return absl::OkStatus();
}
Status ProcessFunctionLibraryRuntime::PrepareRunMultiDevice(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle,
const MultiDeviceFunctionData** data) const {
if (opts.create_rendezvous) {
return errors::Internal(
"Cannot call ProcessFunctionLibraryRuntime::Run with "
"create_rendezvous=true. Please run the function "
"using FunctionLibraryRuntime::Run");
}
*data = IsMultiDevice(handle);
if (*data == nullptr) {
    return errors::NotFound("Multi-device function handle ", handle,
                            " not found. Was the function instantiated?");
}
if (opts.rendezvous && (*data)->is_cross_process_ &&
!opts.rendezvous->is_cross_process()) {
return errors::InvalidArgument(
"Running a cross process function ", (*data)->function_name_,
" without an appropriate cross process Rendezvous.");
}
return absl::OkStatus();
}
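// Returns the component-subgraph keys with send-only subgraphs ordered first,
// so that their sends are issued before subgraphs that block on receives when
// the components are executed sequentially in the sync path.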
std::vector<string> ProcessFunctionLibraryRuntime::GetOrderedSubgraphs(
const MultiDeviceFunctionData* data) const {
std::vector<string> subgraph_keys;
subgraph_keys.reserve(data->glue_.size());
for (const auto& pair : data->glue_) {
subgraph_keys.push_back(pair.first);
}
auto send_first_ordering = [&](const string& a, const string& b) {
auto a_summary = data->glue_.at(a).async_attributes.summary();
auto b_summary = data->glue_.at(b).async_attributes.summary();
if (a_summary == b_summary) {
return false;
}
if (a_summary == AsyncAttributes::kSendOnly) {
return true;
}
return false;
};
std::sort(subgraph_keys.begin(), subgraph_keys.end(), send_first_ordering);
return subgraph_keys;
}
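// Runs the component functions of a multi-device function sequentially on the
// calling thread, scattering each component's returns into `rets` according
// to the component's recorded return indices.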
Status ProcessFunctionLibraryRuntime::RunMultiDeviceSync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle outer_handle, std::vector<FunctionRet>* rets,
std::function<Status(const ComponentFunctionData& comp_data,
InternalArgs* args)>
get_component_args) const {
const MultiDeviceFunctionData* data;
Status prepare_status = PrepareRunMultiDevice(opts, outer_handle, &data);
if (!prepare_status.ok()) {
return prepare_status;
}
FunctionLibraryRuntime::Options opts_copy = opts;
std::vector<string> subgraph_keys = GetOrderedSubgraphs(data);
for (const string& target : subgraph_keys) {
const ComponentFunctionData& comp_data = data->glue_.at(target);
FunctionLibraryRuntime::Handle comp_handle = comp_data.handle;
opts_copy.args_alloc_attrs = comp_data.arg_alloc_attrs;
opts_copy.rets_alloc_attrs = comp_data.ret_alloc_attrs;
InternalArgs comp_args;
Status args_status = get_component_args(comp_data, &comp_args);
if (!args_status.ok()) {
VLOG(2) << "Failed to get component function arguments: " << args_status;
return args_status;
}
rets->resize(data->num_outputs_);
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
FunctionLibraryRuntime* flr = GetFLR(target);
if (flr != nullptr) {
opts_copy.remote_execution = false;
thread::ThreadPool* pool = flr->device()->tensorflow_device_thread_pool();
opts_copy.runner = (pool == nullptr) ? opts.runner : flr->runner();
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<Tensor> comp_tensor_rets;
Status run_status =
flr->RunSync(opts_copy, comp_handle, GetLocalArgs(comp_args.args),
&comp_tensor_rets);
if (!run_status.ok()) {
VLOG(2) << "Component function execution failed: " << run_status;
const string function_and_msg = strings::StrCat(
errors::FormatFunctionForError(data->function_name_), " ",
run_status.message());
if (opts.rendezvous != nullptr) opts.rendezvous->StartAbort(run_status);
return errors::CreateWithUpdatedMessage(run_status, function_and_msg);
} else {
VLOG(2) << "Component function execution succeeded.";
for (int i = 0; i < comp_tensor_rets.size(); ++i) {
(*rets)[comp_data.ret_indices[i]] = comp_tensor_rets[i];
}
}
} else {
opts_copy.remote_execution = true;
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<std::unique_ptr<CleanUpItem>> cleanup_items;
Notification n;
Status s;
std::vector<FunctionRet> comp_rets;
RunInternal(opts_copy, comp_handle, comp_args.args, &comp_rets,
&cleanup_items, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
}
return absl::OkStatus();
}
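// Runs all component functions of a multi-device function concurrently. A
// reference-counted callback aggregates the per-component statuses, and the
// first failure triggers cancellation of the remaining components.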
void ProcessFunctionLibraryRuntime::RunMultiDeviceAsync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle outer_handle, std::vector<FunctionRet>* rets,
std::vector<std::unique_ptr<CleanUpItem>>* cleanup_items,
FunctionLibraryRuntime::DoneCallback done,
std::function<Status(const ComponentFunctionData& comp_data,
InternalArgs* args)>
get_component_args) const {
const MultiDeviceFunctionData* data;
Status prepare_status = PrepareRunMultiDevice(opts, outer_handle, &data);
if (!prepare_status.ok()) {
done(prepare_status);
return;
}
std::shared_ptr<CancellationManager> local_cm;
CancellationManager* cm = opts.cancellation_manager;
if (cm == nullptr) {
local_cm = std::make_shared<CancellationManager>();
cm = local_cm.get();
}
auto* refcounted_done = new ReffedStatusCallback(std::move(done));
for (int i = 0; i < data->glue_.size(); ++i) {
refcounted_done->Ref();
}
FunctionLibraryRuntime::Options opts_copy = opts;
for (const auto& pair : data->glue_) {
const string& target = pair.first;
const ComponentFunctionData& comp_data = pair.second;
FunctionLibraryRuntime::Handle comp_handle = pair.second.handle;
opts_copy.args_alloc_attrs = comp_data.arg_alloc_attrs;
opts_copy.rets_alloc_attrs = comp_data.ret_alloc_attrs;
opts_copy.cancellation_manager = cm;
InternalArgs comp_args;
Status s = get_component_args(comp_data, &comp_args);
if (!s.ok()) {
VLOG(2) << "Failed to get component function arguments: " << s;
refcounted_done->UpdateStatus(s);
refcounted_done->Unref();
cm->StartCancel();
continue;
}
std::vector<FunctionRet>* comp_rets = new std::vector<FunctionRet>;
rets->resize(data->num_outputs_);
auto component_fn_callback = [comp_rets, rets, comp_data, refcounted_done,
cm, local_cm, data, comp_handle,
target](const Status& status) {
if (!status.ok()) {
VLOG(2) << "Component function execution on target " << target
<< " from " << data->function_name_ << " with handle "
<< comp_handle << " failed: " << status;
const string function_and_msg = strings::StrCat(
errors::FormatFunctionForError(data->function_name_), " ",
status.message());
refcounted_done->UpdateStatus(
errors::CreateWithUpdatedMessage(status, function_and_msg));
cm->StartCancel();
} else {
VLOG(2) << "Component function execution on target " << target
<< " from " << data->function_name_ << " with handle "
<< comp_handle << " succeeded.";
for (int i = 0; i < comp_rets->size(); ++i) {
(*rets)[comp_data.ret_indices[i]] = (*comp_rets)[i];
}
}
delete comp_rets;
refcounted_done->Unref();
};
FunctionLibraryRuntime* flr = GetFLR(target);
if (flr != nullptr) {
opts_copy.remote_execution = false;
thread::ThreadPool* pool = flr->device()->tensorflow_device_thread_pool();
opts_copy.runner = (pool == nullptr) ? opts.runner : flr->runner();
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
VLOG(4) << " with " << opts_copy.DebugString();
std::vector<Tensor>* comp_tensor_rets = new std::vector<Tensor>;
flr->Run(
opts_copy, comp_handle, GetLocalArgs(comp_args.args),
comp_tensor_rets,
TensorsToFunctionRetsDoneCallback(comp_rets, comp_tensor_rets,
std::move(component_fn_callback)));
} else {
opts_copy.remote_execution = true;
VLOG(1) << "Running component function on device " << target << " from "
<< data->function_name_ << " with handle " << comp_handle;
VLOG(4) << " with " << opts_copy.DebugString();
RunInternal(opts_copy, comp_handle, comp_args.args, comp_rets,
cleanup_items, std::move(component_fn_callback));
}
}
refcounted_done->Unref();
}
Status ProcessFunctionLibraryRuntime::Instantiate(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle) {
if (options.is_multi_device_function) {
return InstantiateMultiDevice(function_name, attrs, options, handle);
}
*handle = kInvalidHandle;
FunctionLibraryRuntime* flr = GetFLR(options.target);
if (flr != nullptr) {
return flr->Instantiate(function_name, attrs, options, handle);
}
Status status;
Notification notification;
InstantiateRemote(function_name, attrs, options, handle,
[&status, ¬ification](const Status& s) {
status = s;
notification.Notify();
});
notification.WaitForNotification();
return status;
}
Status ProcessFunctionLibraryRuntime::IsCrossProcess(
FunctionLibraryRuntime::Handle handle, bool* is_cross_process) const {
tf_shared_lock l(mu_);
const auto& mdevice_it = mdevice_data_.find(handle);
if (mdevice_it != mdevice_data_.end()) {
*is_cross_process = mdevice_it->second->is_cross_process_;
return absl::OkStatus();
}
const auto& it = function_data_.find(handle);
if (it != function_data_.end()) {
*is_cross_process = it->second->is_cross_process();
return absl::OkStatus();
}
return errors::InvalidArgument("Handle ", handle, " not found.");
}
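// Instantiates `function_name` on a device owned by another process via the
// distributed function library runtime, caching the resulting handle under
// the canonicalized function key.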
void ProcessFunctionLibraryRuntime::InstantiateRemote(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::Handle* handle,
FunctionLibraryRuntime::DoneCallback done) {
if (parent_ == nullptr) {
    done(errors::Internal(
        "Instantiating functions on device ", options.target,
        " is not currently supported."));
return;
}
auto target = options.target;
VLOG(1) << "ProcessFLR Instantiate: " << function_name << " on: " << target;
string function_key = Canonicalize(function_name, attrs, options);
FunctionData* f;
{
mutex_lock l(mu_);
FunctionLibraryRuntime::Handle h =
gtl::FindWithDefault(table_, function_key, kInvalidHandle);
if (h == kInvalidHandle || function_data_.count(h) == 0) {
h = AddHandleLocked(function_key, target, kInvalidHandle);
}
f = function_data_[h].get();
*handle = h;
}
f->DistributedInit(
parent_, function_name,
options.lib_def == nullptr ? *lib_def_ : *options.lib_def, attrs, options,
[this, function_name, target, handle, done](const Status& s) {
VLOG(1) << "ProcessFLR Instantiate [success]: " << function_name
<< " on: " << target << " with handle: " << *handle
<< " (this: " << this << ")";
done(s);
});
}
Status ProcessFunctionLibraryRuntime::RemoveHandle(
FunctionLibraryRuntime::Handle handle) {
mutex_lock l(mu_);
table_.erase(function_data_[handle]->function_key());
function_data_.erase(handle);
return absl::OkStatus();
}
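// Decrements the instantiation count of a multi-device handle and, once it
// reaches zero, releases every component handle on its owning
// FunctionLibraryRuntime.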
Status ProcessFunctionLibraryRuntime::ReleaseMultiDeviceHandle(
FunctionLibraryRuntime::Handle handle) {
std::unique_ptr<MultiDeviceFunctionData> mdata;
{
mutex_lock l(mu_);
auto it = mdevice_data_.find(handle);
--it->second->instantiation_counter_;
if (it->second->instantiation_counter_ != 0) {
return absl::OkStatus();
}
mdata = std::move(it->second);
table_.erase(mdata->function_key_);
mdevice_data_.erase(it);
}
Status overall_status;
for (const auto& it : mdata->glue_) {
const string& device = it.first;
FunctionLibraryRuntime::Handle flr_handle = it.second.handle;
FunctionLibraryRuntime* flr = GetFLR(device);
if (flr == nullptr) {
if (parent_ != nullptr) {
return errors::Unimplemented(
"Releasing a multi-device component handle on a remote device is "
"not yet implemented.");
}
return errors::InvalidArgument(
"Failed to find FunctionLibraryRuntime for device ", device,
" when releasing multi-device function handle ", handle);
}
Status status = flr->ReleaseHandle(flr_handle);
if (!status.ok()) {
overall_status = status;
}
}
return overall_status;
}
Status ProcessFunctionLibraryRuntime::ReleaseHandle(
FunctionLibraryRuntime::Handle handle) {
if (flr_map_ == nullptr) return absl::OkStatus();
if (IsMultiDevice(handle)) {
return ReleaseMultiDeviceHandle(handle);
}
FunctionLibraryRuntime* flr = nullptr;
string target_device;
{
mutex_lock l(mu_);
CHECK_EQ(1, function_data_.count(handle)) << " handle: " << handle;
target_device = function_data_[handle]->target_device();
}
flr = GetFLR(target_device);
if (flr != nullptr) {
return flr->ReleaseHandle(handle);
}
return errors::InvalidArgument("Handle not found: ", handle);
}
FunctionLibraryRuntime::DoneCallback
ProcessFunctionLibraryRuntime::ApplyCleanUpToDoneCallback(
std::vector<std::unique_ptr<CleanUpItem>>* items,
FunctionLibraryRuntime::DoneCallback done,
const FunctionLibraryRuntime::Options& opts,
tsl::core::RefCountPtr<Rendezvous> created_rendezvous) const {
return [this, items, done = std::move(done), step_id = opts.step_id,
created_rendezvous =
created_rendezvous.release()](const Status& status) {
if (created_rendezvous != nullptr) {
created_rendezvous->Unref();
}
auto* local_status = new Status(status);
CleanUp(items, [local_status, done](const Status& cleanup_status) {
local_status->Update(cleanup_status);
done(*local_status);
delete local_status;
});
delete items;
};
}
Status ProcessFunctionLibraryRuntime::CreateRendezvous(
FunctionLibraryRuntime::Options& opts,
tsl::core::RefCountPtr<Rendezvous>* created_rendezvous) const {
DCHECK(opts.rendezvous == nullptr);
if (!rendezvous_factory_) {
return errors::FailedPrecondition(
"The caller does not provide a rendezvous and "
"ProcessFunctionLibraryRuntime was created without a rendezvous "
"factory.");
}
Status s = rendezvous_factory_(opts.step_id, device_mgr_, created_rendezvous);
if (s.ok()) {
opts.rendezvous = created_rendezvous->get();
opts.create_rendezvous = false;
}
return s;
}
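// Selects the arguments for one component function from the full argument
// list using the component's arg indices. A non-negative sub_index extracts a
// single ResourceHandle from a packed DT_RESOURCE tensor.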
Status ProcessFunctionLibraryRuntime::GetComponentArgs(
const absl::Span<const Tensor> args,
const ProcessFunctionLibraryRuntime::ComponentFunctionData& comp_data,
ProcessFunctionLibraryRuntime::InternalArgs* comp_args) {
for (const auto& it : comp_data.arg_indices) {
if (it.index >= args.size()) {
return errors::InvalidArgument("index ", it.index,
" is out of range [0, ", args.size(), ")");
}
if (it.sub_index >= 0) {
const Tensor& t = args[it.index];
if (t.dtype() != DT_RESOURCE) {
return errors::InvalidArgument("Got unexpected sub_index ",
it.sub_index, " for argument ",
it.index);
}
const auto& handles = t.flat<ResourceHandle>();
if (it.sub_index >= handles.size()) {
return errors::InvalidArgument("Sub_index ", it.sub_index,
"is out of range [0,", handles.size(),
") for argument ", it.index);
}
comp_args->args.push_back(Tensor(handles(it.sub_index)));
} else {
comp_args->args.push_back(args[it.index]);
}
}
return absl::OkStatus();
}
#if !defined(IS_MOBILE_PLATFORM)
Status ProcessFunctionLibraryRuntime::GetComponentArgs(
const FunctionArgsInterface& args,
const ProcessFunctionLibraryRuntime::ComponentFunctionData& comp_data,
ProcessFunctionLibraryRuntime::InternalArgs* comp_args) {
for (int i = 0; i < comp_data.arg_indices.size(); ++i) {
const FunctionArgIndex index = comp_data.arg_indices.at(i);
Tensor tensor;
if (args.GetLocalArg(index, &tensor).ok()) {
comp_args->args.push_back(std::move(tensor));
} else {
eager::RemoteTensorHandle remote_handle;
TF_RETURN_IF_ERROR(args.GetRemoteArg(index, &remote_handle));
comp_args->remote_args.emplace_back(
std::make_unique<eager::RemoteTensorHandle>(
std::move(remote_handle)));
comp_args->args.push_back(comp_args->remote_args.back().get());
}
}
return absl::OkStatus();
}
#endif
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets,
FunctionLibraryRuntime::DoneCallback done) const {
FunctionLibraryRuntime::Options new_opts = opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!opts.rendezvous) {
Status s = CreateRendezvous(new_opts, &created_rendezvous);
if (!s.ok()) {
done(s);
return;
}
}
auto* cleanup_items = new std::vector<std::unique_ptr<CleanUpItem>>;
done = ApplyCleanUpToDoneCallback(cleanup_items, std::move(done), new_opts,
std::move(created_rendezvous));
std::vector<FunctionRet>* function_rets = new std::vector<FunctionRet>;
done = [rets, function_rets, done = std::move(done)](const Status& s) {
Status status = s;
if (status.ok()) {
status.Update(FunctionRetsToTensors(function_rets, rets));
}
delete function_rets;
done(status);
};
bool multi_device = HasMultiDeviceHandle(handle);
if (multi_device) {
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) -> Status {
return GetComponentArgs(args, comp_data, comp_args);
};
return RunMultiDeviceAsync(new_opts, handle, function_rets, cleanup_items,
std::move(done), std::move(get_component_args));
}
std::vector<FunctionArg> local_args;
for (const auto& tensor : args) {
local_args.push_back(tensor);
}
RunInternal(new_opts, handle, local_args, function_rets, cleanup_items,
std::move(done));
}
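// Runs a single-device (non-multi-device) function handle. For a device in
// this process, arguments are shipped to the target through the rendezvous
// and results are received back; otherwise execution is delegated to the
// distributed runtime and a cleanup item is recorded for the step.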
void ProcessFunctionLibraryRuntime::RunInternal(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const FunctionArg> args,
std::vector<FunctionRet>* rets,
std::vector<std::unique_ptr<CleanUpItem>>* cleanup_items,
FunctionLibraryRuntime::DoneCallback done) const {
FunctionLibraryRuntime* flr = nullptr;
string target_device;
FunctionLibraryRuntime::LocalHandle local_handle;
{
tf_shared_lock l(mu_);
auto iter = function_data_.find(handle);
if (iter == function_data_.end()) {
done(errors::NotFound("Handle: ", handle, " not found."));
return;
}
FunctionData* function_data = iter->second.get();
target_device = function_data->target_device();
local_handle = function_data->local_handle();
}
if (!opts.remote_execution) {
done(
errors::InvalidArgument("ProcessFunctionLibraryRuntime::Run should "
"only be called for multi-device functions or "
"for remote execution."));
return;
}
flr = GetFLR(target_device);
if (flr != nullptr) {
auto rendezvous = opts.rendezvous;
string source_device = opts.source_device;
DeviceContext* device_context;
Status s = GetDeviceContext(source_device, &device_context);
if (!s.ok()) {
done(s);
return;
}
int64_t src_incarnation, target_incarnation;
s = GetDeviceIncarnation(source_device, &src_incarnation);
s.Update(GetDeviceIncarnation(target_device, &target_incarnation));
if (!s.ok()) {
done(s);
return;
}
std::vector<Tensor> local_args = GetLocalArgs(args);
s = SendTensors(source_device, target_device, "arg_", src_incarnation,
local_args, device_context, opts.args_alloc_attrs,
rendezvous);
if (!s.ok()) {
done(s);
return;
}
const std::vector<AllocatorAttributes>& rets_alloc_attrs =
opts.rets_alloc_attrs;
std::vector<Tensor>* remote_rets = new std::vector<Tensor>;
flr->Run(opts, handle, local_args, remote_rets,
[source_device, target_device, target_incarnation, rendezvous,
device_context, rets_alloc_attrs, remote_rets, rets,
done = std::move(done)](const Status& status) mutable {
if (!status.ok()) {
delete remote_rets;
done(status);
return;
}
int64_t num_returns = remote_rets->size();
delete remote_rets;
std::vector<Tensor>* recv_tensors = new std::vector<Tensor>;
ReceiveTensorsAsync(target_device, source_device, "ret_",
target_incarnation, num_returns,
device_context, rets_alloc_attrs, rendezvous,
recv_tensors,
TensorsToFunctionRetsDoneCallback(
rets, recv_tensors, std::move(done)));
});
return;
}
if (parent_ != nullptr) {
auto cleanup_item = std::make_unique<CleanUpItem>();
cleanup_item->device = target_device;
cleanup_item->step_id = opts.step_id;
cleanup_item->local_handle = local_handle;
cleanup_items->emplace_back(std::move(cleanup_item));
parent_->Run(opts, local_handle, args, rets, std::move(done));
return;
}
done(errors::Internal("Could not find device"));
}
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, CallFrameInterface* frame,
FunctionLibraryRuntime::DoneCallback done) const {
std::vector<Tensor> args;
args.reserve(frame->num_args());
  for (size_t i = 0; i < frame->num_args(); ++i) {
    const Tensor* arg;
    Status s = frame->GetArg(i, &arg);
    if (!s.ok()) {
      // `arg` is only valid when GetArg succeeds; report the error and stop
      // instead of dereferencing it and continuing the loop.
      done(s);
      return;
    }
    args.emplace_back(*arg);
  }
std::vector<Tensor>* rets = new std::vector<Tensor>;
rets->reserve(frame->num_retvals());
Run(opts, handle, args, rets,
[frame, rets, done = std::move(done)](const Status& status) {
std::unique_ptr<std::vector<Tensor>> rets_releaser(rets);
if (!status.ok()) {
done(status);
return;
}
if (rets->size() != frame->num_retvals()) {
done(errors::Internal(
"Number of return values from function (", rets->size(),
") did not match expected number of return values (",
frame->num_retvals(), ")."));
return;
}
for (size_t i = 0; i < frame->num_retvals(); ++i) {
Status s = frame->SetRetval(i, (*rets)[i]);
if (!s.ok()) {
done(s);
return;
}
}
done(absl::OkStatus());
});
}
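// Synchronous entry point. Multi-device functions that were marked safe for
// sync execution run inline via RunMultiDeviceSync; everything else falls
// back to the async Run combined with a notification.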
Status ProcessFunctionLibraryRuntime::RunSync(
const FunctionLibraryRuntime::Options& orig_opts,
FunctionLibraryRuntime::Handle handle, absl::Span<const Tensor> args,
std::vector<Tensor>* rets) const {
MultiDeviceFunctionData* multi_device_data = IsMultiDevice(handle);
if (multi_device_data && multi_device_data->enable_sync_execution) {
metrics::IncrementTestCounter("pflr_runsync", "sync");
FunctionLibraryRuntime::Options new_opts = orig_opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!new_opts.rendezvous) {
TF_RETURN_IF_ERROR(CreateRendezvous(new_opts, &created_rendezvous));
}
std::vector<FunctionRet> function_rets;
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) {
return GetComponentArgs(args, comp_data, comp_args);
};
Status status = RunMultiDeviceSync(new_opts, handle, &function_rets,
std::move(get_component_args));
status.Update(FunctionRetsToTensors(&function_rets, rets));
return status;
} else {
metrics::IncrementTestCounter("pflr_runsync", "async");
Notification n;
Status s;
Run(orig_opts, handle, args, rets, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
}
Status ProcessFunctionLibraryRuntime::RunSync(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, CallFrameInterface* frame) const {
Notification n;
Status s;
Run(opts, handle, frame, [&n, &s](const Status& status) {
s.Update(status);
n.Notify();
});
n.WaitForNotification();
return s;
}
void ProcessFunctionLibraryRuntime::Run(
const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::Handle handle, const FunctionArgsInterface& args,
std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) const {
bool has_remote_outputs = false;
const MultiDeviceFunctionData* data = IsMultiDevice(handle);
if (data != nullptr) {
has_remote_outputs = data->has_remote_outputs;
}
if (!args.HasRemoteOrPackedInputs() && !has_remote_outputs) {
const std::vector<Tensor> local_inputs = args.GetLocalTensors();
std::vector<Tensor>* tensor_rets = new std::vector<Tensor>;
return Run(
opts, handle, local_inputs, tensor_rets,
TensorsToFunctionRetsDoneCallback(rets, tensor_rets, std::move(done)));
}
FunctionLibraryRuntime::Options new_opts = opts;
tsl::core::RefCountPtr<Rendezvous> created_rendezvous = nullptr;
if (!opts.rendezvous) {
Status s = CreateRendezvous(new_opts, &created_rendezvous);
if (!s.ok()) {
done(s);
return;
}
}
#if defined(IS_MOBILE_PLATFORM)
done(errors::Unimplemented(
"Remote inputs are not available on mobile devices."));
return;
#else
auto* cleanup_items = new std::vector<std::unique_ptr<CleanUpItem>>;
done = ApplyCleanUpToDoneCallback(cleanup_items, done, opts,
std::move(created_rendezvous));
auto get_component_args = [&args](const ComponentFunctionData& comp_data,
InternalArgs* comp_args) -> Status {
return GetComponentArgs(args, comp_data, comp_args);
};
return RunMultiDeviceAsync(new_opts, handle, rets, cleanup_items,
std::move(done), std::move(get_component_args));
#endif
}
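// Releases per-step state recorded for remote component executions by
// forwarding each cleanup item to the distributed runtime; local items are
// unexpected and reported as internal errors.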
void ProcessFunctionLibraryRuntime::CleanUp(
std::vector<std::unique_ptr<CleanUpItem>>* items,
FunctionLibraryRuntime::DoneCallback done) const {
auto* refcounted_done = new ReffedStatusCallback(std::move(done));
for (auto& item : *items) {
refcounted_done->Ref();
auto* flr = GetFLR(item->device);
if (flr != nullptr) {
refcounted_done->UpdateStatus(
errors::Internal("Cleanup items shouldn't contain local item."));
refcounted_done->Unref();
} else if (parent_ != nullptr) {
parent_->CleanUp(item->step_id, item->local_handle,
[refcounted_done](const Status& status) {
if (!status.ok()) {
refcounted_done->UpdateStatus(status);
}
refcounted_done->Unref();
});
} else {
refcounted_done->UpdateStatus(
errors::Internal("Could not find device in cleanup."));
refcounted_done->Unref();
}
}
refcounted_done->Unref();
}
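// Creates a new ProcessFunctionLibraryRuntime sharing this one's device
// manager, parent, and rendezvous factory, with a copied (or empty, when
// `skip_flib_def` is set) function library.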
Status ProcessFunctionLibraryRuntime::Clone(
Env* env, int graph_def_version, const OptimizerOptions& optimizer_options,
std::unique_ptr<FunctionLibraryDefinition>* out_lib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime>* out_pflr,
bool skip_flib_def) const {
if (skip_flib_def) {
*out_lib_def = std::make_unique<FunctionLibraryDefinition>(
lib_def_->default_registry(), FunctionDefLibrary());
} else {
*out_lib_def = std::make_unique<FunctionLibraryDefinition>(*lib_def_);
}
*out_pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_, env, config_ ? &(*config_) : nullptr, graph_def_version,
out_lib_def->get(), optimizer_options, default_thread_pool_, parent_,
session_metadata_, rendezvous_factory_);
{
tf_shared_lock l(mu_);
for (auto* d : composite_devices_) (*out_pflr)->AddCompositeDevice(d);
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/rendezvous_cache.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/type_index.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/protobuf.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#endif
namespace tensorflow {
namespace {
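// A minimal DistributedFunctionLibraryRuntime that hands out monotonically
// increasing handles and ignores Run/CleanUp, used to exercise the
// cross-process code paths without a real cluster.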
class TestClusterFLR : public DistributedFunctionLibraryRuntime {
public:
explicit TestClusterFLR(DeviceMgr* device_mgr) : device_mgr_(device_mgr) {}
void Instantiate(const string& function_name,
const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
FunctionLibraryRuntime::LocalHandle* handle,
FunctionLibraryRuntime::DoneCallback done) override {
{
mutex_lock l(mu_);
*handle = next_handle_;
next_handle_++;
}
done(absl::OkStatus());
}
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const Tensor> args, std::vector<Tensor>* rets,
FunctionLibraryRuntime::DoneCallback done) override {}
void Run(const FunctionLibraryRuntime::Options& opts,
FunctionLibraryRuntime::LocalHandle handle,
absl::Span<const FunctionArg> args, std::vector<FunctionRet>* rets,
FunctionLibraryRuntime::DoneCallback done) override {}
void CleanUp(uint64 step_id, FunctionLibraryRuntime::LocalHandle handle,
FunctionLibraryRuntime::DoneCallback done) override {}
DeviceMgr* remote_device_mgr() const override { return device_mgr_; }
private:
mutex mu_;
int next_handle_ TF_GUARDED_BY(mu_) = 0;
DeviceMgr* device_mgr_;
};
SessionMetadata GenerateSessionMetadata() {
SessionMetadata session_metadata;
session_metadata.set_name("name");
session_metadata.set_version(42);
return session_metadata;
}
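// Test fixture that builds a process with three CPU devices (one is held back
// in `device2_` to exercise lookup failures), a function library, and a
// ProcessFunctionLibraryRuntime wired to a cached intra-process rendezvous.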
class ProcessFunctionLibraryRuntimeTest : public ::testing::Test {
public:
ProcessFunctionLibraryRuntimeTest()
: rendezvous_cache_(new RendezvousCache<IntraProcessRendezvous>()) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 3});
std::vector<std::unique_ptr<Device>> created_devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&created_devices));
device2_ = std::move(created_devices[2]);
created_devices.erase(created_devices.begin() + 2);
device_mgr_ = std::make_unique<DynamicDeviceMgr>();
TF_CHECK_OK(device_mgr_->AddDevices(std::move(created_devices)));
TF_CHECK_OK(device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:CPU:0", &device0_));
TF_CHECK_OK(device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:CPU:1", &device1_));
Device* device2_ptr = nullptr;
EXPECT_NE(
error::OK,
device_mgr_
->LookupDevice("/job:a/replica:0/task:0/device:CPU:2", &device2_ptr)
.code());
Status status = device_mgr_->LookupDevice(
"/job:a/replica:0/task:0/device:GPU:0", &gpu_device_);
if (!status.ok()) {
CHECK_EQ(nullptr, gpu_device_);
}
}
void Init(const std::vector<FunctionDef>& flib,
const SessionMetadata* session_metadata = nullptr,
const std::vector<OptimizedFunctionGraph>&
optimized_function_graphs = {}) {
FunctionDefLibrary proto;
for (const auto& fdef : flib) *(proto.add_function()) = fdef;
lib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), proto));
for (const auto& fg : optimized_function_graphs) {
lib_def_->AddOptimizedFunctionGraph(fg.name(), fg);
}
OptimizerOptions opts;
cluster_flr_.reset(new TestClusterFLR(device_mgr_.get()));
proc_flr_.reset(new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def_.get(), opts,
nullptr, cluster_flr_.get(), session_metadata,
Rendezvous::Factory{[this](const int64_t step_id,
const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = this->rendezvous_cache_->FindOrCreate(step_id, [device_mgr]() {
return tsl::core::RefCountPtr<IntraProcessRendezvous>(
new IntraProcessRendezvous(device_mgr));
});
return absl::OkStatus();
}}));
}
void AddCompositeDevice(CompositeDevice* d) {
proc_flr_->AddCompositeDevice(d);
}
Status Instantiate(
const string& name, test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
FunctionLibraryRuntime::Handle* handle) {
return proc_flr_->Instantiate(name, attrs, instantiate_opts, handle);
}
Tensor GPUToCPU(const Tensor& device_tensor) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CHECK(gpu_device_);
CHECK(gpu_device_->tensorflow_accelerator_device_info() != nullptr);
DeviceContext* device_context =
gpu_device_->tensorflow_accelerator_device_info()->default_context;
Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape());
CHECK(device_context
->CopyDeviceTensorToCPUSync(&device_tensor, "", gpu_device_,
&cpu_tensor)
.ok());
return cpu_tensor;
#else
CHECK(false);
#endif
}
Tensor CPUToGPU(const Tensor& cpu_tensor) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CHECK(gpu_device_);
CHECK(gpu_device_->tensorflow_accelerator_device_info() != nullptr);
DeviceContext* device_context =
gpu_device_->tensorflow_accelerator_device_info()->default_context;
Tensor device_tensor(gpu_device_->GetAllocator({}), cpu_tensor.dtype(),
cpu_tensor.shape(), {});
CHECK(device_context
->CopyCPUTensorToDeviceSync(&cpu_tensor, gpu_device_,
&device_tensor)
.ok());
return device_tensor;
#else
CHECK(false);
#endif
}
template <typename T, typename K>
Status RunWithRuntime(
const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const T& args, std::vector<K*> rets,
ProcessFunctionLibraryRuntime* pflr) {
FunctionLibraryRuntime::Handle handle;
Status status = pflr->Instantiate(name, attrs, instantiate_opts, &handle);
if (!status.ok()) {
return status;
}
bool is_cross_process = false;
TF_CHECK_OK(pflr->IsCrossProcess(handle, &is_cross_process));
EXPECT_FALSE(is_cross_process);
std::function<void(std::function<void()>)> runner =
[](std::function<void()> fn) {
test::function::FunctionTestSchedClosure(fn);
};
Notification done;
opts.runner = &runner;
std::vector<K> out;
pflr->Run(opts, handle, args, &out, [&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
status = pflr->ReleaseHandle(handle);
if (!status.ok()) {
return status;
}
Notification done2;
pflr->Run(opts, handle, args, &out, [&status, &done2](const Status& s) {
status = s;
done2.Notify();
});
done2.WaitForNotification();
EXPECT_TRUE(errors::IsNotFound(status)) << "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(status.message(), "not found."));
return absl::OkStatus();
}
Status Run(const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const std::vector<Tensor>& args, std::vector<Tensor*> rets,
ProcessFunctionLibraryRuntime* pflr = nullptr) {
    return RunWithRuntime<std::vector<Tensor>, Tensor>(
        name, opts, attrs, instantiate_opts, args, rets,
        pflr != nullptr ? pflr : proc_flr_.get());
}
Status RunWithPackedArgs(
const string& name, FunctionLibraryRuntime::Options opts,
test::function::Attrs attrs,
const FunctionLibraryRuntime::InstantiateOptions& instantiate_opts,
const FunctionArgsInterface& args, std::vector<FunctionRet*> rets,
ProcessFunctionLibraryRuntime* pflr = nullptr) {
    return RunWithRuntime<FunctionArgsInterface, FunctionRet>(
        name, opts, attrs, instantiate_opts, args, rets,
        pflr != nullptr ? pflr : proc_flr_.get());
}
Status RunInstantiated(FunctionLibraryRuntime::Handle handle,
FunctionLibraryRuntime::Options opts,
const std::vector<Tensor>& args,
std::vector<Tensor*> rets) {
std::function<void(std::function<void()>)> runner =
[](std::function<void()> fn) {
test::function::FunctionTestSchedClosure(fn);
};
opts.runner = &runner;
Status status;
Notification done;
std::vector<Tensor> out;
proc_flr_->Run(opts, handle, args, &out, [&status, &done](const Status& s) {
status = s;
done.Notify();
});
done.WaitForNotification();
if (!status.ok()) {
return status;
}
CHECK_EQ(rets.size(), out.size());
for (size_t i = 0; i < rets.size(); ++i) {
*rets[i] = out[i];
}
return absl::OkStatus();
}
std::unique_ptr<DynamicDeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
std::unique_ptr<Device> device2_;
Device* gpu_device_ = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def_;
std::unique_ptr<TestClusterFLR> cluster_flr_;
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr_;
tsl::core::RefCountPtr<RendezvousCache<IntraProcessRendezvous>>
rendezvous_cache_;
};
TEST_F(ProcessFunctionLibraryRuntimeTest, GetFLRNull) {
FunctionDefLibrary proto;
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), proto));
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr(
new ProcessFunctionLibraryRuntime(
          nullptr, Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def.get(), opts));
FunctionLibraryRuntime* flr =
proc_flr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
EXPECT_NE(flr, nullptr);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, DeviceSet) {
FunctionDefLibrary proto;
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), proto));
OptimizerOptions opts;
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(device2_));
auto mgr = std::make_unique<DynamicDeviceMgr>();
TF_CHECK_OK(mgr.get()->AddDevices(std::move(devices)));
std::unique_ptr<ProcessFunctionLibraryRuntime> proc_flr(
new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
nullptr));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:0"));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:1"));
cluster_flr_.reset(new TestClusterFLR(mgr.get()));
proc_flr.reset(new ProcessFunctionLibraryRuntime(
device_mgr_.get(), Env::Default(),
nullptr, TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
nullptr, cluster_flr_.get()));
EXPECT_NE(nullptr, proc_flr->device_set()->FindDeviceByName(
"/job:a/replica:0/task:0/device:CPU:2"));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, Basic) {
Init({});
FunctionLibraryRuntime* flr =
proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/device:CPU:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/device:CPU:0");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device0_);
flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:1");
EXPECT_NE(flr, nullptr);
EXPECT_EQ(flr->device(), device1_);
flr = proc_flr_->GetFLR("abc");
EXPECT_EQ(flr, nullptr);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, GetDeviceIncarnation) {
Init({});
int64_t incarnation;
TF_EXPECT_OK(proc_flr_->GetDeviceIncarnation("/job:a/replica:0/task:0/cpu:1",
&incarnation));
EXPECT_NE(incarnation, 0);
Status s = proc_flr_->GetDeviceIncarnation("/job:a/replica:0/task:0/cpu:2",
&incarnation);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SingleCall) {
Init({test::function::XTimesTwo()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
auto x = test::AsTensor<float>({1, 2, 3, 4});
Tensor y;
TF_CHECK_OK(
Run("XTimesTwo", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({2, 4, 6, 8}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SingleCallFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
Tensor y;
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:0"},
TensorShape({})));
EXPECT_EQ(0, rendezvous_cache_->Size());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsSameDeviceXTimes) {
Init({test::function::XTimesTwo(), test::function::XTimesFour()});
auto x = test::AsTensor<float>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
Tensor y;
TF_CHECK_OK(
Run("XTimesTwo", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({2, 4, 6, 8}));
TF_CHECK_OK(
Run("XTimesFour", opts, {{"T", DT_FLOAT}}, instantiate_opts, {x}, {&y}));
test::ExpectTensorEqual<float>(y, test::AsTensor<float>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
SameDeviceXTimesFourInt32MultiDevice) {
Init({test::function::XTimesTwoInt32(), test::function::XTimesFourInt32()});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
instantiate_opts.input_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.output_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.is_multi_device_function = true;
Tensor y;
TF_CHECK_OK(Run("XTimesFourInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultipleCallsSameDeviceXTimesMultiDevice) {
Init({test::function::XTimesTwoInt32(), test::function::XTimesFourInt32()});
auto x = test::AsTensor<int32>({1, 2, 3, 4});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
instantiate_opts.input_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.output_devices = {"/job:a/replica:0/task:0/cpu:0"};
instantiate_opts.is_multi_device_function = true;
Tensor y;
TF_CHECK_OK(Run("XTimesTwoInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({2, 4, 6, 8}));
TF_CHECK_OK(Run("XTimesFourInt32", opts, {{"T", DT_INT32}}, instantiate_opts,
{x}, {&y}));
test::ExpectTensorEqual<int32>(y, test::AsTensor<int32>({4, 8, 12, 16}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsSameDeviceFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:1";
Tensor y;
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultipleCallsDiffDeviceFindDevice) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
Tensor y;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts_0;
instantiate_opts_0.target = "/job:a/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts_0, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:0"},
TensorShape({})));
FunctionLibraryRuntime::InstantiateOptions instantiate_opts_1;
instantiate_opts_1.target = "/job:a/replica:0/task:0/device:CPU:1";
TF_CHECK_OK(Run("FindDevice", opts, {}, instantiate_opts_1, {}, {&y}));
test::ExpectTensorEqual<tstring>(
y, test::AsTensor<tstring>({"/job:a/replica:0/task:0/device:CPU:1"},
TensorShape({})));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, InstantiateFunctionOnRemovedDevice) {
std::vector<std::unique_ptr<Device>> devices;
Device* device2_ptr = device2_.get();
devices.emplace_back(std::move(device2_));
TF_CHECK_OK(device_mgr_->AddDevices(std::move(devices)));
Init({test::function::FindDevice()});
std::vector<Device*> remove_devices{device2_ptr};
TF_CHECK_OK(device_mgr_->RemoveDevices(std::move(remove_devices)));
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
FunctionLibraryRuntime::Handle h;
instantiate_opts.target = "/job:a/replica:0/task:0/device:CPU:1";
instantiate_opts.is_multi_device_function = true;
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:2"}},
instantiate_opts, &h));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, ClusterFLRSerialTest) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:b/replica:0/task:0/device:CPU:0";
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
bool is_cross_process = false;
TF_CHECK_OK(proc_flr_->IsCrossProcess(h, &is_cross_process));
EXPECT_TRUE(is_cross_process);
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
instantiate_opts.target = "/job:c/replica:0/task:0/device:CPU:0";
TF_CHECK_OK(Instantiate("FindDevice",
{{"_target", "/job:c/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(1, proc_flr_->GetHandleOnDevice(
"/job:c/replica:0/task:0/device:CPU:0", h));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, ClusterFLRParallelTest) {
Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:b/replica:0/task:0/device:CPU:0";
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
auto fn = [this, &instantiate_opts]() {
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(Instantiate(
"FindDevice", {{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
instantiate_opts, &h));
EXPECT_EQ(0, proc_flr_->GetHandleOnDevice(
"/job:b/replica:0/task:0/device:CPU:0", h));
};
for (int i = 0; i < 100; ++i) {
tp->Schedule(fn);
}
delete tp;
}
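// Returns true if the tensor's backing buffer lives in device memory.
// Only valid in CUDA/ROCm builds; CHECK-fails otherwise.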
bool IsCUDATensor(const Tensor& t) {
#if GOOGLE_CUDA
cudaPointerAttributes attributes;
cudaError_t err =
cudaPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == cudaErrorInvalidValue) return false;
CHECK_EQ(cudaSuccess, err) << cudaGetErrorString(err);
return (attributes.type == cudaMemoryTypeDevice);
#elif TENSORFLOW_USE_ROCM
hipPointerAttribute_t attributes;
hipError_t err = hipPointerGetAttributes(&attributes, t.tensor_data().data());
if (err == hipErrorInvalidValue) return false;
CHECK_EQ(hipSuccess, err) << hipGetErrorString(err);
return (attributes.memoryType == hipMemoryTypeDevice);
#else
CHECK(false)
<< "IsCUDATensor should not be called when CUDA is not available";
#endif
}
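// Shared driver for the TwoDeviceMult tests: runs the function with the given
// instantiation options and either expects an InvalidArgument error containing
// `error`, or checks that the CPU output is x*2 and the GPU output is x*3.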
void TestTwoDeviceMult(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts,
const string& error = "") {
fixture->Init({test::function::TwoDeviceMult()});
FunctionLibraryRuntime::Options opts;
auto x = test::AsTensor<float>({1, 2, 3});
Tensor y_cpu;
Tensor y_gpu;
Status status = fixture->Run("TwoDeviceMult", opts, {{"T", DT_FLOAT}},
inst_opts, {x}, {&y_cpu, &y_gpu});
if (!error.empty()) {
EXPECT_TRUE(errors::IsInvalidArgument(status))
<< "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(status.message(), error))
<< "Actual error message: " << status.message();
return;
}
EXPECT_TRUE(status.ok()) << "Actual status: " << status;
EXPECT_FALSE(IsCUDATensor(y_cpu));
test::ExpectTensorEqual<float>(y_cpu, test::AsTensor<float>({2, 4, 6}));
EXPECT_TRUE(IsCUDATensor(y_gpu));
Tensor y_gpu_on_cpu = fixture->GPUToCPU(y_gpu);
test::ExpectTensorEqual<float>(y_gpu_on_cpu,
test::AsTensor<float>({3, 6, 9}));
}
void TestInstantiateSimpleFunction(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& orig_opts) {
fixture->Init({test::function::FindDevice()});
FunctionLibraryRuntime::InstantiateOptions opts_copy = orig_opts;
opts_copy.input_devices.clear();
FunctionLibraryRuntime::Handle h;
TF_CHECK_OK(fixture->Instantiate(
"FindDevice", {{"_target", "/job:b/replica:0/task:0/device:CPU:0"}},
opts_copy, &h));
}
void TestControlFlow(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts) {
fixture->Init({test::function::ControlFlow()});
FunctionLibraryRuntime::Options opts;
Tensor x1 = test::AsTensor<float>({3, 5, 17, 257});
if (absl::StrContains(inst_opts.input_devices[0], "GPU")) {
x1 = fixture->CPUToGPU(x1);
}
Tensor y1;
TF_CHECK_OK(fixture->Run("ControlFlow", opts, {}, inst_opts, {x1}, {&y1}));
if (absl::StrContains(inst_opts.output_devices[0], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y1));
y1 = fixture->GPUToCPU(y1);
}
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({3, 5, 17, 257}));
}
void TestTwoDeviceInputOutput(
ProcessFunctionLibraryRuntimeTest* fixture,
const FunctionLibraryRuntime::InstantiateOptions& inst_opts) {
if (fixture->gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
fixture->Init({test::function::TwoDeviceInputOutput()});
FunctionLibraryRuntime::Options opts;
Tensor x1 = test::AsTensor<float>({1, 2});
if (absl::StrContains(inst_opts.input_devices[0], "GPU")) {
x1 = fixture->CPUToGPU(x1);
}
Tensor x2 = test::AsTensor<float>({10, 20});
if (absl::StrContains(inst_opts.input_devices[1], "GPU")) {
x2 = fixture->CPUToGPU(x2);
}
Tensor y1;
Tensor y2;
TF_CHECK_OK(fixture->Run("TwoDeviceInputOutput", opts, {{"T", DT_FLOAT}},
inst_opts, {x1, x2}, {&y1, &y2}));
if (absl::StrContains(inst_opts.output_devices[0], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y1));
y1 = fixture->GPUToCPU(y1);
} else {
EXPECT_FALSE(IsCUDATensor(y1));
}
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({2, 4}));
if (absl::StrContains(inst_opts.output_devices[1], "GPU")) {
EXPECT_TRUE(IsCUDATensor(y2));
y2 = fixture->GPUToCPU(y2);
} else {
EXPECT_FALSE(IsCUDATensor(y2));
}
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({30, 60}));
}
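// Expands short device names such as "CPU:0" to fully qualified names on
// /job:a/replica:0/task:0 and packages them into multi-device
// InstantiateOptions.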
std::vector<string> CompleteDevices(const std::vector<string>& v) {
std::vector<string> result;
result.reserve(v.size());
for (const string& s : v) {
result.push_back(strings::StrCat("/job:a/replica:0/task:0/device:", s));
}
return result;
}
FunctionLibraryRuntime::InstantiateOptions MakeOptions(
const string& target, const std::vector<string>& input_devices,
const std::vector<string>& output_devices) {
FunctionLibraryRuntime::InstantiateOptions inst_opts;
inst_opts.target = target;
inst_opts.input_devices = CompleteDevices(input_devices);
inst_opts.output_devices = CompleteDevices(output_devices);
inst_opts.is_multi_device_function = true;
return inst_opts;
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ExplicitOutputDevice) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_InferredOutputDevice) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0"}, {}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenNoInputDevices) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {}, {}),
"input_devices must have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenTooManyInputDevices) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
TestTwoDeviceMult(this, MakeOptions("CPU:0", {"CPU:0", "CPU:1"}, {}),
"input_devices must have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenTooManyOutputDevices) {
TestTwoDeviceMult(
this, MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0", "CPU:1"}),
"output_devices must either be empty or have the same length");
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ErrorWhenBadTargetDevice) {
TestTwoDeviceMult(
this, MakeOptions("GPU:11", {"CPU:0"}, {"CPU:0", "GPU:0"}),
"Cannot instantiate multi-device function with target device GPU:11");
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenListInput) {
const FunctionDef& def = test::function::FuncWithListInput();
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"FuncWithListInput", test::function::Attrs({{"T", DT_FLOAT}, {"N", 1}}),
MakeOptions("CPU:0", {"CPU:0"}, {}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(
status.message(),
"FuncWithListInput has an input named \"x1\" that is a list of tensors"))
<< "Actual error message: " << status.message();
}
TEST_F(ProcessFunctionLibraryRuntimeTest, FullTypeForInt32) {
FunctionDef def = test::function::XTimesTwoInt32();
def.mutable_node_def(2)->mutable_experimental_type()->set_type_id(
TFT_PRODUCT);
def.mutable_node_def(2)->mutable_experimental_type()->add_args()->set_type_id(
TFT_TENSOR);
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status =
proc_flr_->Instantiate("XTimesTwoInt32", test::function::Attrs({}),
MakeOptions("CPU:0", {"CPU:0"}, {}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
EXPECT_TRUE(absl::StrContains(
status.message(),
"in 'ProcessFunctionLibraryRuntime::InstantiateMultiDevice' has "
"TFT_TENSOR output 0 which has 0 args instead of 1"));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ErrorWhenListOutput) {
const FunctionDef& def = test::function::FuncWithListOutput();
Init({def});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"FuncWithListOutput", test::function::Attrs({{"T", DT_FLOAT}, {"N", 1}}),
MakeOptions("CPU:0", {}, {"CPU:0"}), &handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(
status.message(),
"FuncWithListOutput has an output named \"y\" that is a list of tensors"))
<< "Actual error message: " << status.message();
}
TEST_F(ProcessFunctionLibraryRuntimeTest,
MultiDevice_ExplicitMultiInputOutput) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"CPU:0", "GPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipInputs) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"CPU:0", "GPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipOutputs) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"CPU:0", "GPU:0"}, {"GPU:0", "CPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_FlipBoth) {
TestTwoDeviceInputOutput(
this, MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"GPU:0", "CPU:0"}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_EmptyBodySwap) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "CPU:0"}, {"CPU:0", "GPU:0"});
Init({test::function::EmptyBodySwap()});
Tensor x1 = CPUToGPU(test::AsTensor<float>({1, 2}));
Tensor x2 = test::AsTensor<float>({10, 20});
Tensor y1;
Tensor y2;
TF_CHECK_OK(Run("EmptyBodySwap", {}, {{"T", DT_FLOAT}}, inst_opts, {x1, x2},
{&y1, &y2}));
EXPECT_FALSE(IsCUDATensor(y1));
test::ExpectTensorEqual<float>(y1, test::AsTensor<float>({10, 20}));
EXPECT_TRUE(IsCUDATensor(y2));
y2 = GPUToCPU(y2);
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({1, 2}));
}
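// Builds a scalar DT_RESOURCE tensor referencing a Var named `var_name` in
// `container` on `device_name`.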
Tensor GetResourceHandle(const string& var_name, const string& container,
const string& device_name) {
ResourceHandle handle;
handle.set_device(device_name);
handle.set_container(container);
handle.set_name(var_name);
handle.set_hash_code(TypeIndex::Make<Var>().hash_code());
handle.set_maybe_type_name(TypeIndex::Make<Var>().name());
Tensor tensor(DT_RESOURCE, TensorShape({}));
tensor.scalar<ResourceHandle>()() = handle;
return tensor;
}
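// A function that reads the same resource argument on CPU:0 and CPU:1 and
// adds the two values; with a composite-device (packed) argument each read
// resolves to a different underlying variable.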
FunctionDef AddVarAcrossDevices() {
return FunctionDefHelper::Create(
"AddVarAcrossDevices",
{"x: resource"},
{"y: float"},
{},
{
{{"read0"},
"ReadVariableOp",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/device:CPU:0"},
{{"read1"},
"ReadVariableOp",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/device:CPU:1"},
{{"add"},
"Add",
{"read0:value:0", "read1:value:0"},
{{"T", DT_FLOAT}},
{},
"/device:CPU:0"},
},
{{"y", "add:z:0"}});
}
class TestFunctionPackedArgs : public FunctionArgsInterface {
public:
TestFunctionPackedArgs(const int index,
absl::InlinedVector<TensorValue, 4UL>&& tensor_args) {
packed_args_.emplace(index, std::move(tensor_args));
}
  ~TestFunctionPackedArgs() override {}
  bool HasRemoteOrPackedInputs() const override { return true; }
Status GetLocalArg(const FunctionArgIndex& index,
Tensor* val) const override {
*val = *packed_args_.at(index.index).at(index.sub_index).tensor;
return absl::OkStatus();
  }
std::vector<Tensor> GetLocalTensors() const override { return {}; }
private:
absl::flat_hash_map<int, absl::InlinedVector<TensorValue, 4UL>> packed_args_;
};
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_CompositeDevice) {
Init({AddVarAcrossDevices()});
const Tensor initial_resource_value0 = test::AsTensor<float>({10, 20});
Var* resource0 = new Var(DT_FLOAT);
*resource0->tensor() = initial_resource_value0;
resource0->is_initialized = true;
const Tensor initial_resource_value1 = test::AsTensor<float>({30, 40});
Var* resource1 = new Var(DT_FLOAT);
*resource1->tensor() = initial_resource_value1;
resource1->is_initialized = true;
ResourceMgr* mgr0 = device0_->resource_manager();
ResourceMgr* mgr1 = device1_->resource_manager();
TF_ASSERT_OK(mgr0->Create(mgr0->default_container(), "var", resource0));
TF_ASSERT_OK(mgr1->Create(mgr1->default_container(), "var", resource1));
Tensor resource_handle0 =
GetResourceHandle("var", mgr0->default_container(), device0_->name());
Tensor resource_handle1 =
GetResourceHandle("var", mgr1->default_container(), device1_->name());
Status s;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice({device0_->name(), device1_->name()},
0,
device_mgr_->HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
AddCompositeDevice(composite_device.get());
FunctionLibraryRuntime::Options opts;
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"COMPOSITE:0"}, {"CPU:0"});
inst_opts.composite_devices[composite_device->name()] =
composite_device->underlying_devices();
inst_opts.input_resource_dtypes_and_shapes[0] = {
initial_resource_value0.dtype(), initial_resource_value0.shape()};
{
absl::InlinedVector<TensorValue, 4UL> handles;
handles.push_back(TensorValue(&resource_handle0));
handles.push_back(TensorValue(&resource_handle1));
TestFunctionPackedArgs args(0, std::move(handles));
FunctionRet ret;
TF_CHECK_OK(RunWithPackedArgs("AddVarAcrossDevices", opts,
{{"T", DT_FLOAT}}, inst_opts, args, {&ret}));
EXPECT_EQ(ret.index(), 0);
test::ExpectTensorEqual<float>(absl::get<Tensor>(ret),
test::AsTensor<float>({40, 60}));
}
{
Tensor arg(DT_RESOURCE, TensorShape({2}));
arg.flat<ResourceHandle>()(0) = resource_handle0.scalar<ResourceHandle>()();
arg.flat<ResourceHandle>()(1) = resource_handle1.scalar<ResourceHandle>()();
Tensor ret;
TF_CHECK_OK(Run("AddVarAcrossDevices", opts, {{"T", DT_FLOAT}}, inst_opts,
{arg}, {&ret}));
test::ExpectTensorEqual<float>(ret, test::AsTensor<float>({40, 60}));
}
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_ResourceOutput_GPU) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "GPU:0"}, {"GPU:0", "GPU:0"});
Init({test::function::ResourceOutput(),
test::function::ReadResourceVariable()});
Tensor resource_value = CPUToGPU(test::AsTensor<float>({10, 20}));
Var* resource = new Var(DT_FLOAT);
*resource->tensor() = resource_value;
resource->is_initialized = true;
ResourceMgr* mgr = gpu_device_->resource_manager();
Status status = mgr->Create(mgr->default_container(), "my_gpu_var", resource);
ASSERT_TRUE(status.ok()) << status.message();
FunctionLibraryRuntime::Options opts;
Tensor x1 = CPUToGPU(test::AsTensor<float>({1, 2}));
Tensor x2 = GetResourceHandle("my_gpu_var", mgr->default_container(),
"/job:a/replica:0/task:0/device:GPU:0");
Tensor returned_handle;
Tensor y2;
TF_CHECK_OK(Run("ResourceOutput", opts, {{"T", DT_FLOAT}}, inst_opts,
{x1, x2}, {&returned_handle, &y2}));
EXPECT_FALSE(IsCUDATensor(returned_handle));
EXPECT_TRUE(IsCUDATensor(y2));
y2 = GPUToCPU(y2);
test::ExpectTensorEqual<float>(y2, test::AsTensor<float>({2, 4}));
inst_opts = MakeOptions("GPU:0", {"GPU:0"}, {"GPU:0"});
Tensor read_resource;
TF_CHECK_OK(Run("ReadResourceVariable", opts, {{"T", DT_FLOAT}}, inst_opts,
{returned_handle}, {&read_resource}));
EXPECT_TRUE(IsCUDATensor(read_resource));
read_resource = GPUToCPU(read_resource);
test::ExpectTensorEqual<float>(read_resource,
test::AsTensor<float>({10, 20}));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_PlacerError) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0", "GPU:0"}, {"CPU:0", "GPU:0"});
Init({test::function::ResourceOutput(),
test::function::ReadResourceVariable()});
FunctionLibraryRuntime::Handle handle;
Status status = proc_flr_->Instantiate(
"ResourceOutput", test::function::Attrs({{"T", DT_FLOAT}}), inst_opts,
&handle);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
ASSERT_TRUE(absl::StrContains(status.message(), "Cannot place"));
}
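// An op whose kernel construction always fails, used to check that
// create_kernels_eagerly surfaces kernel errors at instantiation time.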
REGISTER_OP("BrokenOp")
.Input("in: T")
.Output("out: T")
.Attr("T: type")
.SetShapeFn(shape_inference::UnknownShape);
class BrokenOp : public OpKernel {
public:
explicit BrokenOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
ctx->SetStatus(errors::Internal("I am broken"));
}
void Compute(OpKernelContext* ctx) override {
ctx->SetStatus(errors::Internal("I am broken"));
}
};
REGISTER_KERNEL_BUILDER(Name("BrokenOp").Device(DEVICE_CPU), BrokenOp);
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_CreateKernelsEagerly) {
auto T = DT_INT32;
FunctionDef broken_func = FunctionDefHelper::Define(
"Broken",
{"x: int32"},
{"y: int32"},
{},
{{{"y"}, "BrokenOp", {"x"}, {{"T", T}}}});
Init({broken_func});
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0"});
FunctionLibraryRuntime::Handle handle;
TF_CHECK_OK(Instantiate("Broken", {{"T", DT_INT32}}, inst_opts, &handle));
TF_CHECK_OK(proc_flr_->ReleaseHandle(handle));
inst_opts.create_kernels_eagerly = true;
Status status = Instantiate("Broken", {{"T", DT_INT32}}, inst_opts, &handle);
EXPECT_TRUE(errors::IsInternal(status));
}
TEST_F(ProcessFunctionLibraryRuntimeTest, MultiDevice_StateHandle) {
auto T = DT_INT32;
FunctionDef stateful_func = FunctionDefHelper::Define(
"RandomUniformWrapper",
{"x: resource"},
{"y: int32"},
{},
{FunctionDefHelper::Const<int32>("shape", absl::Span<const int32>({1})),
FunctionDefHelper::Const<int32>("minval", 0),
{{"maxval"}, "ReadVariableOp", {"x"}, {{"dtype", T}}, {}},
{{"y"},
"RandomUniformInt",
{"shape", "minval", "maxval"},
{{"seed", 37}, {"seed2", 48}, {"Tout", T}, {"T", T}}}});
Init({stateful_func});
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
ResourceMgr* mgr = gpu_device_->resource_manager();
Tensor resource_value = CPUToGPU(test::AsScalar<int>(10));
Var* resource = new Var(T);
*resource->tensor() = resource_value;
resource->is_initialized = true;
Status status = mgr->Create(mgr->default_container(), "my_gpu_var", resource);
ASSERT_TRUE(status.ok()) << status.message();
Tensor x = GetResourceHandle("my_gpu_var", mgr->default_container(),
"/job:a/replica:0/task:0/device:GPU:0");
Tensor y;
FunctionLibraryRuntime::InstantiateOptions inst_opts =
MakeOptions("CPU:0", {"GPU:0"}, {"CPU:0"});
FunctionLibraryRuntime::Handle handle;
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&handle));
for (auto expected : {6, 4}) {
TF_CHECK_OK(RunInstantiated(handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
FunctionLibraryRuntime::Handle other_handle;
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_EQ(handle, other_handle);
for (auto expected : {0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_1";
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_2";
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}}, inst_opts,
&other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
inst_opts.state_handle = "handle_3";
for (int i = 0; i < 2; ++i) {
TF_CHECK_OK(Instantiate("RandomUniformWrapper", {{"T", DT_INT32}},
inst_opts, &other_handle));
EXPECT_NE(handle, other_handle);
for (auto expected : {6, 4, 0, 1}) {
TF_CHECK_OK(RunInstantiated(other_handle, {}, {x}, {&y}));
test::ExpectTensorEqual<int>(y, test::AsTensor<int>({expected}));
}
TF_CHECK_OK(proc_flr_->ReleaseHandle(other_handle));
}
}
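// A stateful op that echoes the session metadata visible to its kernel, so
// tests can verify that metadata is plumbed through the runtime.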
REGISTER_OP("SessionMetadataReader")
.Input("x: int64")
.Output("y: string")
.SetIsStateful()
.Doc(R"doc(SessionMetadataReader returns the session metadata.
x: int64
y: string
)doc");
class SessionMetadataReaderOp : public OpKernel {
public:
explicit SessionMetadataReaderOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor* out_tensor = nullptr;
OP_REQUIRES_OK(ctx,
ctx->allocate_output("y", TensorShape({}), &out_tensor));
if (ctx->session_metadata() != nullptr) {
out_tensor->scalar<tstring>()() =
tsl::LegacyUnredactedDebugString(*ctx->session_metadata());
} else {
out_tensor->scalar<tstring>()() = "";
}
}
};
REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_CPU),
SessionMetadataReaderOp);
FunctionDef SessionMetadataReaderOpFn() {
return FunctionDefHelper::Define(
"SessionMetadataReaderFn",
{"x: int64"},
{"y: string"},
{},
{{{"y"}, "SessionMetadataReader", {"x"}, {}}});
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataAbsent) {
Init({SessionMetadataReaderOpFn()}, nullptr);
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
TF_CHECK_OK(
Run("SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y}));
EXPECT_EQ("", y.scalar<tstring>()());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataPresent) {
const SessionMetadata session_metadata = GenerateSessionMetadata();
Init({SessionMetadataReaderOpFn()}, &session_metadata);
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
TF_CHECK_OK(
Run("SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y}));
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(y.scalar<tstring>()(),
&read_metadata));
EXPECT_EQ(session_metadata.name(), read_metadata.name());
EXPECT_EQ(session_metadata.version(), read_metadata.version());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, CompositeDevicesAfterCloning) {
Init({AddVarAcrossDevices()});
Status s;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice({device0_->name(), device1_->name()},
0,
device_mgr_->HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
AddCompositeDevice(composite_device.get());
auto* flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
ASSERT_NE(nullptr, flr);
std::unique_ptr<FunctionLibraryDefinition> cloned_lib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> cloned_proc_flr;
FunctionLibraryRuntime* cloned_flr;
TF_ASSERT_OK(flr->Clone(&cloned_lib_def, &cloned_proc_flr, &cloned_flr));
EXPECT_EQ(
cloned_proc_flr->device_set()->FindDeviceByName(composite_device->name()),
composite_device.get());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SessionMetadataPresentAfterCloning) {
const SessionMetadata session_metadata = GenerateSessionMetadata();
Init({SessionMetadataReaderOpFn()}, &session_metadata);
auto* flr = proc_flr_->GetFLR("/job:a/replica:0/task:0/cpu:0");
ASSERT_NE(nullptr, flr);
std::unique_ptr<FunctionLibraryDefinition> cloned_lib_def;
std::unique_ptr<ProcessFunctionLibraryRuntime> cloned_proc_flr;
FunctionLibraryRuntime* cloned_flr;
TF_ASSERT_OK(flr->Clone(&cloned_lib_def, &cloned_proc_flr, &cloned_flr));
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:a/replica:0/task:0/cpu:0";
opts.remote_execution = true;
FunctionLibraryRuntime::InstantiateOptions instantiate_opts;
instantiate_opts.target = "/job:a/replica:0/task:0/cpu:0";
const auto x = test::AsTensor<int64_t>({17});
Tensor y;
Status s = RunWithRuntime<std::vector<Tensor>, Tensor>(
"SessionMetadataReaderFn", opts, {}, instantiate_opts, {x}, {&y},
cloned_proc_flr.get());
TF_CHECK_OK(s);
SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(y.scalar<tstring>()(),
&read_metadata));
EXPECT_EQ(session_metadata.name(), read_metadata.name());
EXPECT_EQ(session_metadata.version(), read_metadata.version());
}
TEST_F(ProcessFunctionLibraryRuntimeTest, SimpleGraphAllowsSync) {
auto async_safe =
metrics::TestDelta("subgraph_async_summary", "safe_for_sync");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {}, {});
opts.allow_small_function_optimizations = true;
TestInstantiateSimpleFunction(this, opts);
EXPECT_GT(async_safe.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, UnsafeOpRequiresAsync) {
auto async_safe =
metrics::TestDelta("subgraph_async_summary", "safe_for_sync");
auto async_unsafe_op =
metrics::TestDelta("subgraph_async_summary", "unsafe_op");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0"});
opts.allow_small_function_optimizations = true;
TestControlFlow(this, opts);
EXPECT_EQ(async_safe.Get(), 0);
EXPECT_GT(async_unsafe_op.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, PartitionedGraphRequiresAsync) {
if (gpu_device_ == nullptr) {
GTEST_SKIP() << "No GPUs available";
}
auto async_send_only =
metrics::TestDelta("subgraph_async_summary", "send_only");
auto async_recv_only =
metrics::TestDelta("subgraph_async_summary", "recv_only");
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {"CPU:0"}, {"CPU:0", "GPU:0"});
opts.allow_small_function_optimizations = true;
TestTwoDeviceMult(this, opts);
EXPECT_GT(async_send_only.Get(), 0);
EXPECT_GT(async_recv_only.Get(), 0);
}
TEST_F(ProcessFunctionLibraryRuntimeTest, RecordAotSavingTimeAndHitCount) {
FunctionLibraryRuntime::InstantiateOptions opts =
MakeOptions("CPU:0", {}, {});
opts.allow_small_function_optimizations = true;
FunctionLibraryRuntime::Handle h;
OptimizedFunctionGraph optimized_graph_proto;
optimized_graph_proto.set_name("FindDevice");
optimized_graph_proto.set_optimization_time_usecs(10);
Init({test::function::FindDevice()}, nullptr,
{optimized_graph_proto});
Instantiate("FindDevice",
{{"_target", "/job:b/replica:0/task:0/device:CPU:0"}}, opts, &h)
.IgnoreError();
EXPECT_EQ(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kAot),
10);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kAot),
1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_function_library_runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_function_library_runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff9480bd-20ca-4f9a-97b5-2962374a7103 | cpp | tensorflow/tensorflow | rendezvous_util | tensorflow/core/common_runtime/rendezvous_util.cc | tensorflow/core/common_runtime/rendezvous_util_test.cc | #include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/reffed_status_callback.h"
namespace tensorflow {
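// Sends tensors_to_send[i] to `rendezvous` under keys[i]. `alloc_attrs` must
// be empty or match `keys` in length; any parse or send failure aborts early.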
Status SendTensorsToRendezvous(
RendezvousInterface* rendezvous, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
const std::vector<string>& keys, absl::Span<const Tensor> tensors_to_send) {
if (keys.size() != tensors_to_send.size()) {
return errors::InvalidArgument(
"keys and tensors_to_send are not the same size. keys.size() = ",
keys.size(), "; tensors_to_send.size() = ", tensors_to_send.size());
}
if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) {
return errors::InvalidArgument(
"keys and alloc_attrs are not the same size. ",
"keys.size() = ", keys.size(),
"; alloc_attrs.size() = ", alloc_attrs.size());
}
if (!rendezvous) {
return errors::InvalidArgument("Rendezvous is null.");
}
Rendezvous::ParsedKey parsed;
for (int i = 0; i < keys.size(); ++i) {
Rendezvous::Args rendez_args;
rendez_args.device_context = device_context;
if (!alloc_attrs.empty()) {
rendez_args.alloc_attrs = alloc_attrs[i];
}
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(keys[i], &parsed));
TF_RETURN_IF_ERROR(
rendezvous->Send(parsed, rendez_args, tensors_to_send[i], false));
}
return absl::OkStatus();
}
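// Receives one tensor per key asynchronously. A ReffedStatusCallback fans in
// the per-key RecvAsync completions and invokes `done` exactly once, with a
// non-OK status if any receive failed or returned a dead tensor.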
void RecvOutputsFromRendezvousAsync(
RendezvousInterface* rendezvous, DeviceContext* device_context,
const std::vector<AllocatorAttributes>& alloc_attrs,
const std::vector<string>& keys, std::vector<Tensor>* received_tensors,
StatusCallback done) {
if (keys.empty()) {
done(absl::OkStatus());
return;
}
  if (!alloc_attrs.empty() && (keys.size() != alloc_attrs.size())) {
    done(errors::InvalidArgument(
        "keys and alloc_attrs are not the same size. ", "keys.size() = ",
        keys.size(), "; alloc_attrs.size() = ", alloc_attrs.size()));
    return;
  }
received_tensors->reserve(keys.size());
std::vector<
std::tuple<string, Tensor*, Rendezvous::ParsedKey, AllocatorAttributes>>
arguments;
for (int i = 0; i < keys.size(); ++i) {
Rendezvous::ParsedKey parsed;
Status s = Rendezvous::ParseKey(keys[i], &parsed);
received_tensors->push_back(Tensor());
if (!s.ok()) {
done(s);
return;
}
AllocatorAttributes alloc_attr;
if (!alloc_attrs.empty()) {
alloc_attr = alloc_attrs[i];
}
arguments.emplace_back(keys[i], &((*received_tensors)[i]), parsed,
alloc_attr);
}
auto status_cb = new ReffedStatusCallback(std::move(done));
for (auto& p : arguments) {
const string& key = std::get<0>(p);
Tensor* val = std::get<1>(p);
Rendezvous::ParsedKey parsed = std::get<2>(p);
Rendezvous::Args rendez_args;
rendez_args.device_context = device_context;
rendez_args.alloc_attrs = std::get<3>(p);
status_cb->Ref();
rendezvous->RecvAsync(
parsed, rendez_args,
[val, key, status_cb](const Status& s,
const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args,
const Tensor& v, const bool is_dead) {
Status status = s;
if (status.ok()) {
*val = v;
if (is_dead) {
status = errors::InvalidArgument("The tensor returned for ", key,
" was not valid.");
}
}
status_cb->UpdateStatus(status);
status_cb->Unref();
});
}
status_cb->Unref();
}
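// Blocking variant: fills each entry of `out` by its key, treating a dead
// tensor as an error.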
Status RecvOutputsFromRendezvous(RendezvousInterface* rendezvous,
NamedTensors* out,
const Rendezvous::Args& args) {
Rendezvous::ParsedKey parsed;
for (auto& p : *out) {
const string& key = p.first;
Tensor* val = &p.second;
bool is_dead = false;
TF_RETURN_IF_ERROR(Rendezvous::ParseKey(key, &parsed));
TF_RETURN_IF_ERROR(rendezvous->Recv(parsed, args, val, &is_dead));
if (is_dead) {
return errors::InvalidArgument("The tensor returned for ", key,
" was not valid.");
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/rendezvous_util.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RendezvousUtilTest : public ::testing::Test {
public:
RendezvousUtilTest() { rendez_ = NewLocalRendezvous(); }
~RendezvousUtilTest() override { rendez_->Unref(); }
Rendezvous* rendez_;
};
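// Helpers to wrap/unwrap scalar string tensors and to build rendezvous keys
// for a CPU->GPU transfer on the local task.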
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
string MakeStringKey(const string& name) {
return Rendezvous::CreateKey(
"/job:localhost/replica:0/task:0/device:CPU:0", 0,
"/job:localhost/replica:0/task:0/device:GPU:0", name, FrameAndIter(0, 0));
}
TEST_F(RendezvousUtilTest, SendBeforeRecv) {
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n](const Status& status) { n.Notify(); });
n.WaitForNotification();
EXPECT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
TEST_F(RendezvousUtilTest, RecvBeforeSend) {
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n](const Status& status) { n.Notify(); });
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
n.WaitForNotification();
EXPECT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
TEST(RendezvousUtilCallerThreadTest, RecvBeforeSend) {
Rendezvous* rendez_ = NewLocalRendezvous();
Notification n;
std::vector<Tensor> received_keys;
RecvOutputsFromRendezvousAsync(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
&received_keys, [&n, rendez_](const Status& status) {
rendez_->Unref();
n.Notify();
});
TF_ASSERT_OK(SendTensorsToRendezvous(
rendez_, nullptr, {}, {MakeStringKey("hello1"), MakeStringKey("hello2")},
{V("hello1"), V("hello2")}));
n.WaitForNotification();
ASSERT_EQ(2, received_keys.size());
EXPECT_EQ("hello1", V(received_keys[0]));
EXPECT_EQ("hello2", V(received_keys[1]));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/rendezvous_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/rendezvous_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e216e1c-4bba-4884-a26e-b3fea70d8fbb | cpp | tensorflow/tensorflow | quantize_training | tensorflow/core/common_runtime/quantize_training.cc | tensorflow/core/common_runtime/quantize_training_test.cc | #include "tensorflow/core/common_runtime/quantize_training.h"
#include <algorithm>
#include <atomic>
#include <set>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const uint32 kAllowedInputs = 2;
const float kEMADecay = 0.999;
const auto* nodes_to_rewrite =
new std::unordered_set<string, StringPieceHasher>{"MatMul", "Conv2D"};
struct EdgeToConvert {
const Edge* edge;
int32 num_bits;
bool signed_input;
bool range_given;
float input_min;
float input_max;
EdgeToConvert(const Edge* e, int32_t bits, bool sign, bool range, float min,
float max)
: edge(e),
num_bits(bits),
signed_input(sign),
range_given(range),
input_min(min),
input_max(max) {}
};
inline bool IsGradientNode(const Graph* graph, const Node* node) {
static const string tag = "gradients";
return (node->name().compare(0, tag.size(), tag) == 0);
}
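// Inspects `node` (recursing through shape-preserving ops such as Reshape,
// Identity and pooling) to pick quantization parameters for its output:
// whether values are signed and whether a fixed [input_min, input_max] range
// is known (e.g. Relu6 -> [0, 6], Tanh -> [-1, 1]). Returns false for
// unrecognized op types after defaulting to signed with no known range.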
bool FindType(const Graph* graph, const Node* node, bool* signed_input,
bool* range_given, float* input_min, float* input_max) {
const string& src_op = node->type_string();
if (src_op == "Const" || src_op == "Variable" || src_op == "VariableV2") {
*signed_input = true;
*range_given = false;
} else if (src_op == "Relu") {
*signed_input = false;
*range_given = false;
} else if (src_op == "Relu6") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 6;
} else if (src_op == "Sigmoid") {
*signed_input = false;
*range_given = true;
*input_min = 0;
*input_max = 1;
} else if (src_op == "Tanh") {
*signed_input = true;
*range_given = true;
*input_min = -1;
*input_max = 1;
} else if (src_op == "Reshape" || src_op == "ConcatV2") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot && edge->dst_input() == 0) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else if (src_op == "Identity" || src_op == "MaxPool" ||
src_op == "AvgPool" || src_op == "MaxPool3D" ||
src_op == "AvgPool3D") {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() != Graph::kControlSlot) {
FindType(graph, edge->src(), signed_input, range_given, input_min,
input_max);
}
}
} else {
*signed_input = true;
*range_given = false;
return false;
}
return true;
}
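// Finds the single SaveV2 op in the graph, if present, along with its input
// edges; graphs with more than one SaveV2 are rejected.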
Status FindSaveOp(const Graph* graph, Node** save_op,
std::vector<const Edge*>* in_edges, bool* found) {
*found = false;
for (Node* node : graph->op_nodes()) {
if (node->type_string() == "SaveV2") {
if (*found) {
return errors::InvalidArgument("Input graph has multiple SaveV2 ops.");
}
*save_op = node;
*found = true;
TF_RETURN_IF_ERROR(node->input_edges(in_edges));
}
}
return absl::OkStatus();
}
Node* FindRestoreAllOp(const Graph* graph, StringPiece save_prefix) {
for (Node* node : graph->op_nodes()) {
if (node->name() == strings::StrCat(save_prefix, "/restore_all")) {
return node;
}
}
return nullptr;
}
StringPiece GetNodeNamePrefix(const Node* node) {
StringPiece name = node->name();
return name.substr(0, name.rfind('/'));
}
void FillStringTensor(Tensor* dst, const Tensor& src) {
auto dst_flat = dst->flat<tstring>();
auto src_flat = src.flat<tstring>();
for (int i = 0; i < src.NumElements(); i++) {
dst_flat(i) = src_flat(i);
}
}
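// Rebuilds the SaveV2 op so that the variables added during rewriting are
// also checkpointed: extends the tensor_names and shape_and_slices constants
// and appends the new variables as inputs of a replacement save op.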
Status ConnectVariablesToSaveOp(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& added_variables) {
Node* tensor_names_op = in_edges[1]->src();
Node* shape_and_slices_op = in_edges[2]->src();
Tensor tensor_names;
Tensor shape_and_slices;
TF_RETURN_IF_ERROR(
GetNodeAttr(tensor_names_op->attrs(), "value", &tensor_names));
TF_RETURN_IF_ERROR(
GetNodeAttr(shape_and_slices_op->attrs(), "value", &shape_and_slices));
int tn_size = tensor_names.NumElements();
int var_size = added_variables.size();
NodeBuilder save_op_builder =
NodeBuilder(save_op->name(), save_op->type_string());
for (int i = 0; i < 3; i++) {
save_op_builder = save_op_builder.Input(in_edges[i]->src());
}
std::vector<NodeBuilder::NodeOut> var_nodeouts;
var_nodeouts.reserve(tn_size + var_size);
for (int i = 3; i < in_edges.size(); i++) {
var_nodeouts.emplace_back(in_edges[i]->src());
}
Tensor new_tensor_names(DT_STRING, TensorShape({tn_size + var_size}));
Tensor new_shape_and_slices(DT_STRING, TensorShape({tn_size + var_size}));
FillStringTensor(&new_tensor_names, tensor_names);
FillStringTensor(&new_shape_and_slices, shape_and_slices);
for (int i = 0; i < var_size; i++) {
Node* var = added_variables[i];
new_tensor_names.flat<tstring>()(tn_size + i) = var->name();
new_shape_and_slices.flat<tstring>()(tn_size + i) = "";
var_nodeouts.emplace_back(var);
}
save_op_builder = save_op_builder.Input(var_nodeouts);
tensor_names_op->AddAttr("value", new_tensor_names);
shape_and_slices_op->AddAttr("value", new_shape_and_slices);
Node* new_save_op;
TF_RETURN_IF_ERROR(save_op_builder.Finalize(graph, &new_save_op));
for (const Edge* edge : save_op->out_edges()) {
graph->AddControlEdge(new_save_op, edge->dst());
}
graph->RemoveNode(save_op);
return absl::OkStatus();
}
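// For each added variable, creates a RestoreV2 + Assign pair and wires it
// into the existing <prefix>/restore_all NoOp so that checkpoint restore also
// restores the quantization range variables.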
Status AddRestoreVariableSubgraphs(Graph* graph, Node* save_op,
const std::vector<const Edge*>& in_edges,
const std::vector<Node*>& variables) {
Node* prefix_op = in_edges[0]->src();
StringPiece name_prefix = GetNodeNamePrefix(save_op);
Node* restore_all = FindRestoreAllOp(graph, name_prefix);
if (restore_all == nullptr) {
return errors::InvalidArgument("graph has SaveOp, but no restore_all NoOp");
}
const string restore_op_name = strings::StrCat(name_prefix, "/RestoreV2");
const string assign_op_name = strings::StrCat(name_prefix, "/Assign");
for (Node* var : variables) {
string new_restore_op_name =
strings::StrCat(graph->NewName(restore_op_name), "_qt");
string new_assign_op_name =
strings::StrCat(graph->NewName(assign_op_name), "_qt");
string tensor_names_op_name =
strings::StrCat(new_restore_op_name, "/tensor_names");
string shape_and_slices_op_name =
strings::StrCat(new_restore_op_name, "/shape_and_slices");
Node* tensor_names;
Tensor tensor_names_val(DT_STRING, TensorShape({1}));
tensor_names_val.flat<tstring>()(0) = var->name();
TF_RETURN_IF_ERROR(NodeBuilder(tensor_names_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", tensor_names_val)
.Finalize(graph, &tensor_names));
Node* shape_and_slices;
Tensor shape_and_slices_val(DT_STRING, TensorShape({1}));
shape_and_slices_val.flat<tstring>()(0) = "";
TF_RETURN_IF_ERROR(NodeBuilder(shape_and_slices_op_name, "Const")
.Attr("dtype", DT_STRING)
.Attr("value", shape_and_slices_val)
.Finalize(graph, &shape_and_slices));
Node* restore_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_restore_op_name, "RestoreV2")
.Input(prefix_op)
.Input(tensor_names)
.Input(shape_and_slices)
.Attr("dtypes", {DT_FLOAT})
.Finalize(graph, &restore_op));
Node* assign_op;
TF_RETURN_IF_ERROR(NodeBuilder(new_assign_op_name, "Assign")
.Input(var)
.Input(restore_op)
.Finalize(graph, &assign_op));
graph->AddControlEdge(assign_op, restore_all);
}
return absl::OkStatus();
}
Status AddSaveAndRestore(Graph* graph, const std::vector<Node*>& variables) {
Node* save_op = nullptr;
std::vector<const Edge*> in_edges;
bool found = false;
TF_RETURN_IF_ERROR(FindSaveOp(graph, &save_op, &in_edges, &found));
if (found) {
TF_RETURN_IF_ERROR(
AddRestoreVariableSubgraphs(graph, save_op, in_edges, variables));
TF_RETURN_IF_ERROR(
ConnectVariablesToSaveOp(graph, save_op, in_edges, variables));
}
return absl::OkStatus();
}
Status MakeReductionAxes(Graph* graph, string name_prefix, Node* input,
Node** output) {
name_prefix = strings::StrCat(name_prefix, "/ReductionAxes");
Node* start;
Tensor zero_tensor(DT_INT32, TensorShape());
zero_tensor.flat<int32>()(0) = 0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeStart"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", zero_tensor)
.Finalize(graph, &start));
Node* delta;
Tensor one_tensor(DT_INT32, TensorShape());
one_tensor.flat<int32>()(0) = 1;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/RangeDelta"), "Const")
.Attr("dtype", DT_INT32)
.Attr("value", one_tensor)
.Finalize(graph, &delta));
Node* rank;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputRank"), "Rank")
.Input(input)
.Finalize(graph, &rank));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ReductionAxes"), "Range")
.Input(start)
.Input(rank)
.Input(delta)
.Finalize(graph, output));
return absl::OkStatus();
}
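// Emits graph nodes computing the exponential moving average update
//   var -= (1 - decay) * (var - value)
// and returns the new value to assign to `update_variable` in *assign_value.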
Status MakeExponentialMovingAverage(Graph* graph, string name_prefix,
const NodeBuilder::NodeOut& input,
Node* decay, Node* update_variable,
Node** assign_value) {
name_prefix = strings::StrCat(name_prefix, "/EMA");
Node* one;
Tensor one_tensor(DT_FLOAT, TensorShape());
one_tensor.flat<float>()(0) = 1.0;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/OneConst"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", one_tensor)
.Finalize(graph, &one));
Node* decay_complement;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/DecayComplement"), "Sub")
.Input(one)
.Input(decay)
.Finalize(graph, &decay_complement));
Node* value_diff;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/ValueDiff"), "Sub")
.Input(update_variable)
.Input(input)
.Finalize(graph, &value_diff));
Node* update_value;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/UpdateValue"), "Mul")
.Input(value_diff)
.Input(decay_complement)
.Finalize(graph, &update_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/EMAValue"), "Sub")
.Input(update_variable)
.Input(update_value)
.Finalize(graph, assign_value));
return absl::OkStatus();
}
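// Creates a scalar float variable that is assigned `init_val` on first use
// (via IsVariableInitialized + Switch/Merge) and the EMA-updated value on
// subsequent steps.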
Status MakeInitializedEMAVariable(Graph* graph, const string& name, Node* decay,
Node* init_val,
std::vector<Node*>* added_variables,
Node** var) {
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/Variable"), "VariableV2")
.Attr("shape", TensorShape())
.Attr("dtype", DT_FLOAT)
.Finalize(graph, var));
added_variables->push_back(*var);
Node* is_initialized;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/IsInitialized"),
"IsVariableInitialized")
.Input(*var)
.Finalize(graph, &is_initialized));
Node* switch_node;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Switch"), "Switch")
.Input(init_val)
.Input(is_initialized)
.Finalize(graph, &switch_node));
NodeBuilder::NodeOut output_false = NodeBuilder::NodeOut(switch_node, 0);
NodeBuilder::NodeOut output_true = NodeBuilder::NodeOut(switch_node, 1);
Node* ema_value;
TF_RETURN_IF_ERROR(MakeExponentialMovingAverage(graph, name, output_true,
decay, *var, &ema_value));
Node* assign_value;
TF_RETURN_IF_ERROR(NodeBuilder(strings::StrCat(name, "/Merge"), "Merge")
.Input({output_false, ema_value})
.Finalize(graph, &assign_value));
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name, "/AssignValue"), "Assign")
.Input(*var)
.Input(assign_value)
.Finalize(graph, var));
return absl::OkStatus();
}
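// Reduces `input` to scalar min/max observations and tracks each with an
// EMA variable using the decay factor kEMADecay.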
Status MakeEMAMinMaxVars(Graph* graph, const string& name_prefix, Node* input,
std::vector<Node*>* added_variables, Node** min_var,
Node** max_var) {
Tensor decay_tensor(DT_FLOAT, TensorShape());
decay_tensor.flat<float>()(0) = kEMADecay;
Node* decay;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/Decay"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", decay_tensor)
.Finalize(graph, &decay));
Node* reduction_axes;
TF_RETURN_IF_ERROR(
MakeReductionAxes(graph, name_prefix, input, &reduction_axes));
Node* min;
string min_name = strings::StrCat(name_prefix, "/Min");
TF_RETURN_IF_ERROR(NodeBuilder(min_name, "Min")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &min));
Node* max;
string max_name = strings::StrCat(name_prefix, "/Max");
TF_RETURN_IF_ERROR(NodeBuilder(max_name, "Max")
.Input(input)
.Input(reduction_axes)
.Finalize(graph, &max));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, min_name, decay, min,
added_variables, min_var));
TF_RETURN_IF_ERROR(MakeInitializedEMAVariable(graph, max_name, decay, max,
added_variables, max_var));
return absl::OkStatus();
}
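// If the caller supplied a fixed range, materializes it as two Const nodes;
// otherwise falls back to the EMA-tracked min/max variables built above.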
Status MakeInputMinMax(Graph* graph, const string& name_prefix,
const EdgeToConvert& edge,
std::vector<Node*>* added_variables, Node** input_min,
Node** input_max) {
if (edge.range_given) {
Tensor input_min_tensor(DT_FLOAT, TensorShape());
input_min_tensor.flat<float>()(0) = edge.input_min;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMin"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_min_tensor)
.Finalize(graph, input_min));
Tensor input_max_tensor(DT_FLOAT, TensorShape());
input_max_tensor.flat<float>()(0) = edge.input_max;
TF_RETURN_IF_ERROR(
NodeBuilder(strings::StrCat(name_prefix, "/InputMax"), "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", input_max_tensor)
.Finalize(graph, input_max));
} else {
TF_RETURN_IF_ERROR(MakeEMAMinMaxVars(graph, name_prefix, edge.edge->src(),
added_variables, input_min,
input_max));
}
return absl::OkStatus();
}
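// Inserts the actual quantization node. Only QuantizeAndDequantizeV2 and
// FakeQuantWithMinMaxVars are supported; any other op type is rejected.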
Status MakeQuantizeOp(Graph* graph, const string& name_prefix,
const string& quant_op_type, const EdgeToConvert& edge,
std::vector<Node*>* added_variables,
Node** convert_node) {
Node* input_min;
Node* input_max;
TF_RETURN_IF_ERROR(MakeInputMinMax(graph, name_prefix, edge, added_variables,
&input_min, &input_max));
string quant_name = strings::StrCat(name_prefix, "/", quant_op_type);
if (quant_op_type == "QuantizeAndDequantizeV2") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("signed_input", edge.signed_input)
.Attr("num_bits", edge.num_bits)
.Attr("range_given", true)
.Finalize(graph, convert_node));
} else if (quant_op_type == "FakeQuantWithMinMaxVars") {
TF_RETURN_IF_ERROR(NodeBuilder(quant_name, quant_op_type)
.Input(edge.edge->src())
.Input(input_min)
.Input(input_max)
.Attr("num_bits", edge.num_bits)
.Finalize(graph, convert_node));
} else {
return errors::InvalidArgument("Unknown quant op type: ", quant_op_type);
}
return absl::OkStatus();
}
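// Reroutes every collected edge through a quantize op. Ops are keyed by the
// source node's name, so multiple consumers of the same tensor share a single
// quantize op rather than each getting its own copy.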
Status ProcessTargetEdges(Graph* graph, const string& quant_op_type,
const std::vector<EdgeToConvert>& target_edges) {
std::unordered_map<string, Node*, StringPieceHasher> name_index;
std::vector<Node*> added_variables;
for (const EdgeToConvert& edge : target_edges) {
Node* convert_node;
string name_prefix = edge.edge->src()->name();
auto iter = name_index.find(name_prefix);
if (iter == name_index.end()) {
TF_RETURN_IF_ERROR(MakeQuantizeOp(graph, name_prefix, quant_op_type, edge,
&added_variables, &convert_node));
name_index[name_prefix] = convert_node;
} else {
convert_node = iter->second;
}
graph->AddEdge(convert_node, 0, edge.edge->dst(), edge.edge->dst_input());
graph->RemoveEdge(edge.edge);
}
TF_RETURN_IF_ERROR(AddSaveAndRestore(graph, added_variables));
return absl::OkStatus();
}
}
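// Top-level rewrite: scans for inputs to the rewrite-eligible ops (the
// nodes_to_rewrite set, which is presumably defined earlier in this file),
// classifies each input's range via FindType, and quantizes the selected
// edges. Unknown producers are treated as model inputs, capped at
// kAllowedInputs.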
Status DoQuantizeTraining(int32_t num_bits, const string& quant_op_type,
Graph* graph) {
if (graph == nullptr) {
return errors::InvalidArgument("Cannot accept empty graph pointer.");
}
if (num_bits < 1 || num_bits > 63) {
return errors::OutOfRange("num_bits should be in range [1, 63] but is: ",
num_bits);
}
int potential_input = 0;
std::vector<EdgeToConvert> target_edges;
for (Node* node : graph->nodes()) {
if (nodes_to_rewrite->find(node->type_string()) !=
nodes_to_rewrite->end() &&
!IsGradientNode(graph, node)) {
for (const Edge* edge : node->in_edges()) {
if (edge->src_output() == Graph::kControlSlot) {
continue;
} else {
bool signed_input = false;
bool range_given = false;
float input_min = 0;
float input_max = 0;
bool known_op = FindType(graph, edge->src(), &signed_input,
&range_given, &input_min, &input_max);
if (!known_op) {
potential_input++;
if (potential_input > kAllowedInputs) {
return errors::Unimplemented(
"Found an unknown op: ", edge->src()->name(),
" with type: ", edge->src()->type_string(),
"; Unknown ops are considered as model input for now and "
"only ",
kAllowedInputs, " inputs are supported currently.");
}
}
target_edges.emplace_back(EdgeToConvert(
edge, num_bits, signed_input, range_given, input_min, input_max));
}
}
}
}
TF_RETURN_IF_ERROR(ProcessTargetEdges(graph, quant_op_type, target_edges));
return absl::OkStatus();
}
Status DoQuantizeTrainingOnGraphDef(const GraphDef& input_graphdef,
int32_t num_bits,
const string& quant_op_type,
GraphDef* result_graphdef) {
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, input_graphdef, &graph));
TF_RETURN_IF_ERROR(DoQuantizeTraining(num_bits, quant_op_type, &graph));
graph.ToGraphDef(result_graphdef);
return absl::OkStatus();
}
Status DoQuantizeTrainingOnSerializedGraphDef(const string& input_graph_string,
int32_t num_bits,
const string& quant_op_type,
string* result_graph_string) {
GraphDef input_graphdef;
if (!ParseProtoUnlimited(&input_graphdef, input_graph_string)) {
return errors::InvalidArgument(
"input_graph_string is not a serialized GraphDef protocol buffer");
}
GraphDef output_graphdef;
TF_RETURN_IF_ERROR(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, quant_op_type, &output_graphdef));
if (!output_graphdef.SerializeToString(result_graph_string)) {
return errors::Internal(
"quantize training transformation resulted in invalid GraphDef");
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/quantize_training.h"
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class QuantizeTrainingTest : public ::testing::Test {
protected:
QuantizeTrainingTest() { Reset(); }
void Reset() { g_.reset(new Graph(OpRegistry::Global())); }
template <typename T>
Node* Constant(gtl::ArraySlice<T> values, TensorShape shape) {
return test::graph::Constant(g_.get(), test::AsTensor(values, shape));
}
Status Placeholder(Graph* g, const string& name, TensorShape shape,
Node** out) {
TF_RETURN_IF_ERROR(NodeBuilder(name, "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", shape)
.Finalize(g, out));
return absl::OkStatus();
}
Status FindNode(Graph* g, const string& name, Node** out) {
for (Node* node : g->nodes()) {
if (node->name() == name) {
*out = node;
return absl::OkStatus();
}
}
return errors::Unimplemented("Node ", name, " not found.");
}
std::unique_ptr<Graph> g_;
};
TEST_F(QuantizeTrainingTest, SignedInput) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(63, g->num_nodes());
Node* identity_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&identity_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*identity_q_node->attrs().Find("signed_input")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("false",
SummarizeAttrValue(*relu_q_node->attrs().Find("signed_input")));
}
TEST_F(QuantizeTrainingTest, RangeGivenTrue) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, b);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(38, g->num_nodes());
Node* relu6_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu6->name(), "/QuantizeAndDequantizeV2"),
&relu6_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu6_q_node->attrs().Find("range_given")));
Node* relu_q_node;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&relu_q_node));
ASSERT_EQ("true",
SummarizeAttrValue(*relu_q_node->attrs().Find("range_given")));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/QuantizeAndDequantizeV2"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/QuantizeAndDequantizeV2"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/QuantizeAndDequantizeV2"), &found_node));
}
TEST_F(QuantizeTrainingTest, WithBackwardNodes_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* c = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
Node* d = Constant<float>({0.0, 1.0, 1.0, 0.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), b);
g->AddControlEdge(g->source_node(), c);
g->AddControlEdge(g->source_node(), d);
Node* relu = test::graph::Relu(g, a);
Node* identity = test::graph::Identity(g, b);
Node* m1 = test::graph::Matmul(g, relu, identity, false, false);
Node* m2 = test::graph::Matmul(g, identity, c, false, false);
g->AddControlEdge(m1, g->sink_node());
g->AddControlEdge(m2, g->sink_node());
Node* backward_m;
TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul")
.Input(d)
.Input(m2)
.Attr("transpose_a", true)
.Attr("transpose_b", false)
.Finalize(g, &backward_m));
g->AddControlEdge(backward_m, g->sink_node());
int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
EXPECT_EQ(95, g->num_nodes());
Node* found_node;
Status s = FindNode(g, strings::StrCat(d->name(), "/FakeQuantWithMinMaxVars"),
&found_node);
EXPECT_TRUE(absl::StrContains(s.ToString(), "not found")) << s;
TF_ASSERT_OK(
FindNode(g, strings::StrCat(relu->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(
FindNode(g, strings::StrCat(identity->name(), "/FakeQuantWithMinMaxVars"),
&found_node));
TF_ASSERT_OK(FindNode(
g, strings::StrCat(c->name(), "/FakeQuantWithMinMaxVars"), &found_node));
}
TEST_F(QuantizeTrainingTest, QuantizeSerializedGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graph;
graph->ToGraphDef(&input_graph);
string input_string;
input_graph.SerializeToString(&input_string);
string result_string;
TF_ASSERT_OK(DoQuantizeTrainingOnSerializedGraphDef(
input_string, num_bits, "QuantizeAndDequantizeV2", &result_string));
GraphDef result_graphdef;
EXPECT_TRUE(ParseProtoUnlimited(&result_graphdef, result_string));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, QuantizeGraphDef) {
Reset();
Graph* graph = g_.get();
Node* const_a = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
Node* const_b = Constant<float>({1.0, 2.0, 3.0, 4.0}, {2, 2});
graph->AddControlEdge(graph->source_node(), const_a);
graph->AddControlEdge(graph->source_node(), const_b);
Node* relu = test::graph::Relu(graph, const_a);
Node* identity = test::graph::Identity(graph, const_b);
Node* matmul = test::graph::Matmul(graph, relu, identity, false, false);
graph->AddControlEdge(matmul, graph->sink_node());
int num_bits = 8;
GraphDef input_graphdef;
graph->ToGraphDef(&input_graphdef);
GraphDef result_graphdef;
TF_ASSERT_OK(DoQuantizeTrainingOnGraphDef(
input_graphdef, num_bits, "QuantizeAndDequantizeV2", &result_graphdef));
GraphConstructorOptions opts;
Graph result_graph(OpRegistry::Global());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, result_graphdef, &result_graph));
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", graph));
EXPECT_EQ(graph->num_nodes(), result_graph.num_nodes());
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_QuantizeAndDequantize) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "QuantizeAndDequantizeV2", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
TEST_F(QuantizeTrainingTest, FixedRangeAndEMARange_FakeQuant) {
Reset();
Graph* g = g_.get();
Node* a;
TF_ASSERT_OK(Placeholder(g, "a", {2, 2}, &a));
Node* c = Constant<float>({2.0, 3.0, 4.0, 5.0}, {2, 2});
g->AddControlEdge(g->source_node(), a);
g->AddControlEdge(g->source_node(), c);
Node* relu = test::graph::Relu(g, a);
Node* relu6 = test::graph::Relu6(g, c);
Node* m1 = test::graph::Matmul(g, relu, relu6, false, false);
g->AddControlEdge(m1, g->sink_node());
const int num_bits = 8;
TF_ASSERT_OK(DoQuantizeTraining(num_bits, "FakeQuantWithMinMaxVars", g));
SessionOptions options;
Session* sess;
TF_ASSERT_OK(NewSession(options, &sess));
GraphDef gdef;
g->ToGraphDef(&gdef);
TF_ASSERT_OK(sess->Create(gdef));
string min_const_name = strings::StrCat(relu6->name(), "/InputMin");
string max_const_name = strings::StrCat(relu6->name(), "/InputMax");
std::vector<Tensor> outputs;
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
Tensor a1(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a1, {0.0, 1.0, 2.0, 3.0});
Tensor a2(DT_FLOAT, TensorShape({2, 2}));
test::FillValues<float>(&a2, {1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK(sess->Run({{"a", a1}}, {m1->name()}, {}, &outputs));
string min_var_name = strings::StrCat(relu->name(), "/Min/Variable");
string max_var_name = strings::StrCat(relu->name(), "/Max/Variable");
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 3.0);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
TF_ASSERT_OK(sess->Run({{"a", a2}}, {m1->name()}, {}, &outputs));
TF_ASSERT_OK(sess->Run({}, {min_var_name, max_var_name}, {}, &outputs));
const float decay = 0.999;
const float expected_min = 0.0 * decay + 1.0 * (1.0 - decay);
const float expected_max = 3.0 * decay + 4.0 * (1.0 - decay);
EXPECT_NEAR(outputs[0].flat<float>()(0), expected_min, 1e-4);
EXPECT_NEAR(outputs[1].flat<float>()(0), expected_max, 1e-4);
TF_ASSERT_OK(sess->Run({}, {min_const_name, max_const_name}, {}, &outputs));
EXPECT_EQ(outputs[0].flat<float>()(0), 0.0);
EXPECT_EQ(outputs[1].flat<float>()(0), 6.0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/quantize_training.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/quantize_training_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7f46326-cc72-4b4e-b225-22247bd2f23a | cpp | tensorflow/tensorflow | placer_inspection_required_ops_utils | tensorflow/core/common_runtime/placer_inspection_required_ops_utils.cc | tensorflow/core/common_runtime/placer_inspection_required_ops_utils_test.cc | #include "tensorflow/core/common_runtime/placer_inspection_required_ops_utils.h"
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
namespace {
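// Only multi-device function calls (PartitionedCall / StatefulPartitionedCall)
// are matched here; direct calls by function name are not, which the
// "DirectCallsAreNotDeep" test below depends on.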
bool IsFunctionCall(const Node& node) {
const string& op_type = node.op_def().name();
return op_type == "PartitionedCall" || op_type == "StatefulPartitionedCall";
}
Status Set(const Node& node, bool value, bool* is_deep,
std::vector<absl::optional<bool>>* cache) {
*is_deep = value;
(*cache)[node.id()] = value;
return absl::OkStatus();
}
}
PlacerInspectionRequiredOpChecker::PlacerInspectionRequiredOpChecker(
const Graph* graph, const FunctionLibraryDefinition* flib_def)
: graph_(*graph), flib_def_(*flib_def) {
cache_.resize(graph_.num_node_ids());
}
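// A node requires placer inspection iff it is a function call whose signature
// declares at least one DT_RESOURCE output. Answers are memoized per node id
// in cache_, so repeat queries are constant time.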
Status PlacerInspectionRequiredOpChecker::IsPlacerInspectionRequired(
const Node& node, bool* is_deep) {
if (cache_[node.id()].has_value()) {
*is_deep = cache_[node.id()].value();
return absl::OkStatus();
}
if (!IsFunctionCall(node)) {
return Set(node, false, is_deep, &cache_);
}
core::RefCountPtr<FunctionRecord> fdef;
NameAttrList func;
TF_RETURN_IF_ERROR(GetFunctionDefAndAttrs(flib_def_, node, &fdef, &func));
DataTypeVector types;
TF_RETURN_IF_ERROR(OutputTypesForNode(AttrSlice(&func.attr()),
fdef->fdef().signature(), &types));
for (DataType type : types) {
if (type == DT_RESOURCE) {
return Set(node, true, is_deep, &cache_);
}
}
return Set(node, false, is_deep, &cache_);
}
Status GetFunctionDefAndAttrs(const FunctionLibraryDefinition& flib_def,
const Node& node,
core::RefCountPtr<FunctionRecord>* fdef,
NameAttrList* func) {
TF_RETURN_IF_ERROR(GetNodeAttr(node.def(), "f", func));
const string& function_name = func->name();
*fdef = flib_def.FindRecord(function_name);
if (*fdef == nullptr) {
return errors::InvalidArgument(
"Failed to find function \"", function_name,
"\" in function library: ", flib_def.ToProto().DebugString());
}
return absl::OkStatus();
}
FunctionStack::FunctionStack(const string& function_name)
: current_function_name_(function_name) {}
FunctionStack FunctionStack::Push(const Node* node_in_current_function,
const string& new_current_function) const {
FunctionStack new_stack(new_current_function);
new_stack.frames_ = frames_;
new_stack.frames_.emplace_back(current_function_name_,
node_in_current_function);
return new_stack;
}
bool FunctionStack::HasFunction(const string& function_name) const {
if (current_function_name_ == function_name) {
return true;
}
for (const Frame& frame : frames_) {
if (frame.function_name == function_name) {
return true;
}
}
return false;
}
string FunctionStack::FormatForError() const {
std::vector<string> msgs;
for (int i = 0; i < frames_.size(); ++i) {
if (frames_[i].function_name.empty()) {
msgs.push_back(absl::StrCat("Graph contains node ",
FormatNodeForError(*frames_[i].node)));
} else {
msgs.push_back(absl::StrCat(
"Function ", errors::FormatFunctionForError(frames_[i].function_name),
" contains node ", FormatNodeForError(*frames_[i].node)));
}
const string& fname = (i + 1 < frames_.size())
? frames_[i + 1].function_name
: current_function_name_;
msgs.push_back(absl::StrCat("Node ", FormatNodeForError(*frames_[i].node),
" calls function ",
errors::FormatFunctionForError(fname)));
}
return absl::StrJoin(msgs, "\n ");
}
namespace {
using OutputEdgeMap = std::vector<std::vector<const Edge*>>;
constexpr char kIdentityOp[] = "Identity";
string Uniquify(const string& candidate_name,
std::unordered_set<string>* node_names) {
if (node_names->find(candidate_name) == node_names->end()) {
node_names->insert(candidate_name);
return candidate_name;
}
for (int counter = 0;; ++counter) {
string candidate = absl::StrCat(candidate_name, "_", counter);
if (node_names->find(candidate) == node_names->end()) {
node_names->insert(candidate);
return candidate;
}
}
}
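// Splices an Identity node onto input `input_idx` of `node`, uniquifying its
// name against every existing node name so the rewrite cannot collide.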
Status AddInputIdentity(Node* node, int input_idx, Graph* graph,
std::unordered_set<string>* node_names) {
const Edge* edge;
TF_RETURN_IF_ERROR(node->input_edge(input_idx, &edge));
string identity_name = Uniquify(
absl::StrCat(edge->src()->name(), "_", node->name()), node_names);
NodeDefBuilder builder(identity_name, kIdentityOp);
builder.Attr("T", node->input_type(input_idx));
NodeDefBuilder::NodeOut input(edge->src()->name(), edge->src_output(),
node->input_type(input_idx));
builder.Input(input);
NodeDef identity_def;
TF_RETURN_IF_ERROR(builder.Finalize(&identity_def));
MergeDebugInfo(NodeDebugInfo(*node), &identity_def);
VLOG(6) << "Adding identity into " << edge->src()->name() << ":"
<< edge->src_output() << " -> " << edge->dst()->name() << ":"
<< input_idx << " \n"
<< identity_def.DebugString();
TF_ASSIGN_OR_RETURN(Node * identity_node, graph->AddNode(identity_def));
graph->AddEdge(edge->src(), edge->src_output(), identity_node, 0);
TF_RETURN_IF_ERROR(graph->UpdateEdge(identity_node, 0, node, input_idx));
VLOG(6) << "Successfully inserted identity. Modified node: \n"
<< node->DebugString();
return absl::OkStatus();
}
struct EdgePtrCompare {
bool operator()(const Edge* lhs, const Edge* rhs) const {
return lhs->id() < rhs->id();
}
};
Status AddOutputIdentities(Node* node, Graph* graph,
std::unordered_set<string>* node_names) {
auto add_identity = [&](int src_output, const string& identity_name,
Node** identity_node) {
NodeDefBuilder builder(identity_name, kIdentityOp);
builder.Attr("T", node->output_type(src_output));
NodeDefBuilder::NodeOut input(node->name(), src_output,
node->output_type(src_output));
builder.Input(input);
NodeDef identity_def;
TF_RETURN_IF_ERROR(builder.Finalize(&identity_def));
MergeDebugInfo(NodeDebugInfo(*node), &identity_def);
TF_ASSIGN_OR_RETURN(*identity_node, graph->AddNode(identity_def));
graph->AddEdge(node, src_output, *identity_node, 0);
return absl::OkStatus();
};
std::vector<bool> output_used(node->num_outputs(), false);
const EdgeSet& out_edges = node->out_edges();
std::vector<const Edge*> edge_vector(out_edges.begin(), out_edges.end());
std::sort(edge_vector.begin(), edge_vector.end(), EdgePtrCompare());
for (const Edge* edge : edge_vector) {
if (edge->IsControlEdge()) {
continue;
}
output_used[edge->src_output()] = true;
Node* dst = edge->dst();
int dst_input = edge->dst_input();
int src_output = edge->src_output();
string identity_name =
Uniquify(absl::StrCat(node->name(), "_", dst->name()), node_names);
Node* identity_node;
TF_RETURN_IF_ERROR(add_identity(src_output, identity_name, &identity_node));
VLOG(6) << "Adding identity into " << node->name() << ":" << src_output
<< " -> " << dst->name() << ":" << dst_input << " \n"
<< identity_node->DebugString();
TF_RETURN_IF_ERROR(graph->UpdateEdge(identity_node, 0, dst, dst_input));
}
for (int output_idx = 0; output_idx < node->num_outputs(); ++output_idx) {
if (output_used[output_idx]) {
continue;
}
string identity_name = Uniquify(node->name(), node_names);
Node* identity_node;
TF_RETURN_IF_ERROR(add_identity(output_idx, identity_name, &identity_node));
VLOG(6) << "Added identity into " << node->name() << ":" << output_idx
<< " -> <no consumer>: \n"
<< identity_node->DebugString();
}
return absl::OkStatus();
}
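// Surrounds `node` with Identity nodes on every input and output, including
// otherwise-unused outputs, presumably so each edge around a deep function
// call can later be placed independently of the call itself.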
Status IsolateNode(Node* node, Graph* graph) {
std::unordered_set<string> node_names(graph->num_nodes());
for (Node* n : graph->nodes()) {
node_names.insert(n->name());
}
for (int i = 0; i < node->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(AddInputIdentity(node, i, graph, &node_names));
}
TF_RETURN_IF_ERROR(AddOutputIdentities(node, graph, &node_names));
return absl::OkStatus();
}
}
Status IsolatePlacerInspectionRequiredOps(
const FunctionLibraryDefinition& flib_def, Graph* graph) {
PlacerInspectionRequiredOpChecker checker(graph, &flib_def);
for (Node* node : graph->op_nodes()) {
bool should_be_isolated = false;
TF_RETURN_IF_ERROR(
checker.IsPlacerInspectionRequired(*node, &should_be_isolated));
if (!should_be_isolated) {
continue;
}
TF_RETURN_IF_ERROR(IsolateNode(node, graph));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/placer_inspection_required_ops_utils.h"
#include <map>
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
void VerifyPlacerInspectionRequiredOps(const GraphDef& graph_def,
std::map<string, bool> deep_nodes) {
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), graph_def.library());
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
PlacerInspectionRequiredOpChecker checker(&graph, &flib_def);
std::unordered_map<string, Node*> node_map = graph.BuildNodeNameIndex();
for (const auto& entry : deep_nodes) {
const Node* node = node_map[entry.first];
ASSERT_NE(node, nullptr) << "Failed to find node " << entry.first
<< " in the graph " << graph_def.DebugString();
const bool expected_is_deep = entry.second;
bool actual_is_deep;
TF_EXPECT_OK(checker.IsPlacerInspectionRequired(*node, &actual_is_deep));
EXPECT_EQ(expected_is_deep, actual_is_deep)
<< " Expected is_deep to be " << expected_is_deep << " for node "
<< entry.first;
}
}
TEST(PlacerInspectionRequiredOpCheckerTest, Basic) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", true}, {"y", false}});
}
TEST(PlacerInspectionRequiredOpCheckerTest, DirectCallsAreNotDeep) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "ResourceIdentity", {"x"}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", false}, {"y", false}});
}
TEST(PlacerInspectionRequiredOpCheckerTest,
FunctionsNotReturningResourcesAreNotDeep) {
FunctionDef func = test::function::ReadResourceVariable();
GraphDef graph_def = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("ReadResourceVariable", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_FLOAT}}),
},
{func});
VerifyPlacerInspectionRequiredOps(graph_def,
{{"x", false}, {"f", false}, {"y", false}});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/placer_inspection_required_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/placer_inspection_required_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8eb513de-403b-4f62-8099-cef65c43e9f9 | cpp | tensorflow/tensorflow | scoped_allocator_mgr | tensorflow/core/common_runtime/scoped_allocator_mgr.cc | tensorflow/core/common_runtime/scoped_allocator_mgr_test.cc | #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/framework/allocator.h"
namespace tensorflow {
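// Registers one ScopedAllocator for the backing tensor plus one
// ScopedAllocatorInstance per field; fails if `scope_id` or any field's
// scope_id is already registered for this step.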
Status ScopedAllocatorContainer::AddScopedAllocator(
const Tensor& backing_tensor, int32_t scope_id, const string& scope_name,
const absl::Span<const ScopedAllocator::Field>& fields,
int32_t expected_call_count) {
VLOG(1) << "AddScopedAllocator " << mgr_->device_name()
<< " step_id_=" << step_id_ << " scope_id=" << scope_id;
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
return errors::Internal("Cannot create ScopedAllocator because scope_id ",
scope_id, " for name ", scope_name,
" already exists");
}
for (auto& f : fields) {
if (allocators_.find(f.scope_id) != allocators_.end()) {
return errors::Internal(
"Cannot create ScopedAllocator because field scope_id ", f.scope_id,
" for name ", scope_name, " already exists");
}
}
VLOG(2) << " container " << this << " step_id " << step_id_;
ScopedAllocator* sa = new ScopedAllocator(
backing_tensor, scope_id, scope_name, fields, expected_call_count, this);
allocators_[scope_id] =
ScopedAllocatorContainer::SAField(ScopedAllocator::kBackingIndex, sa);
VLOG(2) << "#fields " << fields.size();
for (int i = 0; i < fields.size(); ++i) {
const ScopedAllocator::Field& f = fields[i];
VLOG(2) << "Adding instance with for " << mgr_->device_name()
<< " scope_id=" << f.scope_id;
allocators_[f.scope_id] = ScopedAllocatorContainer::SAField(
i, new ScopedAllocatorInstance(sa, i));
}
return absl::OkStatus();
}
ScopedAllocator* ScopedAllocatorContainer::GetAllocator(int32_t scope_id) {
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
CHECK_EQ(ScopedAllocator::kBackingIndex, it->second.field_index);
return it->second.scoped_allocator;
} else {
LOG(ERROR) << "Failed to find ScopedAllocator for " << scope_id
<< " in container for step " << step_id_ << " on "
<< mgr_->device_name();
return nullptr;
}
}
ScopedAllocatorInstance* ScopedAllocatorContainer::GetInstance(
int32_t scope_id) {
VLOG(2) << "GetInstance " << scope_id << " step " << step_id_ << " on "
<< mgr_->device_name();
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
return it->second.instance;
}
LOG(FATAL) << "Failed to find instance " << scope_id << " in container "
<< step_id_ << " on " << mgr_->device_name();
return nullptr;
}
void ScopedAllocatorContainer::Drop(int32_t scope_id, ScopedAllocator* sa) {
VLOG(2) << "Drop " << scope_id << " from container " << this << " step "
<< step_id_ << " on " << mgr_->device_name();
mutex_lock l(mu_);
auto it = allocators_.find(scope_id);
if (it != allocators_.end()) {
if (it->second.field_index != ScopedAllocator::kBackingIndex) {
it->second.instance->DropFromTable();
}
allocators_.erase(it);
}
}
ScopedAllocatorContainer::~ScopedAllocatorContainer() {
VLOG(2) << "~ScopedAllocatorContainer " << this << " step " << step_id_
<< " on " << mgr_->device_name();
mutex_lock l(mu_);
for (auto& it : allocators_) {
if (it.second.field_index == ScopedAllocator::kBackingIndex) {
delete it.second.scoped_allocator;
} else {
it.second.instance->DropFromTable();
}
}
}
ScopedAllocatorMgr::~ScopedAllocatorMgr() {
mutex_lock l(mu_);
for (auto& it : per_step_map_) {
while (!it.second->Unref()) {
}
}
}
void ScopedAllocatorMgr::Cleanup(int64_t step_id) {
mutex_lock l(mu_);
auto it = per_step_map_.find(step_id);
if (it != per_step_map_.end()) {
it->second->Unref();
per_step_map_.erase(it);
}
}
ScopedAllocatorContainer* ScopedAllocatorMgr::GetContainer(int64_t step_id) {
VLOG(2) << "GetContainer " << step_id << " on " << device_name();
ScopedAllocatorContainer* sac = nullptr;
mutex_lock l(mu_);
auto it = per_step_map_.find(step_id);
if (it == per_step_map_.end()) {
sac = new ScopedAllocatorContainer(this, step_id);
per_step_map_[step_id] = sac;
} else {
sac = it->second;
}
return sac;
}
Status ScopedAllocatorMgr::AddScopedAllocator(
const Tensor& backing_tensor, int64_t step_id, int32_t scope_id,
const string& scope_name,
const absl::Span<const ScopedAllocator::Field>& fields,
int32_t expected_call_count) {
ScopedAllocatorContainer* sac = GetContainer(step_id);
return sac->AddScopedAllocator(backing_tensor, scope_id, scope_name, fields,
expected_call_count);
}
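// Lays the field shapes out back-to-back in one backing buffer, rounding each
// field's end up to Allocator::kAllocatorAlignment. For example, assuming a
// 64-byte alignment, float fields of 512, 9, and 512 elements land at byte
// offsets 0, 2048, and 2112, with 28 padding bytes after the 36-byte middle
// field.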
size_t ScopedAllocatorMgr::PopulateFields(
int32_t scope_id, const absl::Span<const TensorShape>& shapes,
const DataType dtype, std::vector<ScopedAllocator::Field>* fields) {
const int32_t num_fields = static_cast<int32>(shapes.size());
fields->resize(num_fields);
size_t offset = 0;
for (int32_t i = 0; i < num_fields; ++i) {
size_t bytes_requested = shapes[i].num_elements() * DataTypeSize(dtype);
auto* field = &((*fields)[i]);
field->scope_id = scope_id + 1 + i;
field->bytes_requested = bytes_requested;
field->offset = offset;
offset += bytes_requested;
size_t bytes_allocated = bytes_requested;
size_t overshoot = offset % Allocator::kAllocatorAlignment;
if (overshoot > 0) {
size_t alignment_bytes = Allocator::kAllocatorAlignment - overshoot;
bytes_allocated += alignment_bytes;
offset += alignment_bytes;
}
field->bytes_allocated = bytes_allocated;
VLOG(1) << "field=" << i << " scope_id=" << field->scope_id
<< " bytes_requested=" << field->bytes_requested
<< " offset=" << field->offset
<< " bytes_allocated=" << field->bytes_allocated;
}
return offset;
}
} | #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class ScopedAllocatorMgrTest : public ::testing::Test {
public:
ScopedAllocatorMgrTest() : sam_("CPU0") {}
void InitTensor() {
backing_tensor_ = Tensor(cpu_allocator(), DT_FLOAT, backing_tensor_shape_);
}
void PopulateFields() {
ScopedAllocatorMgr::PopulateFields(scope_id_, fields_shapes_, DT_FLOAT,
&fields_);
}
Status AddScopedAllocator(int expected_use_count, int scope_id) {
VLOG(2) << "Adding ScopedAllocator step_id " << step_id_ << " scope_id "
<< scope_id_ << " #fields " << fields_.size()
<< " expected_use_count " << expected_use_count;
return sam_.AddScopedAllocator(backing_tensor_, step_id_, scope_id,
"tensor_shape_599", fields_,
expected_use_count);
}
Status PrepScopedAllocatorMgr(int expected_use_count) {
InitTensor();
PopulateFields();
return AddScopedAllocator(expected_use_count, scope_id_);
}
void SaveInstances(int num_instances) {
sa_instances_.clear();
sa_instances_.resize(num_instances);
ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
for (int i = 0; i < num_instances; i++) {
sa_instances_[i] = sac->GetInstance(scope_id_ + 1 + i);
}
}
int AlignmentPadding() {
int alignment_padding =
(Allocator::kAllocatorAlignment -
(521 * sizeof(float)) % Allocator::kAllocatorAlignment) %
Allocator::kAllocatorAlignment;
return alignment_padding;
}
void PrintShapes() {
VLOG(2) << "tensor_shape=" << backing_tensor_shape_.DebugString();
for (int i = 0; i < fields_shapes_.size(); i++) {
VLOG(2) << "fields_shapes[" << i
<< "]=" << fields_shapes_[i].DebugString();
}
}
protected:
TensorShape backing_tensor_shape_;
Tensor backing_tensor_;
std::vector<TensorShape> fields_shapes_;
std::vector<ScopedAllocator::Field> fields_;
ScopedAllocatorMgr sam_;
const int step_id_ = 101;
const int scope_id_ = 599;
std::vector<ScopedAllocatorInstance*> sa_instances_;
};
TEST_F(ScopedAllocatorMgrTest, ContainerAllocation) {
ScopedAllocatorContainer* sac_101 = sam_.GetContainer(101);
EXPECT_TRUE(sac_101 != nullptr);
ScopedAllocatorContainer* sac_201 = sam_.GetContainer(201);
EXPECT_TRUE(sac_201 != nullptr);
EXPECT_NE(sac_101, sac_201);
ScopedAllocatorContainer* also_sac_101 = sam_.GetContainer(101);
EXPECT_EQ(sac_101, also_sac_101);
sam_.Cleanup(101);
}
TEST_F(ScopedAllocatorMgrTest, PopulateFields) {
backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
InitTensor();
PopulateFields();
EXPECT_EQ(0, fields_[0].offset);
EXPECT_EQ(512 * sizeof(float), fields_[0].bytes_requested);
EXPECT_EQ(scope_id_ + 1, fields_[0].scope_id);
EXPECT_EQ(512 * sizeof(float), fields_[1].offset);
EXPECT_EQ(9 * sizeof(float), fields_[1].bytes_requested);
EXPECT_EQ(scope_id_ + 2, fields_[1].scope_id);
EXPECT_EQ(521 * sizeof(float) + AlignmentPadding(), fields_[2].offset);
EXPECT_EQ(512 * sizeof(float), fields_[2].bytes_requested);
EXPECT_EQ(scope_id_ + 3, fields_[2].scope_id);
}
TEST_F(ScopedAllocatorMgrTest, ContainerAddAllocator) {
backing_tensor_shape_ = TensorShape({1024});
fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
Status s = PrepScopedAllocatorMgr(2);
EXPECT_TRUE(s.ok());
SaveInstances(fields_shapes_.size());
s = AddScopedAllocator(2, scope_id_);
EXPECT_FALSE(s.ok());
fields_[0].scope_id = scope_id_ + 1;
s = AddScopedAllocator(2, scope_id_ + 3);
EXPECT_FALSE(s.ok());
void* ptr0 = sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float));
void* ptr1 = sa_instances_[1]->AllocateRaw(0, 512 * sizeof(float));
sa_instances_[0]->DeallocateRaw(ptr0);
sa_instances_[1]->DeallocateRaw(ptr1);
}
TEST_F(ScopedAllocatorMgrTest, AllocatorSuccess) {
ScopedAllocatorContainer* sac = sam_.GetContainer(step_id_);
ScopedAllocator* other = sac->GetAllocator(scope_id_);
EXPECT_EQ(other, nullptr);
backing_tensor_shape_ = TensorShape({512 + 9 + 512 + 16});
fields_shapes_ = std::vector<TensorShape>({{512}, {3, 3}, {2, 256}});
Status s = PrepScopedAllocatorMgr(3);
other = sac->GetAllocator(scope_id_);
ScopedAllocatorInstance* inst0 = sac->GetInstance(scope_id_ + 1);
char* ptr0 = static_cast<char*>(inst0->AllocateRaw(0, 512 * sizeof(float)));
const char* base =
static_cast<const char*>(DMAHelper::base(&backing_tensor_));
EXPECT_EQ(ptr0, base);
ScopedAllocatorInstance* inst1 = sac->GetInstance(scope_id_ + 2);
char* ptr1 = static_cast<char*>(inst1->AllocateRaw(0, 9 * sizeof(float)));
EXPECT_EQ(ptr1, ptr0 + (512 * sizeof(float)));
ScopedAllocatorInstance* inst2 = sac->GetInstance(scope_id_ + 3);
char* ptr2 = static_cast<char*>(inst2->AllocateRaw(0, 512 * sizeof(float)));
EXPECT_EQ(ptr2, ptr1 + AlignmentPadding() + (9 * sizeof(float)));
EXPECT_EQ(nullptr, sac->GetAllocator(scope_id_));
inst0->DeallocateRaw(ptr0);
inst1->DeallocateRaw(ptr1);
inst2->DeallocateRaw(ptr2);
}
TEST_F(ScopedAllocatorMgrTest, AllocatorInitFail) {
backing_tensor_shape_ = TensorShape({8});
InitTensor();
fields_.resize(1);
fields_[0].scope_id = scope_id_ + 1;
fields_[0].offset = 0;
fields_[0].bytes_requested =
backing_tensor_shape_.num_elements() * 2 * sizeof(float);
EXPECT_DEATH(Status s = AddScopedAllocator(1, scope_id_), "");
}
TEST_F(ScopedAllocatorMgrTest, AllocatorFail) {
backing_tensor_shape_ = TensorShape({1024});
fields_shapes_ = std::vector<TensorShape>({{512}, {512}});
Status s = PrepScopedAllocatorMgr(2);
EXPECT_TRUE(s.ok());
SaveInstances(fields_shapes_.size());
char* ptr0 =
static_cast<char*>(sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)));
VLOG(2) << "Should fail because we deallocate ptr="
<< static_cast<void*>(ptr0 + 8) << " which we never allocated.";
EXPECT_DEATH(sa_instances_[0]->DeallocateRaw(ptr0 + 8), "");
VLOG(2) << "Should fail because we allocate smaller than the size of the "
<< "field.";
EXPECT_EQ(nullptr, sa_instances_[1]->AllocateRaw(0, 256 * sizeof(float)));
VLOG(2) << "Should fail because we allocate larger than the size of the "
<< "field.";
EXPECT_EQ(nullptr, sa_instances_[1]->AllocateRaw(0, 1024 * sizeof(float)));
void* ptr1 = sa_instances_[1]->AllocateRaw(0, 512 * sizeof(float));
VLOG(2) << "Should fail because we exceed expected_use_count.";
EXPECT_EQ(nullptr, sa_instances_[0]->AllocateRaw(0, 512 * sizeof(float)));
sa_instances_[0]->DeallocateRaw(ptr0);
sa_instances_[1]->DeallocateRaw(ptr1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/scoped_allocator_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/scoped_allocator_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3e707eb-aa99-42f0-8beb-c5644b98862f | cpp | tensorflow/tensorflow | device_propagation | tensorflow/core/common_runtime/device_propagation.cc | tensorflow/core/common_runtime/device_propagation_test.cc | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
namespace {
const std::string& AssignedOrRequestedDevice(const Node& node) {
if (!node.assigned_device_name().empty()) {
return node.assigned_device_name();
}
return node.requested_device();
}
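// Assigns `node` the device of its data inputs when the node passes
// node_filter, has no device yet, and all relevant inputs agree on a single
// device accepted by device_filter. A Switch ignores its LoopCond input and a
// Merge ignores Enter inputs, since those may legitimately live on a
// different device in control-flow loops.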
bool UpdateDeviceFromInputs(
const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter, Node* node) {
if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) {
return false;
}
string proposed_device = "";
Node* proposed_src = nullptr;
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
Node* src = e->src();
const string& src_device = AssignedOrRequestedDevice(*src);
if ((node->IsSwitch() && src->IsLoopCond()) ||
(node->IsMerge() && src->IsEnter())) {
continue;
}
if (!device_filter(src_device)) return false;
if (proposed_src == nullptr) {
proposed_device = src_device;
proposed_src = src;
} else if (proposed_device != src_device) {
return false;
}
}
if (proposed_src) {
node->set_assigned_device_name(proposed_src->assigned_device_name());
node->set_requested_device(proposed_src->requested_device());
return true;
} else {
return false;
}
}
}
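// Repeats UpdateDeviceFromInputs over the whole graph in BFS order until a
// fixed point, letting devices flow through arbitrarily long chains of
// eligible nodes.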
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter,
Graph* graph) {
bool nodes_changed = true;
while (nodes_changed) {
nodes_changed = false;
BreadthFirstTraversal(
*graph, {}, [&nodes_changed, &node_filter, &device_filter](Node* node) {
nodes_changed |=
UpdateDeviceFromInputs(node_filter, device_filter, node);
});
}
}
} | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
using ::testing::UnorderedElementsAreArray;
namespace tensorflow {
namespace {
const char kTpu0[] = "/job:localhost/replica:0/task:0/device:TPU:0";
const char kTpu1[] = "/job:localhost/replica:0/task:0/device:TPU:1";
const char kTpu2[] = "/job:localhost/replica:0/task:0/device:TPU:2";
const char kGpu0[] = "/job:localhost/replica:0/task:0/device:GPU:0";
bool IsTPUDevice(StringPiece device_name) {
return absl::StrContains(device_name, "device:TPU:");
}
device_propagation::NodeFilter TargetOps(
const absl::flat_hash_set<std::string>& ops) {
return [&ops](const Node& n) { return ops.contains(n.type_string()); };
}
absl::flat_hash_map<std::string, std::string> GetNodeNameDevices(
const Graph& graph) {
absl::flat_hash_map<std::string, std::string> node_name_devices;
for (const Node* node : graph.nodes()) {
if (node->IsSource() || node->IsSink()) {
continue;
}
const string& device = node->assigned_device_name().empty()
? node->requested_device()
: node->assigned_device_name();
node_name_devices[node->name()] = device;
}
return node_name_devices;
}
TEST(DevicePropagationTest, PropagateTPUDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu1);
auto c = ops::Identity(scope.WithOpName("C"), a);
auto d =
ops::Merge(scope.WithOpName("D"), std::initializer_list<Input>{a, c});
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{b, c});
auto f = ops::Identity(scope.WithOpName("F"), a);
f.node()->set_assigned_device_name(kTpu2);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity", "Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", kTpu1},
{"C", kTpu0},
{"D", kTpu0},
{"E", ""},
{"F", kTpu2},
}));
}
TEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kGpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kGpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_BOOL);
auto b = ops::LoopCond(scope.WithOpName("B"), a);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_FLOAT);
c.node()->set_assigned_device_name(kTpu2);
auto d = ops::Switch(scope.WithOpName("D"), c, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Switch", "LoopCond"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", ""},
{"C", kTpu2},
{"D", kTpu2},
}));
}
TEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu2);
auto c = ops::internal::Enter(scope.WithOpName("C"), a, "Enter");
auto d = ops::NextIteration(scope.WithOpName("D"), b);
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{c, d});
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Enter", "Merge", "NextIteration"}), IsTPUDevice,
&graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", kTpu2},
{"C", ""},
{"D", kTpu2},
{"E", kTpu2},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f4efb58-88af-470d-a5f6-790410917c2f | cpp | tensorflow/tensorflow | buf_rendezvous | tensorflow/core/common_runtime/buf_rendezvous.cc | tensorflow/core/common_runtime/buf_rendezvous_test.cc | #include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
namespace tensorflow {
namespace {
void DeregisterCancellation(BufRendezvous::Hook* h) {
if (h->cancellation_manager != nullptr) {
h->cancellation_manager->DeregisterCallback(h->cancellation_token);
h->cancellation_manager = nullptr;
h->cancellation_token = CancellationManager::kInvalidToken;
}
}
}
BufRendezvous::~BufRendezvous() {
mutex_lock l(mu_);
if (!hook_table_.empty()) {
PurgeTable(errors::Internal("Delete called on non-empty BufRendezvous"),
&hook_table_);
}
}
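// Fails every pending hook: the table is swapped out under the lock, then all
// parked producer/consumer callbacks are invoked with `s` outside the lock
// (presumably to avoid re-entrant deadlock).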
void BufRendezvous::StartAbort(const Status& s) {
CHECK(!s.ok());
HookTable dummy_table;
{
mutex_lock l(mu_);
status_.Update(StatusGroup::MakeDerived(s));
hook_table_.swap(dummy_table);
}
PurgeTable(s, &dummy_table);
}
void BufRendezvous::PurgeTable(const Status& s, HookTable* table) {
for (auto& it : *table) {
Hook* h = it.second;
if (h->cancellation_manager != nullptr) {
h->cancellation_manager->TryDeregisterCallback(h->cancellation_token);
}
if (h->cons_cb != nullptr) {
h->cons_cb(s, nullptr);
}
if (h->prod_cb != nullptr) {
h->prod_cb(s);
}
delete h;
}
table->clear();
}
string BufRendezvous::Hook::DebugString() const {
return absl::StrCat(
"[dev:", (prod_dev ? prod_dev->name() : "none"),
", ctx:", reinterpret_cast<uint64>(prod_ctx),
", val:", reinterpret_cast<uint64>(prod_value),
", pcb:", prod_cb ? reinterpret_cast<uint64>(&prod_cb) : 0,
", ccb:", cons_cb ? reinterpret_cast<uint64>(&cons_cb) : 0, "]");
}
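// Producer side of the rendezvous. If a consumer has already registered a
// hook for `key`, its callback fires immediately; otherwise the hook is
// parked in hook_table_ (registering a cancellation callback when a manager
// is supplied) until the matching ConsumeBuf arrives.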
void BufRendezvous::ProvideBuf(const string& key, Device* dev,
DeviceContext* dev_ctx, const Tensor* v,
const AllocatorAttributes& attr,
const ProducerCallback& done,
CancellationManager* cancellation_manager) {
DVLOG(4) << "ProvideBuf: key = " << key;
#ifndef NDEBUG
if (VLOG_IS_ON(4)) {
LogContents();
}
#endif
Hook* h = nullptr;
Status providebuf_status;
do {
mutex_lock l(mu_);
if (!status_.ok()) {
providebuf_status = status_;
break;
} else {
CancellationToken cancellation_token = CancellationManager::kInvalidToken;
auto it = hook_table_.find(key);
if (it == hook_table_.end()) {
if (cancellation_manager != nullptr) {
cancellation_token = cancellation_manager->get_cancellation_token();
}
h = new Hook(cancellation_manager, cancellation_token);
it = hook_table_.insert(std::make_pair(key, h)).first;
} else {
if (it->second->prod_cb != nullptr) {
providebuf_status = errors::Internal(
"BufRendezvous::ProvideBuf already called for key ", key);
break;
}
h = it->second;
}
h->prod_dev = dev;
h->prod_ctx = dev_ctx;
h->prod_value = v;
h->prod_attr = attr;
h->prod_cb = done;
if (h->cons_cb != nullptr) {
hook_table_.erase(it);
} else {
if (cancellation_manager != nullptr &&
!cancellation_manager->RegisterCallback(
cancellation_token, [this, key]() { CancelHook(key); })) {
providebuf_status = errors::Cancelled(
"Operation was cancelled for BufRendezvous key ", key);
hook_table_.erase(it);
delete h;
}
h = nullptr;
}
}
} while (false);
if (h) {
DVLOG(4) << "ProvideBuf: key = " << key << ": calling cons_cb"
<< h->DebugString();
DeregisterCancellation(h);
h->cons_cb(absl::OkStatus(), h);
}
if (!providebuf_status.ok()) {
done(providebuf_status);
}
}
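// Consumer side. First checks that `device_name` resolves to a device with
// the expected incarnation (guarding against a restarted worker), then either
// consumes a waiting producer hook or parks its callback for a future
// ProvideBuf.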
void BufRendezvous::ConsumeBuf(const string& key, const string& device_name,
const uint64 device_incarnation,
const ConsumerCallback& done,
CancellationManager* cancellation_manager) {
DVLOG(4) << "ConsumeBuf: key = " << key << " device_name = " << device_name;
#ifndef NDEBUG
if (VLOG_IS_ON(4)) {
LogContents();
}
#endif
Device* device;
Status consumebuf_status = dev_mgr_->LookupDevice(device_name, &device);
if (consumebuf_status.ok() &&
device->attributes().incarnation() != device_incarnation) {
consumebuf_status = errors::FailedPrecondition(
"RecvBuf expects a different device incarnation: ", device_incarnation,
" vs. ", device->attributes().incarnation(),
". Your worker job that contains the device (\"", device_name,
"\") was probably restarted. Check your "
"worker job for the reason why it was restarted.");
}
if (!consumebuf_status.ok()) {
done(consumebuf_status, nullptr);
return;
}
Hook* existing_hook = nullptr;
do {
mutex_lock l(mu_);
if (!status_.ok()) {
consumebuf_status = status_;
break;
}
auto it = hook_table_.find(key);
if (it != hook_table_.end()) {
if (it->second->cons_cb) {
consumebuf_status =
errors::Internal("Second consumer arrived for key ", key);
break;
}
existing_hook = it->second;
hook_table_.erase(it);
existing_hook->cons_cb = done;
} else {
CancellationToken cancellation_token = CancellationManager::kInvalidToken;
bool already_cancelled = false;
if (cancellation_manager != nullptr) {
cancellation_token = cancellation_manager->get_cancellation_token();
already_cancelled = !cancellation_manager->RegisterCallback(
cancellation_token, [this, key]() { CancelHook(key); });
}
if (already_cancelled) {
consumebuf_status = errors::Cancelled(
"Operation was cancelled for BufRendezvous key ", key);
} else {
Hook* h = new Hook(cancellation_manager, cancellation_token);
h->cons_cb = done;
        hook_table_.insert(std::make_pair(key, h));
return;
}
}
} while (false);
if (existing_hook) {
DVLOG(4) << "ConsumeBuf: key = " << key << ": calling cons_cb"
<< existing_hook->DebugString();
DeregisterCancellation(existing_hook);
existing_hook->cons_cb(absl::OkStatus(), existing_hook);
return;
}
if (!consumebuf_status.ok()) {
done(consumebuf_status, nullptr);
return;
}
}
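// Cancellation-manager callback: removes the hook for `key` and fails both of
// its callbacks with a Cancelled status.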
void BufRendezvous::CancelHook(const string& key) {
Hook* h = nullptr;
{
mutex_lock l(mu_);
auto it = hook_table_.find(key);
if (it == hook_table_.end()) return;
h = it->second;
hook_table_.erase(it);
}
if (h != nullptr) {
auto s = errors::Cancelled("Operation was cancelled for BufRendezvous key ",
key);
if (h->prod_cb != nullptr) {
h->prod_cb(s);
}
if (h->cons_cb != nullptr) {
h->cons_cb(s, nullptr);
}
delete h;
}
}
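// Called by the consumer when it is finished with the buffer; completes the
// producer callback with OK and releases the hook.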
void BufRendezvous::DoneWithHook(Hook* h) {
h->prod_cb(absl::OkStatus());
delete h;
}
void BufRendezvous::LogContents() {
mutex_lock l(mu_);
LOG(INFO) << strings::StrCat("BufRendezvous ",
strings::Hex(reinterpret_cast<uint64>(this)),
" step_id=", step_id_, " current contents:");
for (const auto& it : hook_table_) {
LOG(INFO) << it.first << ":" << it.second->DebugString();
}
}
} | #include "tensorflow/core/common_runtime/buf_rendezvous.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class BufRendezvousTest : public ::testing::Test {
protected:
static std::unique_ptr<Device> NewDevice(const string& name,
const string& type,
const uint64 incarnation) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attrs)
: Device(nullptr, attrs) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attrs;
attrs.set_name(name);
attrs.set_device_type(type);
attrs.set_incarnation(incarnation);
return std::make_unique<FakeDevice>(attrs);
}
void InitializeDevice(const string& device, const string& type,
const uint64 incarnation) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(NewDevice(device, type, incarnation));
dev_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
br_ = std::make_unique<BufRendezvous>(123, dev_mgr_.get());
}
BufRendezvousTest()
: a_(Tensor(DT_FLOAT, TensorShape({24}))),
b_(Tensor(DT_FLOAT, TensorShape({24}))),
fake_device_context_(reinterpret_cast<DeviceContext*>(1024LLU)) {
InitializeDevice(*kDefaultDeviceName, "CPU", kDefaultIncarnation);
TF_CHECK_OK(dev_mgr_->LookupDevice(*kDefaultDeviceName, &default_device_));
}
Tensor a_;
Tensor b_;
AllocatorAttributes aa_;
Device* default_device_;
DeviceContext* fake_device_context_;
std::unique_ptr<DeviceMgr> dev_mgr_;
std::unique_ptr<BufRendezvous> br_;
CancellationManager cm_;
static const string* const kDefaultKey;
static const string* const kDefaultDeviceName;
static const uint64 kDefaultIncarnation;
};
const string* const BufRendezvousTest::kDefaultKey = new string("key0");
const string* const BufRendezvousTest::kDefaultDeviceName =
new string("/device:CPU:0");
const uint64 BufRendezvousTest::kDefaultIncarnation = 12345;
TEST_F(BufRendezvousTest, CorrectUseProducerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CorrectUseConsumerFirst) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
EXPECT_FALSE(cons_callback_called);
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_TRUE(cons_callback_called);
note.WaitForNotification();
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, ErrorDuplicatePut) {
bool prod_callback_called = false;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_callback_called](const Status& s) { prod_callback_called = true; },
&cm_);
Status bad_status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&bad_status, ¬e](const Status& s) {
bad_status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_FALSE(bad_status.ok());
EXPECT_EQ(absl::StrCat("BufRendezvous::ProvideBuf already called for key ",
*kDefaultKey),
bad_status.message());
EXPECT_FALSE(prod_callback_called);
br_.reset();
}
TEST_F(BufRendezvousTest, ErrorDeleteNonEmpty) {
Status cons_status;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_EQ(h, nullptr);
},
&cm_);
EXPECT_TRUE(cons_status.ok());
br_.reset();
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ("Delete called on non-empty BufRendezvous", cons_status.message());
}
TEST_F(BufRendezvousTest, AbortNonEmpty) {
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
br_->StartAbort(errors::Internal("Falling sky detected"));
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_EQ(prod_status.message(), "Falling sky detected");
EXPECT_FALSE(cons_status.ok());
EXPECT_EQ(cons_status.message(), "Falling sky detected");
}
TEST_F(BufRendezvousTest, AbortEmpty) {
br_->StartAbort(errors::Internal("Falling sky detected"));
}
TEST_F(BufRendezvousTest, UseAfterAbort) {
br_->StartAbort(errors::Internal("Falling sky detected"));
Status cons_status;
Status prod_status;
Notification prod_note;
Notification cons_note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_note, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
cons_note.Notify();
},
&cm_);
br_->ProvideBuf(
"key1", default_device_, fake_device_context_, &a_, aa_,
[&prod_note, &prod_status](const Status& s) {
prod_status = s;
prod_note.Notify();
},
&cm_);
prod_note.WaitForNotification();
cons_note.WaitForNotification();
EXPECT_FALSE(prod_status.ok());
EXPECT_NE(prod_status.message().find("Falling sky detected"), string::npos);
EXPECT_FALSE(cons_status.ok());
EXPECT_NE(cons_status.message().find("Falling sky detected"), string::npos);
}
TEST_F(BufRendezvousTest, DeviceIncarnationMismatch) {
Status cons_status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[](const Status&) {}, nullptr);
const uint64 incorrect_incarnation = 23456;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, incorrect_incarnation,
[¬e, &cons_status](const Status& s, BufRendezvous::Hook* h) {
cons_status = s;
note.Notify();
},
nullptr);
note.WaitForNotification();
EXPECT_TRUE(errors::IsFailedPrecondition(cons_status));
}
TEST_F(BufRendezvousTest, ProvideThenCancel) {
Status status;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenProvide) {
Status status;
Notification note;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&status, ¬e](const Status& s) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ConsumeThenCancel) {
Status status;
Notification note;
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
cm_.StartCancel();
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, CancelThenConsume) {
Status status;
Notification note;
cm_.StartCancel();
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&status, ¬e](const Status& s, BufRendezvous::Hook* h) {
status = s;
note.Notify();
},
&cm_);
note.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(status));
EXPECT_NE(
status.message().find(absl::StrCat(
"Operation was cancelled for BufRendezvous key ", *kDefaultKey)),
string::npos);
}
TEST_F(BufRendezvousTest, ProvideConsumeThenCancel) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
Notification note;
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[¬e, &prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
prod_callback_called = true;
note.Notify();
},
&cm_);
EXPECT_FALSE(prod_callback_called);
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[this, &cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
cons_callback_called = true;
ASSERT_TRUE(h != nullptr);
EXPECT_EQ(h->prod_dev, default_device_);
EXPECT_EQ(h->prod_ctx, fake_device_context_);
EXPECT_EQ(h->prod_value, &a_);
br_->DoneWithHook(h);
},
&cm_);
note.WaitForNotification();
cm_.StartCancel();
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(prod_callback_called);
TF_EXPECT_OK(cons_status);
TF_EXPECT_OK(prod_status);
}
TEST_F(BufRendezvousTest, CancelThenProvideConsume) {
Status prod_status;
Status cons_status;
bool prod_callback_called = false;
bool cons_callback_called = false;
cm_.StartCancel();
br_->ProvideBuf(
*kDefaultKey, default_device_, fake_device_context_, &a_, aa_,
[&prod_status, &prod_callback_called](const Status& s) {
prod_status = s;
EXPECT_TRUE(errors::IsCancelled(prod_status));
prod_callback_called = true;
},
&cm_);
EXPECT_TRUE(prod_callback_called);
EXPECT_TRUE(errors::IsCancelled(prod_status));
br_->ConsumeBuf(
*kDefaultKey, *kDefaultDeviceName, kDefaultIncarnation,
[&cons_status, &cons_callback_called](const Status& s,
BufRendezvous::Hook* h) {
cons_status = s;
EXPECT_TRUE(errors::IsCancelled(cons_status));
cons_callback_called = true;
},
&cm_);
EXPECT_TRUE(cons_callback_called);
EXPECT_TRUE(errors::IsCancelled(cons_status));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/buf_rendezvous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/buf_rendezvous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d7045fb-b6b0-4986-a243-6e549d212000 | cpp | tensorflow/tensorflow | lower_if_op | tensorflow/core/common_runtime/lower_if_op.cc | tensorflow/core/common_runtime/lower_if_op_test.cc | #include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
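// Rewrites a single If node into a Switch/Merge construct: the predicate
// feeds a Switch, per-branch pivot nodes gate the lowered then/else function
// calls, and Merge nodes recombine the branch outputs.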
class CondBuilder {
public:
enum Branch { kElseBranch = 0, kThenBranch = 1 };
CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph);
Status CreatePivotNodes();
Status AddInputs();
Status AddOutputs();
Status BuildLoweredIfOutput();
private:
string NewName(const string& infix);
Status AddInput(Node* src, int src_output);
Status SetColocationAndFinalize(NodeBuilder node_builder, Graph* graph,
Node** created_node);
std::vector<NodeOut> outputs_;
Node* control_predecessor_;
Node* if_op_;
const AttrValue* coloc_attr_;
Node* lowered_if_output_;
OutputTensor pred_;
Node* pivot_f_;
Node* pivot_t_;
Node* then_call_node_;
Node* else_call_node_;
Node* branch_executed_node_;
Graph* graph_;
string name_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
NodeBuilder then_call_builder_;
NodeBuilder else_call_builder_;
};
CondBuilder::CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph)
: if_op_(if_op),
coloc_attr_(if_op_->attrs().Find(kColocationAttrName)),
graph_(graph),
name_(if_op->name()),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*if_op_),
then_call_builder_(NewName("then"), then_fn.name(), graph->op_registry(),
&debug_info_),
else_call_builder_(NewName("else"), else_fn.name(), graph->op_registry(),
&debug_info_) {
TF_CHECK_OK(if_op_->input_tensor(0, &pred_));
then_call_builder_.Device(if_op_->requested_device());
then_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : then_fn.attr()) {
then_call_builder_.Attr(i.first, i.second);
}
else_call_builder_.Device(if_op_->requested_device());
else_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : else_fn.attr()) {
else_call_builder_.Attr(i.first, i.second);
}
}
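// Propagates the original If node's colocation attribute, if any, onto the
// node under construction before finalizing it.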
Status CondBuilder::SetColocationAndFinalize(NodeBuilder node_builder,
Graph* graph,
Node** created_node) {
if (coloc_attr_ != nullptr) {
node_builder = node_builder.Attr(kColocationAttrName, *coloc_attr_);
}
return node_builder.Finalize(graph, created_node);
}
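// Builds the predicate Switch plus the pivot_f/pivot_t Identity nodes that
// anchor control flow for the else (output 0) and then (output 1) branches.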
Status CondBuilder::CreatePivotNodes() {
Node* switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("switch_pred"), "Switch",
graph_->op_registry(), &debug_info_)
.Input(NodeOut(pred_))
.Input(NodeOut(pred_))
.Device(if_op_->requested_device()),
graph_, &switch_pred));
control_predecessor_ = switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_f"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kElseBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_f_));
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_t"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kThenBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_t_));
return absl::OkStatus();
}
string CondBuilder::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
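// Wraps one data input in a Switch on the predicate, colocated with the
// input's producer; output 1 feeds the then-branch call and output 0 the
// else-branch call.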
Status CondBuilder::AddInput(Node* src, int src_output) {
Node* input;
NodeDebugInfo debug_info(*src);
TF_RETURN_IF_ERROR(
NodeBuilder(NewName(src->name()), "Switch", graph_->op_registry(),
&debug_info)
.Input(src, src_output)
.Input(pred_)
.Device(src->requested_device())
.Attr(kColocationAttrName,
{absl::StrCat(kColocationGroupPrefix, src->name())})
.Finalize(graph_, &input));
then_call_builder_.Input(input, kThenBranch);
else_call_builder_.Input(input, kElseBranch);
return absl::OkStatus();
}
Status CondBuilder::AddInputs() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(if_op_->input_edges(&edges));
for (int i = 1; i < edges.size(); ++i) {
const Edge* e = edges[i];
TF_RETURN_IF_ERROR(AddInput(e->src(), e->src_output()));
}
for (const Edge* e : if_op_->in_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(e->src(), control_predecessor_);
}
}
return absl::OkStatus();
}
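// Finalizes both branch calls, merges their outputs pairwise, and adds a
// "branch_executed" Merge node that carries the If node's control edges.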
Status CondBuilder::AddOutputs() {
TF_RETURN_IF_ERROR(then_call_builder_.Finalize(graph_, &then_call_node_));
graph_->AddControlEdge(pivot_t_, then_call_node_);
TF_RETURN_IF_ERROR(else_call_builder_.Finalize(graph_, &else_call_node_));
graph_->AddControlEdge(pivot_f_, else_call_node_);
std::vector<Node*> merges(then_call_node_->num_outputs());
outputs_.resize(merges.size());
for (int i = 0; i < then_call_node_->num_outputs(); ++i) {
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("output"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({NodeOut(then_call_node_, i), NodeOut(else_call_node_, i)})
.Device(if_op_->requested_device()),
graph_, &merges[i]));
outputs_[i] = NodeOut(merges[i], 0);
}
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("branch_executed"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({pivot_t_, pivot_f_})
.ControlInputs({then_call_node_, else_call_node_})
.Device(if_op_->requested_device()),
graph_, &branch_executed_node_));
TF_RETURN_IF_ERROR(BuildLoweredIfOutput());
for (const Edge* e : if_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(branch_executed_node_, e->dst());
} else {
graph_->AddEdge(merges[e->src_output()], 0, e->dst(), e->dst_input());
}
}
return absl::OkStatus();
}
Status CondBuilder::BuildLoweredIfOutput() {
NodeBuilder builder = keep_node_fetchable_ && !outputs_.empty()
? NodeBuilder(name_, "IdentityN").Input(outputs_)
: NodeBuilder(name_, "NoOp");
return builder.Device(if_op_->requested_device())
.ControlInput(branch_executed_node_)
.Finalize(graph_, &lowered_if_output_);
}
}
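// Replaces If node `n` in graph `g` with the lowered Switch/Merge form built
// by CondBuilder, then removes the original node.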
Status RewriteIfNode(Node* n, Graph* g, bool keep_node_fetchable) {
VLOG(2) << "Lower If node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* then_attr = n->attrs().Find("then_branch");
if (then_attr == nullptr) {
return errors::InvalidArgument("Then branch function missing");
}
const AttrValue* else_attr = n->attrs().Find("else_branch");
if (else_attr == nullptr) {
return errors::InvalidArgument("Else branch function missing");
}
CondBuilder cb(n, then_attr->func(), else_attr->func(), keep_node_fetchable,
g);
TF_RETURN_IF_ERROR(cb.CreatePivotNodes());
TF_RETURN_IF_ERROR(cb.AddInputs());
TF_RETURN_IF_ERROR(cb.AddOutputs());
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncAttr(const string& name) {
AttrValue attr;
attr.mutable_func()->set_name(name);
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerIfOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", FuncAttr("XTimesTwo"))
.Attr("else_branch", FuncAttr("XTimesFour"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
ASSERT_NE(op->type_string(), "If");
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
TEST(LowerIfOpTest, BranchFunctionsWithoutOutputs) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
const auto assign_add = [](const string& fn_name, int v) {
const Tensor tensor = test::AsScalar<int32>(v);
return FDH::Create(
fn_name, {"v: resource"}, {}, {},
{
{{"c"}, "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}},
{{"upd"},
"AssignAddVariableOp",
{"v", "c:output"},
{{"dtype", DT_INT32}}},
},
{},
{{"side_effects", "upd"}});
};
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = assign_add("AddOne", 1);
*(f_lib_proto.add_function()) = assign_add("AddTwo", 2);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto initial_val = ops::Placeholder(root.WithOpName("initial_val"), DT_INT32);
auto var = ops::VarHandleOp(root.WithOpName("var"), DT_INT32, {});
auto init = ops::AssignVariableOp(root.WithOpName("init"), var, initial_val);
Node* if_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(var.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.ControlInput(init.operation.node())
.Attr("then_branch", FuncAttr("AddOne"))
.Attr("else_branch", FuncAttr("AddTwo"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", DataTypeSlice{})
.Finalize(root.graph(), &if_node));
auto read = ops::ReadVariableOp(
root.WithOpName("read").WithControlDependencies(Output(if_node)), var,
DT_INT32);
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) ++switch_count;
if (op->IsMerge()) ++merge_count;
if (op->name() == "if") ++node_called_if_count;
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 11);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
TEST(LowerIfOpTest, DoNotInlineLoweredFunction) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef x_times_four = test::function::XTimesFour();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*x_times_four.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = x_times_two;
*(f_lib_proto.add_function()) = x_times_four;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue tb;
tb.mutable_func()->set_name("XTimesTwo");
AttrValue eb;
eb.mutable_func()->set_name("XTimesFour");
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", tb)
.Attr("else_branch", eb)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int x_times_four_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == x_times_four.signature().name()) {
x_times_four_count++;
}
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(x_times_four_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_if_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_if_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc2c582a-cd25-44b2-88af-7fa487bdb013 | cpp | tensorflow/tensorflow | ring_reducer | tensorflow/core/common_runtime/ring_reducer.cc | tensorflow/core/common_runtime/ring_reducer_test.cc | #include "tensorflow/core/common_runtime/ring_reducer.h"
#include <stdlib.h>
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
RingReducer::~RingReducer() { group_size_tensor_ready_.WaitForNotification(); }
Status RingReducer::InitializeCollectiveParams(CollectiveParams* col_params) {
CHECK_EQ(col_params->instance.type, REDUCTION_COLLECTIVE);
CHECK_EQ(col_params->instance.impl_details.collective_name, "RingReduce");
return RingAlg::InitializeCollectiveParams(col_params);
}
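// Entry point. If the input and output tensors are distinct buffers, the
// input is first copied into the output, which the ring passes then reduce in
// place.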
void RingReducer::Run(StatusCallback done) {
CHECK(col_ctx_);
CHECK(col_params_);
col_ctx_->col_exec->UnblockDependencies(*col_params_);
done_ = std::move(done);
group_size_ = col_params_->group.group_size;
num_subdivs_ = static_cast<int>(
col_params_->instance.impl_details.subdiv_permutations.size());
CHECK_GT(num_subdivs_, 0);
if (VLOG_IS_ON(1)) {
string buf;
for (int r = 0; r < col_params_->group.members.size(); ++r) {
strings::StrAppend(&buf, "dev ", r, " : ",
col_params_->group.members[r].device.name(), "\n");
}
for (int sd = 0;
sd < col_params_->instance.impl_details.subdiv_permutations.size();
++sd) {
strings::StrAppend(&buf, "\nsubdiv ", sd, " perm: ");
for (auto x :
col_params_->instance.impl_details.subdiv_permutations[sd]) {
strings::StrAppend(&buf, x, ", ");
}
}
VLOG(1) << "RingReducer::Run for device " << col_ctx_->device_name
<< " default_rank " << col_params_->default_rank << "\n"
<< buf;
}
if ((col_ctx_->input != col_ctx_->output) &&
(DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) {
Notification note;
Status status;
tsl::profiler::TraceMe activity("MemCpyAsync",
tsl::profiler::TraceMeLevel::kInfo);
CollectiveRemoteAccessLocal::MemCpyAsync(
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), col_ctx_->input,
        col_ctx_->output, 0 /*dev_to_dev_stream_index*/,
[¬e, &status](const Status& s) {
status.Update(s);
note.Notify();
});
note.WaitForNotification();
if (!status.ok()) {
done_(status);
return;
}
}
ContinueAfterInputCopy();
}
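// Wraps the output tensor in a collective adapter with group_size *
// num_subdivs chunks and, when a final op is configured, stages the
// group-size scalar (copied to device for non-CPU groups) before running the
// ring.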
void RingReducer::ContinueAfterInputCopy() {
AllocatorAttributes attr = col_ctx_->op_ctx->output_alloc_attr(0);
ca_.reset(MakeCollectiveAdapter(col_ctx_->output, group_size_ * num_subdivs_,
col_ctx_->device->GetAllocator(attr)));
if (col_params_->final_op) {
Tensor group_size_val = ca_->Scalar(group_size_);
if (col_params_->group.device_type != "CPU") {
uint64 safe_alloc_frontier = col_ctx_->device->SafeAllocFrontier(0);
AllocationAttributes aa;
std::function<uint64()> freed_by_func = [this, &safe_alloc_frontier]() {
safe_alloc_frontier =
col_ctx_->device->SafeAllocFrontier(safe_alloc_frontier);
return safe_alloc_frontier;
};
if (safe_alloc_frontier > 0) {
aa.freed_by_func = &freed_by_func;
}
group_size_tensor_ = ca_->Scalar(
col_ctx_->device->GetAllocator(col_ctx_->op_ctx->input_alloc_attr(0)),
aa);
DeviceContext* op_dev_ctx = col_ctx_->op_ctx->op_device_context();
op_dev_ctx->CopyCPUTensorToDevice(
&group_size_val, col_ctx_->device, &group_size_tensor_,
[this](const Status& s) {
if (!s.ok()) {
StartAbort(s);
}
group_size_tensor_ready_.Notify();
},
          /*sync_dst_compute=*/(safe_alloc_frontier == 0));
} else {
group_size_tensor_ = group_size_val;
group_size_tensor_ready_.Notify();
}
} else {
group_size_tensor_ready_.Notify();
}
Finish(RunAsyncParts());
}
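// Extends the base initialization by allocating a temporary chunk to receive
// into when this ring field performs a recv.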
void RingReducer::InitRingField(RingField* rf, int chunk_idx, int subdiv_idx,
int field_idx) {
RingAlg::InitRingField(rf, chunk_idx, subdiv_idx, field_idx);
if (rf->do_recv) {
rf->tmp_chunk = ca_->TempChunk(rf->sc_idx);
}
}
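// Drives every RingField through the two-pass RF_* state machine via a
// producer/consumer queue, dispatching sends and recvs asynchronously;
// returns false if the collective was aborted.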
bool RingReducer::RunAsyncParts() {
rfv_.clear();
rfv_.resize(group_size_ * num_subdivs_);
PCQueue ready_queue;
for (int chunk_idx = 0; chunk_idx < group_size_; ++chunk_idx) {
for (int subdiv_idx = 0; subdiv_idx < num_subdivs_; ++subdiv_idx) {
int rf_index = (chunk_idx * num_subdivs_) + subdiv_idx;
InitRingField(&rfv_[rf_index], chunk_idx, subdiv_idx, rf_index);
ready_queue.Enqueue(&rfv_[rf_index]);
}
}
const DeviceBase::AcceleratorDeviceInfo* gpu_info =
col_ctx_->device->tensorflow_accelerator_device_info();
if (gpu_info) {
tsl::profiler::TraceMe activity("WaitForQueuedEvents",
tsl::profiler::TraceMeLevel::kInfo);
Notification note;
Status s = gpu_info->default_context->ThenExecute(
col_ctx_->device, gpu_info->stream, [¬e]() { note.Notify(); });
if (s.ok()) {
note.WaitForNotification();
} else {
mutex_lock l(status_mu_);
status_ =
errors::Internal("Failed to dispatch ThenExecute in RingReducer");
return false;
}
}
int field_done_count = 0;
int send_pending_count = 0;
int recv_pending_count = 0;
std::atomic<bool> aborted(false);
{
tsl::profiler::TraceMe activity("Loop", tsl::profiler::TraceMeLevel::kInfo);
while (field_done_count < rfv_.size()) {
VLOG(4) << FieldState();
RingField* rf = ready_queue.Dequeue();
bool dispatched = false;
do {
if (aborted) {
ready_queue.Enqueue(rf);
break;
}
switch (rf->action) {
case RF_INIT:
if (rf->do_recv) {
rf->action = RF_RECV;
auto requeue = [this, rf, &ready_queue, &aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchRecv(rf, requeue);
dispatched = true;
++recv_pending_count;
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_RECV:
CHECK_GT(recv_pending_count, 0);
--recv_pending_count;
if (!rf->second_pass) {
rf->action = RF_REDUCE;
Status s = collective_util::ComputeBinOp(
col_ctx_->op_ctx, col_ctx_->op_params, col_ctx_->device,
col_params_->merge_op, &rf->chunk, &rf->tmp_chunk);
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_REDUCE:
if (!rf->second_pass && col_params_->final_op && rf->is_final) {
rf->action = RF_FINALIZE;
group_size_tensor_ready_.WaitForNotification();
Status s = collective_util::ComputeBinOp(
col_ctx_->op_ctx, col_ctx_->op_params, col_ctx_->device,
col_params_->final_op, &rf->chunk, &group_size_tensor_);
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_FINALIZE:
rf->action = RF_DONE;
break;
case RF_SEND_READY:
if (rf->do_send) {
rf->action = RF_SEND;
auto send_complete = [this, rf, &ready_queue,
&aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchSend(rf, send_complete);
dispatched = true;
++send_pending_count;
} else {
rf->action = RF_DONE;
}
break;
case RF_SEND:
CHECK_GT(send_pending_count, 0);
--send_pending_count;
rf->action = RF_DONE;
break;
case RF_DONE:
break;
}
if (rf->action == RF_DONE) {
if (rf->second_pass) {
++field_done_count;
break;
} else {
AdvanceToSecondPass(rf);
}
}
} while (!dispatched);
if (aborted) break;
}
if (aborted) {
while ((send_pending_count > 0) || (recv_pending_count > 0)) {
RingField* rf = ready_queue.Dequeue();
switch (rf->action) {
case RF_RECV:
--recv_pending_count;
break;
case RF_SEND:
--send_pending_count;
break;
default: {
}
}
}
}
}
CHECK_EQ(send_pending_count, 0);
CHECK_EQ(recv_pending_count, 0);
VLOG(2) << this << " device=" << col_ctx_->device_name << " finish;"
<< " final value " << TensorDebugString(ca_->Value());
return !aborted;
}
namespace {
REGISTER_COLLECTIVE(RingReduce, RingReducer);
}
} | #include "tensorflow/core/common_runtime/ring_reducer.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
std::unique_ptr<OpKernel> GetKernel(const NodeDef& node,
const DeviceType& device_type,
DeviceBase* device) {
Status status;
std::unique_ptr<OpKernel> k = CreateOpKernel(
device_type, device, device->GetAllocator(AllocatorAttributes()), node,
TF_GRAPH_DEF_VERSION, &status);
if (!status.ok()) {
LOG(FATAL) << status;
}
return k;
}
std::unique_ptr<OpKernel> GetAdd(DataType dtype, const DeviceType& device_type,
DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Add");
TF_CHECK_OK(builder.Attr("T", dtype)
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Finalize(&node_def));
return GetKernel(node_def, device_type, device);
}
std::unique_ptr<OpKernel> GetDiv(DataType dtype, const DeviceType& device_type,
DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Div");
TF_CHECK_OK(builder.Attr("T", dtype)
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Finalize(&node_def));
return GetKernel(node_def, device_type, device);
}
class RingReducerTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int num_subdivs, int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, num_subdivs, dtype, shape, test_env_.get()));
}
}
}
void Reduce(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoReduce();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < static_cast<int>(instances_.size())) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int num_subdivs, int tensor_len,
int fail_after) {
Init(num_workers, num_devices, dtype, TensorShape({tensor_len}),
device_type, num_subdivs, fail_after);
std::vector<T> expected(tensor_len);
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
instances_[di]->InitTensor([&expected, dtype, di](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
if (dtype == DT_INT32 || dtype == DT_INT64) {
value = di * 10 + i;
}
t->flat<T>()(i) = static_cast<T>(value);
expected[i] += static_cast<T>(value);
}
});
}
Reduce(fail_after);
if (fail_after > 0) {
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
EXPECT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
}
} else {
for (int i = 0; i < tensor_len; ++i) {
expected[i] /= static_cast<T>(num_workers * num_devices);
}
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(test::AsTensor<T>(expected),
instances_[di]->tensor());
}
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, int num_subdivs, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env), tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "RingReduce",
REDUCTION_COLLECTIVE, dtype, shape);
if (num_subdivs > 0) {
col_params_->instance.impl_details.subdiv_offsets =
GenerateEvenSubdivOffsets(test_env->num_devices_per_worker,
num_subdivs);
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
merge_op_ = GetAdd(col_params_->instance.data_type,
test_env_->device_type, device_);
final_op_ = GetDiv(col_params_->instance.data_type,
test_env_->device_type, device_);
col_params_->merge_op = merge_op_.get();
col_params_->final_op = final_op_.get();
}
void InitTensor(const std::function<void(Tensor*)>& init_f) {
init_f(&tensor_);
}
void DoReduce() {
status_ = RunCollective(test_env_, col_params_.get(), device_, &tensor_,
&tensor_);
}
const Tensor& tensor() { return tensor_; }
CollectiveTestEnv* test_env_;
Tensor tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
std::unique_ptr<OpKernel> merge_op_;
std::unique_ptr<OpKernel> final_op_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
mutex mu_;
int32 reduce_counter_ TF_GUARDED_BY(mu_) = 0;
};
class RingReducerInitParamsTest : public ::testing::Test {
protected:
void RunSubdivPermsTest(
CollectiveParams* cp,
const std::vector<std::vector<int>>& expected_subdiv_perms,
const std::vector<int>& expected_subdiv_rank) {
cp->instance.impl_details.subdiv_permutations.clear();
cp->subdiv_rank.clear();
core::RefCountPtr<RingReducer> reducer(new RingReducer());
TF_CHECK_OK(reducer->InitializeCollectiveParams(cp));
EXPECT_EQ(expected_subdiv_perms,
cp->instance.impl_details.subdiv_permutations);
EXPECT_EQ(expected_subdiv_rank, cp->subdiv_rank);
reducer->group_size_tensor_ready_.Notify();
}
};
TEST_F(RingReducerInitParamsTest, SpecifiedSubdivs) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 3;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets = {0, 4};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
8, 9, 10, 11, 20, 21, 22, 23, 16, 17, 18, 19}},
{0, 4});
cp->instance.impl_details.subdiv_offsets = {0, -4};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8,
15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20}},
{0, 3});
cp->default_rank = 3;
cp->instance.impl_details.subdiv_offsets = {3, -3};
RunSubdivPermsTest(cp.get(),
{{3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14,
15, 8, 9, 10, 19, 20, 21, 22, 23, 16, 17, 18},
{4, 3, 2, 1, 0, 7, 6, 5, 12, 11, 10, 9,
8, 15, 14, 13, 20, 19, 18, 17, 16, 23, 22, 21}},
{0, 1});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivs) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 3;
const int kNumDevs = kNumDevsPerWorker * kNumWorkers;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
{0});
{
int num_subdivs = 2;
int num_chunks = kNumDevs * num_subdivs;
size_t chunk_size = 3 * 1048576;
size_t tensor_size = chunk_size * num_chunks;
cp->instance.shape = TensorShape(
{static_cast<int64_t>(tensor_size / DataTypeSize(DT_FLOAT))});
}
cp->instance.impl_details.subdiv_offsets.clear();
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8,
15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20}},
{0, 3});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivUpperBound) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}, {0, 1, 2, 3}}, {0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivIgnoresMaxNumSubdivs) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.max_subdivs_per_device = 4;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}}, {0});
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 4;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3}, {0, 1, 2, 3}, {0, 1, 2, 3}, {0, 1, 2, 3}},
{0, 0, 0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivUsesDefault) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}, {0, 1, 2, 3}}, {0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivDisabled) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = -1;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}}, {0});
}
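// DEF_TEST(B = data type, T = device type, W = workers, D = devices per
// worker, S = subdivs (0 = automatic), L = tensor length, A = fail after this
// many dispatched ops (0 = no injected failure)).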
#define DEF_TEST(B, T, W, D, S, L, A) \
TEST_F(RingReducerTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Sdiv##S##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_BFLOAT16: { \
RunTest<tensorflow::bfloat16>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 4096, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 3, 1045991, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 4, 1045991, 0)
DEF_TEST(DOUBLE, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(BFLOAT16, CPU, 1, 2, 1, 8, 0)
DEF_TEST(BFLOAT16, CPU, 2, 8, 3, 16, 0)
DEF_TEST(INT32, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT32, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(INT64, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 1)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 7)
DEF_TEST(FLOAT, CPU, 2, 8, 2, 9408, 11)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4096, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 3, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 3, 1045991, 0)
DEF_TEST(FLOAT, GPU, 1, 4, 4, 1045991, 0)
DEF_TEST(DOUBLE, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 2)
DEF_TEST(FLOAT, GPU, 1, 8, 2, 9408, 5)
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_reducer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_reducer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13ecf62c-b147-4735-9f37-cf4474c6761c | cpp | tensorflow/tensorflow | isolate_placer_inspection_required_ops_pass | tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass.cc | tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass_test.cc | #include "tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/placer_inspection_required_ops_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
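// PRE_PLACEMENT pass that wraps the inputs and outputs of
// placer-inspection-required ops (e.g. function-calling ops with resource
// inputs/outputs) in Identity nodes so the Placer can handle them in
// isolation.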
Status IsolatePlacerInspectionRequiredOpsPass::Run(
const GraphOptimizationPassOptions& options) {
if (options.graph == nullptr) {
VLOG(1) << "Not running IsolatePlacerInspectionRequiredOpsPass because no "
"graph is provided";
return absl::OkStatus();
}
VLOG(1) << "IsolatePlacerInspectionRequiredOpsPass::Run";
Graph* graph = options.graph->get();
if (VLOG_IS_ON(3)) {
DumpGraphToFile("isolate_deep_ops_before", *graph, nullptr, "/tmp");
}
Status status = IsolatePlacerInspectionRequiredOps(*options.flib_def, graph);
if (VLOG_IS_ON(3) && status.ok()) {
DumpGraphToFile("isolate_deep_ops_after", *graph, nullptr, "/tmp");
}
return status;
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 35,
IsolatePlacerInspectionRequiredOpsPass);
} | #include "tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass.h"
#include <map>
#include <unordered_map>
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
static void RunPass(const GraphDef& original, GraphDef* rewritten,
FunctionLibraryDefinition* flib_def) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
GraphConstructorOptions opts;
opts.add_default_attributes = false;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, original, graph.get()));
GraphOptimizationPassOptions options;
options.graph = &graph;
options.flib_def = flib_def;
IsolatePlacerInspectionRequiredOpsPass pass;
TF_ASSERT_OK(pass.Run(options));
graph->ToGraphDef(rewritten);
}
static void RunPass(const GraphDef& original, GraphDef* rewritten) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(), original.library());
RunPass(original, rewritten, &flib_def);
}
void RunPassAndCompare(const GraphDef& original, const GraphDef& expected) {
GraphDef rewritten;
RunPass(original, &rewritten);
TF_EXPECT_GRAPH_EQ(expected, rewritten);
}
void RunPassAndCompare(const GraphDef& original,
const std::vector<GraphDef>& expected_alternatives) {
GraphDef rewritten;
RunPass(original, &rewritten);
std::vector<string> errors;
errors.push_back(absl::StrCat("Graphs did not match.\n Rewritten graph:\n",
SummarizeGraphDef(rewritten)));
for (const GraphDef& alternative : expected_alternatives) {
string diff;
bool graphs_equal = EqualGraphDef(rewritten, alternative, &diff);
if (graphs_equal) {
return;
}
errors.push_back(absl::StrCat(" Expected alternative:\n",
SummarizeGraphDef(alternative)));
}
EXPECT_TRUE(false) << absl::StrJoin(errors, "\n");
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, Basic) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef original = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("x_f", "Identity", {"x"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("f_y", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("y", "_Retval", {"f_y:0"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, expected);
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, FunctionDefinitionNotInGraph) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef original = GDef({
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
});
GraphDef expected = GDef({
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("x_f", "Identity", {"x"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE}},
{"f", FDH::FunctionRef("ResourceIdentity", {})}}),
NDef("f_y", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("y", "_Retval", {"f_y:0"}, {{"T", DT_RESOURCE}}),
});
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
TF_ASSERT_OK(flib_def.AddFunctionDef(func));
GraphDef rewritten;
RunPass(original, &rewritten, &flib_def);
TF_EXPECT_GRAPH_EQ(expected, rewritten);
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, MultipleInputsAndOutputs) {
FunctionDef func = test::function::Swap();
GraphDef original = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("r1", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "_Retval", {"f:1"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b_f", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a_f", "b_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_r1", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r1", "_Retval", {"f_r1"}, {{"T", DT_RESOURCE}}),
NDef("f_r2", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
NDef("r2", "_Retval", {"f_r2"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, expected);
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, UnusedOutput) {
FunctionDef func = test::function::Swap();
GraphDef original = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("r1", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b_f", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a_f", "b_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_r1", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r1", "_Retval", {"f_r1"}, {{"T", DT_RESOURCE}}),
NDef("f_0", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, expected);
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, OutputsConsumedBySameOp) {
FunctionDef func = test::function::Swap();
GraphDef original = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("add", "Add", {"f:0", "f:1"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected1 = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b_f", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a_f", "b_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_add", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("f_add_0", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
NDef("add", "Add", {"f_add", "f_add_0"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected2 = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("b", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("b_f", "Identity", {"b"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a_f", "b_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_add", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
NDef("f_add_0", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("add", "Add", {"f_add_0", "f_add"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, {expected1, expected2});
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, IdenticalInputs) {
FunctionDef func = test::function::Swap();
GraphDef original = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a", "a"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("r1", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r2", "_Retval", {"f:1"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected1 = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("a_f_0", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"a_f", "a_f_0"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_r1", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r1", "_Retval", {"f_r1"}, {{"T", DT_RESOURCE}}),
NDef("f_r2", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
NDef("r2", "_Retval", {"f_r2"}, {{"T", DT_RESOURCE}}),
},
{func});
GraphDef expected2 = GDef(
{
NDef("a", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("a_f", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("a_f_0", "Identity", {"a"}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall",
{"a_f_0", "a_f"},
{{"Tin", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_RESOURCE, DT_RESOURCE}},
{"f", FDH::FunctionRef("Swap", {{"T", DT_RESOURCE}})}}),
NDef("f_r1", "Identity", {"f:0"}, {{"T", DT_RESOURCE}}),
NDef("r1", "_Retval", {"f_r1"}, {{"T", DT_RESOURCE}}),
NDef("f_r2", "Identity", {"f:1"}, {{"T", DT_RESOURCE}}),
NDef("r2", "_Retval", {"f_r2"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, {expected1, expected2});
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest, DirectCallsAreNotIsolated) {
FunctionDef func = test::function::ResourceIdentity();
GraphDef original = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "ResourceIdentity", {"x"}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_RESOURCE}}),
},
{func});
RunPassAndCompare(original, original);
}
TEST(IsolatePlacerInspectionRequiredOpsPassTest,
FunctionsNotReturningResourcesAreNotIsolated) {
FunctionDef func = test::function::ReadResourceVariable();
GraphDef original = GDef(
{
NDef("x", "_Arg", {}, {{"T", DT_RESOURCE}}),
NDef("f", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("ReadResourceVariable", {})}}),
NDef("y", "_Retval", {"f:0"}, {{"T", DT_FLOAT}}),
},
{func});
RunPassAndCompare(original, original);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/isolate_placer_inspection_required_ops_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce6ef79f-c050-4a7f-b50f-126d68923ea5 | cpp | tensorflow/tensorflow | process_util | tensorflow/core/common_runtime/process_util.cc | tensorflow/core/common_runtime/process_util_test.cc | #include "tensorflow/core/common_runtime/process_util.h"
#if defined(ENABLE_MKL) && defined(ENABLE_ONEDNN_OPENMP)
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
#include <string.h>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/util.h"
#include "tsl/platform/tracing.h"
namespace tensorflow {
namespace {
int32 GetEnvNumInterOpThreads() {
static int32_t env_num_threads = NumInterOpThreadsFromEnvironment();
return env_num_threads;
}
int32 DefaultNumInterOpThreads() {
#ifndef __ANDROID__
int32_t env_num_threads = GetEnvNumInterOpThreads();
if (env_num_threads > 0) {
return env_num_threads;
}
return port::MaxParallelism();
#else
return 1;
#endif
}
static thread::ThreadPool* InitComputePool(const SessionOptions& options) {
int32_t inter_op_parallelism_threads =
options.config.inter_op_parallelism_threads();
if (inter_op_parallelism_threads == 0) {
inter_op_parallelism_threads = DefaultNumInterOpThreads();
}
return new thread::ThreadPool(
Env::Default(), ThreadOptions(), "Compute", inter_op_parallelism_threads,
!options.config.experimental().disable_thread_spinning(),
nullptr);
}
}
thread::ThreadPool* ComputePool(const SessionOptions& options) {
static thread::ThreadPool* compute_pool = InitComputePool(options);
return compute_pool;
}
int32 NumInterOpThreadsFromEnvironment() {
int32_t num;
const char* val = std::getenv("TF_NUM_INTEROP_THREADS");
return (val && strings::safe_strto32(val, &num)) ? num : 0;
}
int32 NumIntraOpThreadsFromEnvironment() {
int32_t num;
const char* val = std::getenv("TF_NUM_INTRAOP_THREADS");
return (val && strings::safe_strto32(val, &num)) ? num : 0;
}
#if defined(ENABLE_ONEDNN_OPENMP) && defined(ENABLE_MKL)
int32 OMPThreadsFromEnvironment() {
int32 num;
const char* val = std::getenv("OMP_NUM_THREADS");
return (val && strings::safe_strto32(val, &num)) ? num : 0;
}
int32 DefaultNumIntraOpThreads() {
static int env_num_threads = NumIntraOpThreadsFromEnvironment();
if (env_num_threads > 0) {
return env_num_threads;
}
return port::MaxParallelism();
}
#endif
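// Resolution order: explicit inter_op_parallelism_threads in the session
// options, then TF_NUM_INTEROP_THREADS from the environment, then (when
// oneDNN/MKL is enabled) a heuristic derived from the intra-op setting, and
// finally port::MaxParallelism().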
int32 NumInterOpThreadsFromSessionOptions(const SessionOptions& options) {
const int32_t inter_op = options.config.inter_op_parallelism_threads();
if (inter_op > 0) return inter_op;
const int32_t env_inter_op = GetEnvNumInterOpThreads();
if (env_inter_op > 0) return env_inter_op;
#if defined(ENABLE_ONEDNN_OPENMP) && defined(ENABLE_MKL)
if (IsMKLEnabled()) {
const int32 intra_op = options.config.intra_op_parallelism_threads();
const int32 omp_max_threads = OMPThreadsFromEnvironment();
const int32 mkl_intra_op =
(omp_max_threads > 0)
? omp_max_threads
: (intra_op > 0) ? intra_op : DefaultNumIntraOpThreads();
DCHECK_GE(mkl_intra_op, 1);
const int32 mkl_inter_op = std::max(
(DefaultNumInterOpThreads() + mkl_intra_op - 1) / mkl_intra_op, 2);
VLOG(0)
<< "Creating new thread pool with default inter op setting: "
<< mkl_inter_op
<< ". Tune using inter_op_parallelism_threads for best performance.";
return mkl_inter_op;
}
#endif
return DefaultNumInterOpThreads();
}
thread::ThreadPool* NewThreadPoolFromSessionOptions(
const SessionOptions& options, int32_t num_threads) {
const int32_t num_threads_real =
num_threads > 0 ? num_threads
: NumInterOpThreadsFromSessionOptions(options);
VLOG(1) << "Session inter op parallelism threads: " << num_threads_real;
return new thread::ThreadPool(
options.env, ThreadOptions(), "Compute", num_threads_real,
!options.config.experimental().disable_thread_spinning(),
nullptr);
}
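// Schedules `closure` on the default Env. When the tracing event collector is
// enabled, the schedule and run events are recorded under a shared unique id.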
void SchedClosure(absl::AnyInvocable<void()> closure) {
if (!tsl::tracing::EventCollector::IsEnabled()) {
return Env::Default()->SchedClosure(std::move(closure));
}
uint64 id = tsl::tracing::GetUniqueArg();
tsl::tracing::RecordEvent(tsl::tracing::EventCategory::kScheduleClosure, id);
Env::Default()->SchedClosure([id, closure = std::move(closure)]() mutable {
tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kRunClosure,
id);
closure();
});
}
void SchedNonBlockingClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) {
Env::Default()->SchedClosureAfter(micros, std::move(closure));
}
} | #include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(ProcessUtilTest, NumThreads) {
SessionOptions opts;
opts.config.set_inter_op_parallelism_threads(10);
EXPECT_EQ(10, NumInterOpThreadsFromSessionOptions(opts));
}
TEST(ProcessUtilTest, ThreadPool) {
SessionOptions opts;
opts.config.set_inter_op_parallelism_threads(10);
thread::ThreadPool* pool = NewThreadPoolFromSessionOptions(opts);
EXPECT_EQ(10, pool->NumThreads());
delete pool;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/process_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3ed1296-aa66-4676-b427-1b02afef8845 | cpp | tensorflow/tensorflow | replicate_constants_pass | tensorflow/core/common_runtime/replicate_constants_pass.cc | tensorflow/core/common_runtime/replicate_constants_pass_test.cc | #include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
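// Upper bound on the number of elements a constant may have to be eligible
// for replication.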
constexpr int64_t kMaxSize = 16;
void SetUniqueName(Graph* graph, Node* node) {
node->set_name(graph->NewName(absl::StrCat(node->name(), "/replicate")));
}
bool HasControlOut(Node* node) {
auto control_out_it =
std::find_if(node->out_edges().begin(), node->out_edges().end(),
[](const auto& e) { return e->IsControlEdge(); });
return control_out_it != node->out_edges().end();
}
bool HasCpuDevice(const Node* node) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(node->assigned_device_name(), &device))
return false;
return device.type == "CPU";
}
Status DeviceNameToCpuDeviceNameWithDeviceId(const string& device_name,
string* host_device_name) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(device_name, &device)) {
return absl::InternalError(
absl::StrCat("Could not parse device name ", device_name));
}
if (flags::Global().enable_aggressive_constant_replication.value() &&
device.type == "CPU") {
*host_device_name = device_name;
} else {
device.type = "CPU";
device.has_type = true;
device.id = 0;
device.has_id = true;
*host_device_name = DeviceNameUtils::ParsedNameToString(device);
}
return absl::OkStatus();
}
Status GetDestinationCpuDevice(const Node* dst, std::string* device) {
if (!dst->has_assigned_device_name())
return absl::AbortedError(
absl::StrCat("Node name: ", dst->name(), " has no assigned device."));
return DeviceNameToCpuDeviceNameWithDeviceId(dst->assigned_device_name(),
device);
}
Status GetSuccessorEdges(
Node* node,
absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
for (const auto& edge : node->out_edges()) {
const Node* dst = edge->dst();
std::string device;
TF_RETURN_IF_ERROR(GetDestinationCpuDevice(dst, &device));
if (!device_to_edges.count(device)) device_to_edges.insert({device, {}});
device_to_edges[device].push_back(edge);
}
return absl::OkStatus();
}
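// Replaces `node` with one copy per destination device: each copy gets a
// unique name, is assigned to that device, inherits the original's control
// inputs, and feeds the edges bound for that device. The original node is
// then removed from the graph.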
void ReplicateToEachDevice(
Graph* graph, Node* node,
absl::btree_map<std::string, std::vector<const Edge*>>& device_to_edges) {
for (const auto& pair : device_to_edges) {
Node* copy = graph->CopyNode(node);
SetUniqueName(graph, copy);
const std::string device = pair.first;
copy->set_assigned_device_name(device);
for (const Edge* edge : pair.second) {
graph->AddEdge(copy, edge->src_output(), edge->dst(), edge->dst_input());
}
for (Node* src : node->in_nodes()) {
graph->AddControlEdge(src, copy, true);
}
}
graph->RemoveNode(node);
}
}
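// Replicates eligible constants onto the host CPU of each consumer. A
// constant qualifies only if it has more than one out edge, no control
// outputs, at most kMaxSize elements, an assigned CPU device, and successors
// on more than one distinct host device.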
Status ReplicateConstantsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "replicate_constants_pass will replicate constants with "
"number-of-elements <= "
<< kMaxSize;
if (options.graph == nullptr) {
VLOG(1) << "No graph in replicate_constants_pass.";
return absl::OkStatus();
}
Graph* graph = options.graph->get();
if (VLOG_IS_ON(1)) {
VLOG(1) << DumpGraphToFile("before_replicate_constants_pass", *graph,
options.flib_def);
}
int64_t min_skipped = std::numeric_limits<int64_t>::max();
int64_t max_skipped = std::numeric_limits<int64_t>::min();
for (Node* node : graph->nodes()) {
if (!node->IsConstant()) continue;
if (node->out_edges().size() <= 1) continue;
if (HasControlOut(node)) continue;
const TensorProto* value = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "value", &value));
TF_ASSIGN_OR_RETURN(TensorShape shape,
TensorShape::BuildTensorShape(value->tensor_shape()));
if (shape.num_elements() > kMaxSize) {
min_skipped = std::min(min_skipped, shape.num_elements());
max_skipped = std::max(max_skipped, shape.num_elements());
continue;
}
if (!node->has_assigned_device_name()) continue;
if (!HasCpuDevice(node)) continue;
absl::btree_map<std::string, std::vector<const Edge*>> device_to_edges;
TF_RETURN_IF_ERROR(GetSuccessorEdges(node, device_to_edges));
if (device_to_edges.size() <= 1) continue;
ReplicateToEachDevice(graph, node, device_to_edges);
}
if (min_skipped != std::numeric_limits<int64_t>::max()) {
VLOG(1) << "replicate_constants_pass skipped replicating constants with "
"number of elements in the range "
<< min_skipped << " to " << max_skipped << ".";
}
if (VLOG_IS_ON(1)) {
VLOG(1) << DumpGraphToFile("after_replicate_constants_pass", *graph,
options.flib_def);
}
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 3,
ReplicateConstantsPass);
} | #include "tensorflow/core/common_runtime/replicate_constants_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace tensorflow {
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:1/device:CPU:0";
const char kTpu00[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:0";
const char kTpu01[] = "/job:tpu_host_worker/replica:0/task:0/device:TPU:1";
const char kTpu10[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:0";
const char kTpu11[] = "/job:tpu_host_worker/replica:0/task:1/device:TPU:1";
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
CHECK(false) << "Unknown node name: " << name;
return nullptr;
}
Node* GetPredecessor(Node* node) {
auto it = node->in_nodes().begin();
CHECK(it != node->in_nodes().end())
<< "No predecessor for " << node->name() << "\n";
return *it;
}
bool IsEdge(Node* src, Node* dst) {
for (Node* node : src->out_nodes()) {
if (node == dst) return true;
}
return false;
}
TEST(ReplicateConstantsPassTest, TestSmallConstant) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_EQ(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_EQ(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestLargeConstant) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestControlOut) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
Output ctrl_succ =
ops::Const(scope.WithOpName("ctrl_succ"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "ctrl_succ")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
graph->AddControlEdge(GetNode(*graph, "const0"),
GetNode(*graph, "ctrl_succ"));
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestTpuConst) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const0"), 1.0f, TensorShape({}));
ops::Negate dst0(scope.WithOpName("dst0"), const0);
ops::Negate dst1(scope.WithOpName("dst1"), const0);
ops::Negate dst2(scope.WithOpName("dst2"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const0")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst0")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst1")->set_assigned_device_name(kTpu10);
GetNode(*graph, "dst2")->set_assigned_device_name(kTpu10);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(dst0->assigned_device_name(),
GetPredecessor(dst0)->assigned_device_name());
EXPECT_NE(dst1->assigned_device_name(),
GetPredecessor(dst1)->assigned_device_name());
EXPECT_NE(dst2->assigned_device_name(),
GetPredecessor(dst2)->assigned_device_name());
}
TEST(ReplicateConstantsPassTest, TestSmallAndLargeConstants) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output small = ops::Const(scope.WithOpName("small"), 1.0f, TensorShape({}));
Output large =
ops::Const(scope.WithOpName("large"),
{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f});
ops::Add dst0(scope.WithOpName("dst0"), small, large);
ops::Add dst1(scope.WithOpName("dst1"), small, large);
ops::Add dst2(scope.WithOpName("dst2"), small, large);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "small")->set_assigned_device_name(kCpu0);
GetNode(*graph, "large")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst0")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst1")->set_assigned_device_name(kCpu1);
GetNode(*graph, "dst2")->set_assigned_device_name(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* small0 = GetNode(*graph, "small/replicate/_0");
Node* small1 = GetNode(*graph, "small/replicate/_1");
Node* large = GetNode(*graph, "large");
Node* dst0 = GetNode(*graph, "dst0");
Node* dst1 = GetNode(*graph, "dst1");
Node* dst2 = GetNode(*graph, "dst2");
EXPECT_EQ(small0->assigned_device_name(), kCpu0);
EXPECT_EQ(small1->assigned_device_name(), kCpu1);
EXPECT_EQ(large->assigned_device_name(), kCpu0);
EXPECT_EQ(dst0->assigned_device_name(), kCpu0);
EXPECT_EQ(dst1->assigned_device_name(), kCpu1);
  EXPECT_EQ(dst2->assigned_device_name(), kCpu1);
EXPECT_TRUE(IsEdge(small0, dst0));
EXPECT_TRUE(IsEdge(large, dst0));
EXPECT_TRUE(IsEdge(small1, dst1));
EXPECT_TRUE(IsEdge(large, dst1));
EXPECT_TRUE(IsEdge(small1, dst2));
EXPECT_TRUE(IsEdge(large, dst2));
}
TEST(ReplicateConstantsPassTest, TestTpuDestinations) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
Output const0 =
ops::Const(scope.WithOpName("const"), 1.0f, TensorShape({}));
ops::Negate dst00(scope.WithOpName("dst00"), const0);
ops::Negate dst01(scope.WithOpName("dst01"), const0);
ops::Negate dst10(scope.WithOpName("dst10"), const0);
ops::Negate dst11(scope.WithOpName("dst11"), const0);
TF_CHECK_OK(scope.ToGraph(graph.get()));
}
GetNode(*graph, "const")->set_assigned_device_name(kCpu0);
GetNode(*graph, "dst00")->set_assigned_device_name(kTpu00);
GetNode(*graph, "dst01")->set_assigned_device_name(kTpu01);
GetNode(*graph, "dst10")->set_assigned_device_name(kTpu10);
GetNode(*graph, "dst11")->set_assigned_device_name(kTpu11);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ReplicateConstantsPass pass;
TF_ASSERT_OK(pass.Run(options));
GraphDef actual;
graph->ToGraphDef(&actual);
Node* const0 = GetNode(*graph, "const/replicate/_0");
Node* const1 = GetNode(*graph, "const/replicate/_1");
Node* dst00 = GetNode(*graph, "dst00");
Node* dst01 = GetNode(*graph, "dst01");
Node* dst10 = GetNode(*graph, "dst10");
Node* dst11 = GetNode(*graph, "dst11");
EXPECT_EQ(const0->assigned_device_name(), kCpu0);
EXPECT_EQ(const1->assigned_device_name(), kCpu1);
EXPECT_TRUE(IsEdge(const0, dst00));
EXPECT_TRUE(IsEdge(const0, dst01));
EXPECT_TRUE(IsEdge(const1, dst10));
EXPECT_TRUE(IsEdge(const1, dst11));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_constants_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_constants_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5e23e48e-e613-4ffe-b6df-e833fb616742 | cpp | tensorflow/tensorflow | simplify_ici_dummy_variables_pass | tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.cc | tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass_test.cc | #include "tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kTpuExecute = "TPUExecute";
constexpr absl::string_view kParallelExecuteIds = "_parallel_execution_ids";
const char kICIWeightDistributionMlirBridgeMarker[] =
"ici_weight_distribution_mlir_bridge_marker";
std::string GetNewOpName(std::string op_name, int index, int task_id) {
return absl::StrCat(op_name, "_ici_specific_index_", std::to_string(index),
"_task_id_", std::to_string(task_id));
}
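// Returns the TPUExecute nodes whose _parallel_execution_ids attribute places
// them on a non-zero replica. Sets `is_spmd` when a second id group with a
// non-zero value is present.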
std::vector<Node*> GetNonMainReplicaIciTPUExecuteNodes(Graph* graph,
bool& is_spmd) {
std::vector<Node*> tpu_nodes;
for (Node* node : graph->nodes()) {
if (node->type_string() == kTpuExecute &&
HasNodeAttr(node->def(), kParallelExecuteIds)) {
auto parallel_exec_ids = node->attrs().Find(kParallelExecuteIds)->s();
std::vector<std::string> group_vec =
absl::StrSplit(parallel_exec_ids, ',');
if (group_vec.empty()) return tpu_nodes;
std::vector<std::string> replica_vec = absl::StrSplit(group_vec[0], ':');
int replica_id = std::stoi(replica_vec[1]);
if (replica_id != 0) tpu_nodes.push_back(node);
if (group_vec.size() > 1) {
std::vector<std::string> parallel_vec =
absl::StrSplit(group_vec[1], ':');
int parallel_id = std::stoi(parallel_vec[1]);
if (parallel_id != 0) is_spmd = true;
}
}
}
return tpu_nodes;
}
void RedirectEdge(Graph* graph, Node* old_src_node, Node* dst_node,
Node* new_src_node, int input_index) {
  const Edge* delete_edge = nullptr;  // Remains null if no matching edge is found.
for (auto edge : dst_node->in_edges()) {
if (edge->src() == old_src_node) {
delete_edge = edge;
break;
}
}
if (delete_edge == nullptr) return;
graph->RemoveEdge(delete_edge);
graph->AddEdge(new_src_node, 0, dst_node, input_index);
}
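// Returns the CPU:0 device sharing job/replica/task with `tpu_node`'s
// requested (or assigned) device.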
string GetHostDeviceName(Node* tpu_node) {
auto device_name = tpu_node->requested_device();
if (device_name.empty()) device_name = tpu_node->assigned_device_name();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
string host_device_name = DeviceNameUtils::FullName(
parsed_device_name.job, parsed_device_name.replica,
parsed_device_name.task, "CPU", 0);
return host_device_name;
}
std::optional<std::vector<int>> GetOutputShapeVec(Node* node) {
auto output_shapes = node->attrs().Find("_output_shapes");
if (output_shapes == nullptr) return std::nullopt;
auto output_shape = output_shapes->list().shape()[0];
std::vector<int> output_shape_vec;
output_shape_vec.reserve(output_shape.dim_size());
for (auto i = 0; i < output_shape.dim_size(); i++) {
output_shape_vec.push_back(output_shape.dim()[i].size());
}
return output_shape_vec;
}
int GetTPUTaskId(Node* tpu_node) {
auto device_name = tpu_node->requested_device();
if (device_name.empty()) device_name = tpu_node->assigned_device_name();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
return parsed_device_name.task;
}
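// Builds two Const nodes (shape and zero scalar) plus a Fill node on
// `host_device_name`, producing a zero tensor that matches `in_node`'s first
// output shape and dtype. Returns nullptr if the output shape is unavailable
// or the dtype is neither float nor bfloat16.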
Node* BuildFillOp(GraphDefBuilder::Options& bopts, Node* tpu_node,
Node* in_node, int input_index, string host_device_name) {
auto output_shape_vec = GetOutputShapeVec(in_node);
if (!output_shape_vec.has_value()) return nullptr;
auto dtype = in_node->attrs().Find("T")->type();
int tpu_task_id = GetTPUTaskId(tpu_node);
TensorShape tensor_shape;
tensor_shape.AddDim(output_shape_vec.value().size());
Tensor const_op_shape_tensor(DT_INT32, tensor_shape);
for (int i = 0; i < output_shape_vec.value().size(); i++) {
const_op_shape_tensor.flat<int>()(i) = output_shape_vec.value()[i];
}
std::string const_1_name = GetNewOpName("const_1", input_index, tpu_task_id);
Node* fill_dim_input =
ops::SourceOp("Const", bopts.WithName(const_1_name)
.WithAttr("dtype", DT_INT32)
.WithAttr("value", const_op_shape_tensor));
TensorShape fill_dim_output_shape;
fill_dim_output_shape.AddDim(output_shape_vec.value().size());
fill_dim_input->AddAttr("_output_shapes",
std::vector<TensorShape>{fill_dim_output_shape});
std::string const_2_name = GetNewOpName("const_2", input_index, tpu_task_id);
auto scalar_tensor = Tensor(dtype, {});
if (dtype == DT_FLOAT) {
scalar_tensor.scalar<float>()() = 0;
} else if (dtype == DT_BFLOAT16) {
scalar_tensor.scalar<bfloat16>()() = bfloat16(0);
} else {
LOG(ERROR) << "Unsupported data type: ", DataTypeString(dtype);
return nullptr;
}
Node* fill_value_input =
ops::SourceOp("Const", bopts.WithName(const_2_name)
.WithAttr("dtype", dtype)
.WithAttr("value", scalar_tensor));
TensorShape fill_value_output_shape;
fill_value_input->AddAttr("_output_shapes",
std::vector<TensorShape>{fill_value_output_shape});
std::string fill_name = GetNewOpName("fill", input_index, tpu_task_id);
Node* new_fill =
ops::BinaryOp("Fill", fill_dim_input, fill_value_input,
bopts.WithName(fill_name).WithAttr("T", dtype));
TensorShape new_output_shape;
for (auto output_shape : output_shape_vec.value()) {
new_output_shape.AddDim(output_shape);
}
new_fill->AddAttr("_output_shapes",
std::vector<TensorShape>{new_output_shape});
new_fill->AddAttr("_xla_inferred_shapes",
std::vector<TensorShape>{new_output_shape});
fill_dim_input->set_requested_device(host_device_name);
fill_value_input->set_requested_device(host_device_name);
new_fill->set_requested_device(host_device_name);
return new_fill;
}
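// For each TPUExecute node whose `input_index`-th input carries the ICI
// weight-distribution marker, rewires that input to a zero Fill built on the
// node's host device, reusing a single Fill per host.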
absl::Status ReplaceIciDummyVariables(Graph* graph, int input_index,
std::vector<Node*> tpu_nodes,
GraphDefBuilder::Options& bopts) {
absl::flat_hash_map<std::string, Node*> device_to_node_map;
for (Node* tpu_node : tpu_nodes) {
Node* in_node;
TF_RETURN_IF_ERROR(tpu_node->input_node(input_index, &in_node));
if (!in_node->attrs().Find(kICIWeightDistributionMlirBridgeMarker)) {
continue;
}
string host_device_name = GetHostDeviceName(tpu_node);
if (device_to_node_map.contains(host_device_name)) {
RedirectEdge(graph, in_node, tpu_node,
device_to_node_map[host_device_name], input_index);
continue;
}
Node* new_fill =
BuildFillOp(bopts, tpu_node, in_node, input_index, host_device_name);
if (new_fill == nullptr) continue;
device_to_node_map[host_device_name] = new_fill;
RedirectEdge(graph, in_node, tpu_node, device_to_node_map[host_device_name],
input_index);
}
return absl::OkStatus();
}
}
bool ShouldRunPass(const GraphOptimizationPassOptions& options) {
if (!flags::Global().enable_tf2min_ici_weight.value()) {
VLOG(1) << "SimplifyIciDummyVariablesPass is disabled.";
return false;
}
VLOG(1) << "SimplifyIciDummyVariablesPass is enabled.";
if (options.graph == nullptr) {
LOG(INFO) << "No graph in simplify_ici_dummy_variables_pass.";
return false;
}
return true;
}
Status SimplifyIciDummyVariablesPass::Run(
const GraphOptimizationPassOptions& options) {
if (!ShouldRunPass(options)) {
return absl::OkStatus();
}
Graph* graph = options.graph->get();
VLOG(1) << DumpGraphToFile("before_simplify_ici_dummy_variables_pass", *graph,
options.flib_def);
absl::Status status;
GraphDefBuilder::Options bopts(graph, &status);
if (!status.ok()) {
LOG(ERROR) << "GraphDefBuilder::Option failed to initialize.";
return status;
}
bool is_spmd = false;
std::vector<Node*> tpu_nodes =
GetNonMainReplicaIciTPUExecuteNodes(graph, is_spmd);
if (!is_spmd) {
VLOG(1) << "Not SPMD case, skip SimplifyIciDummyVariablesPass.";
return absl::OkStatus();
}
if (tpu_nodes.empty()) {
VLOG(1) << "tpu_nodes is empty, skip SimplifyIciDummyVariablesPass.";
return absl::OkStatus();
}
for (int i = 0; i < tpu_nodes[0]->num_inputs(); ++i) {
auto replace_status = ReplaceIciDummyVariables(graph, i, tpu_nodes, bopts);
if (!replace_status.ok()) {
LOG(ERROR) << "Replace ici dummy variables failed.";
return replace_status;
}
}
RemoveDeadNodes(graph);
VLOG(1) << DumpGraphToFile("after_simplify_ici_dummy_variables_pass", *graph,
options.flib_def);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 49,
SimplifyIciDummyVariablesPass);
} | #include "tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.h"
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/test.h"
namespace tensorflow {
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/core/common_runtime/testdata/");
}
TEST(SimplifyIciDummyVariablesPassTest, flag_is_false) {
flags::Global().enable_tf2min_ici_weight.reset(false);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
std::string graph_path =
TestDataPath() + "simplify_ici_dummy_variables_pass_before.pbtxt";
tensorflow::GraphDef graph_def;
absl::Status load_graph_status =
ReadTextProto(tensorflow::Env::Default(), graph_path, &graph_def);
EXPECT_EQ(load_graph_status.ok(), true);
TF_EXPECT_OK(ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def,
graph.get()));
GraphOptimizationPassOptions options;
options.graph = &graph;
SimplifyIciDummyVariablesPass pass;
TF_ASSERT_OK(pass.Run(options));
Node* fill_1_dim = GetNode(*graph, "const_1_ici_specific_index_0_task_id_2");
Node* fill_1_value =
GetNode(*graph, "const_2_ici_specific_index_0_task_id_2");
Node* fill_1 = GetNode(*graph, "fill_ici_specific_index_0_task_id_2");
EXPECT_EQ(fill_1_dim, nullptr);
EXPECT_EQ(fill_1_value, nullptr);
EXPECT_EQ(fill_1, nullptr);
Node* fill_2_dim = GetNode(*graph, "const_1_ici_specific_index_1_task_id_2");
Node* fill_2_value =
GetNode(*graph, "const_2_ici_specific_index_1_task_id_2");
Node* fill_2 = GetNode(*graph, "fill_ici_specific_index_1_task_id_2");
EXPECT_EQ(fill_2_dim, nullptr);
EXPECT_EQ(fill_2_value, nullptr);
EXPECT_EQ(fill_2, nullptr);
}
TEST(SimplifyIciDummyVariablesPassTest, replace_dummy_variable) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
std::string graph_path =
TestDataPath() + "simplify_ici_dummy_variables_pass_before.pbtxt";
tensorflow::GraphDef graph_def;
  absl::Status load_graph_status =
ReadTextProto(tensorflow::Env::Default(), graph_path, &graph_def);
EXPECT_EQ(load_graph_status.ok(), true);
TF_EXPECT_OK(ConvertGraphDefToGraph(GraphConstructorOptions(), graph_def,
graph.get()));
GraphOptimizationPassOptions options;
options.graph = &graph;
SimplifyIciDummyVariablesPass pass;
TF_ASSERT_OK(pass.Run(options));
Node* fill_1_dim = GetNode(*graph, "const_1_ici_specific_index_0_task_id_2");
Node* fill_1_value =
GetNode(*graph, "const_2_ici_specific_index_0_task_id_2");
Node* fill_1 = GetNode(*graph, "fill_ici_specific_index_0_task_id_2");
EXPECT_NE(fill_1_dim, nullptr);
EXPECT_NE(fill_1_value, nullptr);
EXPECT_NE(fill_1, nullptr);
EXPECT_EQ(fill_1_dim->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_1_value->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_1->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
Node* fill_2_dim = GetNode(*graph, "const_1_ici_specific_index_1_task_id_2");
Node* fill_2_value =
GetNode(*graph, "const_2_ici_specific_index_1_task_id_2");
Node* fill_2 = GetNode(*graph, "fill_ici_specific_index_1_task_id_2");
EXPECT_NE(fill_2_dim, nullptr);
EXPECT_NE(fill_2_value, nullptr);
EXPECT_NE(fill_2, nullptr);
EXPECT_EQ(fill_2_dim->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_2_value->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(fill_2->requested_device(),
"/job:tpu_host_worker/replica:0/task:2/device:CPU:0");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/simplify_ici_dummy_variables_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c396709c-ef47-4f4e-8cf4-224ba0257c00 | cpp | tensorflow/tensorflow | composite_device | tensorflow/core/common_runtime/composite_device.cc | tensorflow/core/common_runtime/composite_device_test.cc | #include "tensorflow/core/common_runtime/composite_device.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
const char* const kCompositeDeviceType = "COMPOSITE";
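// Builds a COMPOSITE device named after `host_name` with the given unique id.
// All underlying devices must parse successfully and share one device type.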
std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(
const std::vector<string>& underlying_devices, const int unique_device_id,
const DeviceNameUtils::ParsedName& host_name, Status* status) {
DeviceNameUtils::ParsedName parsed_name = host_name;
parsed_name.type = kCompositeDeviceType;
parsed_name.id = unique_device_id;
const string device_name = DeviceNameUtils::ParsedNameToString(parsed_name);
return CompositeDevice::MakeDevice(underlying_devices, device_name, status);
}
std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(
const std::vector<string>& underlying_devices, const string& device_name,
Status* status) {
if (underlying_devices.empty()) {
status->Update(
errors::InvalidArgument("underlying_devices should not be empty."));
return nullptr;
}
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(underlying_devices.at(0), &parsed_name)) {
status->Update(tensorflow::errors::InvalidArgument(
"Cannot parse device name ", underlying_devices.at(0),
" when creating CompositeDevice."));
return nullptr;
}
const string& underlying_type = parsed_name.type;
for (int i = 1; i < underlying_devices.size(); ++i) {
DeviceNameUtils::ParsedName name;
if (!DeviceNameUtils::ParseFullName(underlying_devices.at(i), &name)) {
status->Update(tensorflow::errors::InvalidArgument(
"Cannot parse device name ", underlying_devices.at(i),
" when creating CompositeDevice."));
return nullptr;
}
if (name.type != underlying_type) {
status->Update(tensorflow::errors::InvalidArgument(
"Expect device type ", parsed_name.type, "; but got type ", name.type,
" from device: ", underlying_devices.at(i),
" when creating CompositeDevice."));
return nullptr;
}
}
DeviceAttributes device_attributes;
device_attributes.set_name(device_name);
device_attributes.set_device_type(kCompositeDeviceType);
return absl::WrapUnique(
new CompositeDevice(device_attributes, underlying_devices));
}
} | #include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
TEST(CompositeDeviceTest, Basic) {
const string host_name = "/job:localhost/replica:0/task:0/device:CPU:0";
DeviceNameUtils::ParsedName parsed_host_name;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(host_name, &parsed_host_name));
std::vector<string> underlying_devices;
{
Status status;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice(underlying_devices, 0,
parsed_host_name, &status);
EXPECT_EQ(composite_device, nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, status.code());
EXPECT_TRUE(absl::StrContains(status.message(),
"underlying_devices should not be empty"))
<< status.ToString();
}
{
Status status;
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:CPU:0");
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:CPU:1");
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice(underlying_devices, 0,
parsed_host_name, &status);
TF_ASSERT_OK(status);
EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);
EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());
}
{
Status status;
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:GPU:0");
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice(underlying_devices, 1,
parsed_host_name, &status);
EXPECT_EQ(composite_device, nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, status.code());
EXPECT_TRUE(absl::StrContains(status.message(),
"Expect device type CPU; but got type GPU"))
<< status.ToString();
}
}
TEST(CompositeDeviceTest, DeviceName) {
const string composite_device_name =
"/job:localhost/replica:0/task:0/device:CPU:10";
std::vector<string> underlying_devices;
underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:0");
underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:1");
Status status;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice(underlying_devices, composite_device_name,
&status);
TF_ASSERT_OK(status);
EXPECT_EQ(composite_device->name(), composite_device_name);
EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);
EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
458adafa-04cc-4dde-85aa-2a8742b9d8ab | cpp | tensorflow/tensorflow | cost_measurement_registry | tensorflow/core/common_runtime/cost_measurement_registry.cc | tensorflow/core/common_runtime/cost_measurement_registry_test.cc | #include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
using RegistrationMap =
absl::flat_hash_map<std::string, CostMeasurementRegistry::Creator>;
RegistrationMap* GetRegistrationMap() {
static RegistrationMap* registered_cost_measurements = new RegistrationMap;
return registered_cost_measurements;
}
}
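// Returns a new CostMeasurement registered under `name`, or nullptr (logging
// the first miss) when no creator has been registered for that name.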
std::unique_ptr<CostMeasurement> CostMeasurementRegistry::CreateByNameOrNull(
const std::string& name, const CostMeasurement::Context& context) {
const auto it = GetRegistrationMap()->find(name);
if (it == GetRegistrationMap()->end()) {
LOG_FIRST_N(ERROR, 1) << "Cost type " << name << " is unregistered.";
return nullptr;
}
return it->second(context);
}
void CostMeasurementRegistry::RegisterCostMeasurement(absl::string_view name,
Creator creator) {
const auto it = GetRegistrationMap()->find(name);
CHECK(it == GetRegistrationMap()->end())
<< "CostMeasurement " << name << " is registered twice.";
GetRegistrationMap()->emplace(name, std::move(creator));
}
} | #include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr char kTestCostName[] = "test";
class TestCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::ZeroDuration(); }
absl::string_view GetCostType() const override { return kTestCostName; }
};
REGISTER_COST_MEASUREMENT(kTestCostName, TestCostMeasurement);
TEST(CostMeasurementRegistryTest, Basic) {
const CostMeasurement::Context context;
std::unique_ptr<const CostMeasurement> test_cost_measurement =
CostMeasurementRegistry::CreateByNameOrNull("unregistered", context);
EXPECT_EQ(test_cost_measurement, nullptr);
test_cost_measurement =
CostMeasurementRegistry::CreateByNameOrNull(kTestCostName, context);
EXPECT_NE(test_cost_measurement, nullptr);
}
TEST(CostMeasurementRegistryDeathTest, CrashWhenRegisterTwice) {
const auto creator = [](const CostMeasurement::Context& context) {
return std::make_unique<TestCostMeasurement>(context);
};
EXPECT_DEATH(
CostMeasurementRegistry::RegisterCostMeasurement(kTestCostName, creator),
absl::StrCat("CostMeasurement ", kTestCostName, " is registered twice."));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_measurement_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_measurement_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3de0f6a1-48f4-438a-b8a4-3ea5085c9e4a | cpp | tensorflow/tensorflow | ring_gatherer | tensorflow/core/common_runtime/ring_gatherer.cc | tensorflow/core/common_runtime/ring_gatherer_test.cc | #include "tensorflow/core/common_runtime/ring_gatherer.h"
#include <stdlib.h>
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
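// RingGather supports only a single subdivision at offset 0; an empty offset
// list defaults to {0}.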
Status RingGatherer::InitializeCollectiveParams(CollectiveParams* col_params) {
DCHECK_EQ(col_params->instance.type, GATHER_COLLECTIVE);
DCHECK_EQ(col_params->instance.impl_details.collective_name, "RingGather");
if (!col_params->instance.impl_details.subdiv_offsets.empty() &&
(col_params->instance.impl_details.subdiv_offsets.size() > 1 ||
col_params->instance.impl_details.subdiv_offsets[0] != 0)) {
return errors::InvalidArgument(
"RingGather cannot take any subdiv offset other than 0.");
}
if (col_params->instance.impl_details.subdiv_offsets.empty()) {
col_params->instance.impl_details.subdiv_offsets.push_back(0);
}
return RingAlg::InitializeCollectiveParams(col_params);
}
void RingGatherer::Run(StatusCallback done) {
DCHECK(col_ctx_);
DCHECK(col_params_);
done_ = std::move(done);
group_size_ = col_params_->group.group_size;
num_subdivs_ = static_cast<int>(
col_params_->instance.impl_details.subdiv_permutations.size());
DCHECK_GT(num_subdivs_, 0);
if (VLOG_IS_ON(1)) {
string buf;
for (int r = 0; r < col_params_->group.members.size(); ++r) {
strings::StrAppend(&buf, "dev ", r, " : ",
col_params_->group.members[r].device.name(), "\n");
}
for (int sd = 0;
sd < col_params_->instance.impl_details.subdiv_permutations.size();
++sd) {
strings::StrAppend(&buf, "\nsubdiv ", sd, " perm: ");
for (auto x :
col_params_->instance.impl_details.subdiv_permutations[sd]) {
strings::StrAppend(&buf, x, ", ");
}
}
VLOG(1) << "RingGatherer::Run for device " << col_ctx_->device_name
<< " default_rank " << col_params_->default_rank << "\n"
<< buf;
}
AllocatorAttributes attr = col_ctx_->op_ctx->output_alloc_attr(0);
ca_.reset(MakeCollectiveAdapter(col_ctx_->output, group_size_ * num_subdivs_,
col_ctx_->device->GetAllocator(attr),
                                  false));
{
tsl::profiler::TraceMe activity("MemCpyAsync",
tsl::profiler::TraceMeLevel::kInfo);
Notification note;
Status status;
Tensor alias_chunk(ca_->ChunkAlias(col_params_->subdiv_rank[0]));
CollectiveRemoteAccessLocal::MemCpyAsync(
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), col_ctx_->input, &alias_chunk,
        0, [&note, &status](const Status& s) {
status.Update(s);
note.Notify();
});
note.WaitForNotification();
if (!status.ok()) {
done_(status);
return;
}
}
Finish(RunAsyncParts());
}
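// Drives the ring algorithm: every (chunk, subdiv) RingField advances through
// RF_INIT -> RF_RECV -> RF_SEND_READY -> RF_SEND -> RF_DONE via a
// producer-consumer queue. On failure the collective aborts and outstanding
// sends/receives are drained before returning. Returns true iff no abort
// occurred.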
bool RingGatherer::RunAsyncParts() {
rfv_.clear();
rfv_.resize(group_size_ * num_subdivs_);
PCQueue ready_queue;
for (int chunk_idx = 0; chunk_idx < group_size_; ++chunk_idx) {
for (int subdiv_idx = 0; subdiv_idx < num_subdivs_; ++subdiv_idx) {
int rf_index = (chunk_idx * num_subdivs_) + subdiv_idx;
InitRingField(&rfv_[rf_index], chunk_idx, subdiv_idx, rf_index);
ready_queue.Enqueue(&rfv_[rf_index]);
}
}
const DeviceBase::AcceleratorDeviceInfo* gpu_info =
col_ctx_->device->tensorflow_accelerator_device_info();
if (gpu_info) {
tsl::profiler::TraceMe activity("WaitForQueuedEvents",
tsl::profiler::TraceMeLevel::kInfo);
Notification note;
Status s = gpu_info->default_context->ThenExecute(
        col_ctx_->device, gpu_info->stream, [&note]() { note.Notify(); });
if (s.ok()) {
note.WaitForNotification();
} else {
mutex_lock l(status_mu_);
status_ =
errors::Internal("Failed to dispatch ThenExecute in RingGatherer");
return false;
}
}
int field_done_count = 0;
int send_pending_count = 0;
int recv_pending_count = 0;
std::atomic<bool> aborted(false);
{
tsl::profiler::TraceMe activity("Loop", tsl::profiler::TraceMeLevel::kInfo);
while (field_done_count < rfv_.size()) {
VLOG(4) << FieldState();
RingField* rf = ready_queue.Dequeue();
bool dispatched = false;
do {
if (aborted) {
ready_queue.Enqueue(rf);
break;
}
switch (rf->action) {
case RF_INIT:
if (rf->do_recv) {
rf->action = RF_RECV;
auto requeue = [this, rf, &ready_queue, &aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchRecv(rf, requeue);
dispatched = true;
++recv_pending_count;
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_RECV:
DCHECK_GT(recv_pending_count, 0);
--recv_pending_count;
rf->action = RF_SEND_READY;
break;
case RF_REDUCE:
TF_FALLTHROUGH_INTENDED;
case RF_FINALIZE:
TF_FALLTHROUGH_INTENDED;
case RF_SEND_READY:
if (rf->do_send) {
rf->action = RF_SEND;
auto send_complete = [this, rf, &ready_queue,
&aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchSend(rf, send_complete);
dispatched = true;
++send_pending_count;
} else {
rf->action = RF_DONE;
}
break;
case RF_SEND:
DCHECK_GT(send_pending_count, 0);
--send_pending_count;
rf->action = RF_DONE;
break;
case RF_DONE:
break;
}
if (rf->action == RF_DONE) {
++field_done_count;
break;
}
} while (!dispatched);
if (aborted) break;
}
if (aborted) {
while ((send_pending_count > 0) || (recv_pending_count > 0)) {
RingField* rf = ready_queue.Dequeue();
switch (rf->action) {
case RF_RECV:
--recv_pending_count;
break;
case RF_SEND:
--send_pending_count;
break;
default: {
}
}
}
}
}
DCHECK_EQ(send_pending_count, 0);
DCHECK_EQ(recv_pending_count, 0);
VLOG(2) << this << " device=" << col_ctx_->device_name << " finish;"
<< " final value " << TensorDebugString(ca_->Value());
return !aborted;
}
namespace {
REGISTER_COLLECTIVE(RingGather, RingGatherer);
}
} | #include "tensorflow/core/common_runtime/ring_gatherer.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
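// Test harness: builds a local multi-worker, multi-device collective
// environment, runs RingGather concurrently on every device instance, and
// checks each output against the expected concatenation of all inputs.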
class RingGathererTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int num_subdivs, int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, num_subdivs, dtype, shape, test_env_.get()));
}
}
}
void Gather(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoGather();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < static_cast<int>(instances_.size())) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int num_subdivs, int tensor_len,
int fail_after) {
Init(num_workers, num_devices, dtype, TensorShape({tensor_len}),
device_type, num_subdivs, fail_after);
int32_t output_len = tensor_len * num_workers * num_devices;
std::vector<T> expected(output_len, 0.0);
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
int32_t instance_offset = di * tensor_len;
instances_[di]->InitTensor(
[instance_offset, &expected, dtype, di](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
if (dtype == DT_INT32 || dtype == DT_INT64) {
value = di * 10 + i;
}
t->flat<T>()(i) = static_cast<T>(value);
expected[instance_offset + i] = value;
}
});
}
Gather(fail_after);
if (fail_after > 0) {
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
EXPECT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
}
} else {
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(test::AsTensor<T>(expected),
instances_[di]->output_tensor());
}
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, int num_subdivs, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env), input_tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "RingGather",
GATHER_COLLECTIVE, dtype, shape);
if (num_subdivs > 0) {
col_params_->instance.impl_details.subdiv_offsets =
GenerateEvenSubdivOffsets(test_env->num_devices_per_worker,
num_subdivs);
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
TensorShape output_shape = shape;
output_shape.set_dim(
0, output_shape.dim_size(0) * col_params_->group.group_size);
output_tensor_ = Tensor(dtype, output_shape);
}
void InitTensor(const std::function<void(Tensor*)>& init_f) {
init_f(&input_tensor_);
}
void DoGather() {
status_ = RunCollective(test_env_, col_params_.get(), device_,
&input_tensor_, &output_tensor_);
}
const Tensor& input_tensor() { return input_tensor_; }
const Tensor& output_tensor() { return output_tensor_; }
CollectiveTestEnv* test_env_;
Tensor input_tensor_;
Tensor output_tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
};
class RingGathererInitParamsTest : public ::testing::Test {
protected:
void RunSubdivPermsTest(
CollectiveParams* cp,
const std::vector<std::vector<int>>& expected_subdiv_perms,
const std::vector<int>& expected_subdiv_rank) {
cp->instance.impl_details.subdiv_permutations.clear();
cp->subdiv_rank.clear();
core::RefCountPtr<RingGatherer> gatherer(new RingGatherer());
TF_CHECK_OK(gatherer->InitializeCollectiveParams(cp));
EXPECT_EQ(expected_subdiv_perms,
cp->instance.impl_details.subdiv_permutations);
EXPECT_EQ(expected_subdiv_rank, cp->subdiv_rank);
}
};
TEST_F(RingGathererInitParamsTest, SpecifiedSubdivs) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 3;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingGather",
GATHER_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets = {};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
{0});
cp->instance.impl_details.subdiv_offsets = {0};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
{0});
cp->default_rank = 3;
cp->instance.impl_details.subdiv_offsets = {};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
{3});
}
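// DEF_TEST parameters: B = data type, T = device type, W = workers,
// D = devices per worker, S = subdivision count, L = input tensor length,
// A = if nonzero, make the test RMA layer fail after A operations (the test
// then expects "Deliberate failure" statuses).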
#define DEF_TEST(B, T, W, D, S, L, A) \
TEST_F(RingGathererTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Sdiv##S##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 4096, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 1, 32768, 0)
DEF_TEST(DOUBLE, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(INT32, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT32, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(INT64, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, CPU, 2, 8, 1, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 1)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 7)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 11)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4096, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 32768, 0)
DEF_TEST(FLOAT, GPU, 1, 4, 1, 32768, 0)
DEF_TEST(DOUBLE, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 2)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 5)
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_gatherer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_gatherer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99f5a28d-e886-4282-beb8-9ab9d5c4ff0d | cpp | tensorflow/tensorflow | optimize_function_graph_utils | tensorflow/core/common_runtime/optimize_function_graph_utils.cc | tensorflow/core/common_runtime/optimize_function_graph_utils_test.cc | #include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <algorithm>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
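// Multi-device functions support only single-tensor inputs and outputs, so
// list-typed (number_attr / type_list_attr) arguments are rejected up front.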
Status ValidateNoListArguments(
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args, const char* arg_type,
const string& function_name) {
for (const OpDef::ArgDef& arg : args) {
if (!arg.number_attr().empty() || !arg.type_list_attr().empty()) {
return errors::InvalidArgument(
"Function ", function_name, " has an ", arg_type, " named \"",
arg.name(),
"\" that is a list of tensors."
          " Multi-device functions support only single-tensor inputs"
" and outputs");
}
}
return absl::OkStatus();
}
Status ValidateMultiDeviceOptions(
const FunctionDef& fdef,
const FunctionLibraryRuntime::InstantiateOptions& options) {
const OpDef& signature = fdef.signature();
TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.input_arg(), "input",
signature.name()));
TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.output_arg(), "output",
signature.name()));
if (fdef.attr().count(FunctionLibraryDefinition::kIntsOnDeviceAttr) != 0 &&
fdef.attr().at(FunctionLibraryDefinition::kIntsOnDeviceAttr).b()) {
return errors::Unimplemented(
"Function '", signature.name(), "' has `",
FunctionLibraryDefinition::kIntsOnDeviceAttr,
"` attribute set. This attribute is not currently supported by "
"multi-device functions.");
}
if (options.input_devices.size() != signature.input_arg_size()) {
return errors::InvalidArgument(
"InstantiateOptions.input_devices must have the same length "
"as the number of arguments: input_devices length = ",
options.input_devices.size(),
" number of arguments = ", signature.input_arg_size());
}
if (!options.output_devices.empty() &&
options.output_devices.size() != signature.output_arg_size()) {
return errors::InvalidArgument(
"InstantiateOptions.output_devices must either be empty or have the "
"same length as the number of arguments: output_devices length = ",
options.output_devices.size(),
" number of arguments = ", signature.output_arg_size());
}
return absl::OkStatus();
}
Status SetArgShape(const std::unordered_map<int, DtypeAndPartialTensorShape>&
input_resource_dtypes_and_shapes,
const std::vector<Node*>& arg_nodes) {
for (Node* n : arg_nodes) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
if (dtype == DT_RESOURCE) {
auto dtype_and_shape_iter = input_resource_dtypes_and_shapes.find(index);
if (dtype_and_shape_iter != input_resource_dtypes_and_shapes.end()) {
AttrValue dtype_attr_value;
dtype_attr_value.mutable_list()->add_type(
dtype_and_shape_iter->second.dtype);
n->AddAttr("_handle_dtypes", dtype_attr_value);
TensorShapeProto shape_proto;
dtype_and_shape_iter->second.shape.AsProto(&shape_proto);
AttrValue shape_attr_value;
*shape_attr_value.mutable_list()->add_shape() = shape_proto;
n->AddAttr("_handle_shapes", shape_attr_value);
}
}
}
return absl::OkStatus();
}
const string* AssignedOrRequestedDeviceName(const Node& node) {
if (node.has_assigned_device_name()) {
return &node.assigned_device_name();
}
return &node.requested_device();
}
void GetColocationGroup(const Node* node, string* group) {
static const StringPiece kColocationAttrNameStringPiece(kColocationAttrName);
const AttrValue* attr_value =
node->attrs().Find(kColocationAttrNameStringPiece);
if (attr_value != nullptr && attr_value->has_list() &&
attr_value->list().s_size() > 0) {
*group = attr_value->list().s(0);
}
}
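// Serializes the optimized graph info under dir_name. The proto is written
// to a unique temp file and then renamed into place, so readers never see a
// partially written entry (assuming the filesystem supports atomic moves).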
Status WriteToCache(const std::string& dir_name, const std::string& file_name,
OptimizedFunctionGraphInfo& optimized_function_graph_info,
Env* env) {
const absl::Time cache_writing_start_time = absl::Now();
OptimizedFunctionGraph optimized_function_graph_proto;
string optimized_function_graph_proto_str;
optimized_function_graph_proto =
OptimizedFunctionGraphInfo::ToProto(optimized_function_graph_info);
optimized_function_graph_proto.SerializeToString(
&optimized_function_graph_proto_str);
if (!env->FileExists(dir_name).ok()) {
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir_name));
}
{
bool has_atomic_move = false;
TF_RETURN_IF_ERROR(env->HasAtomicMove(dir_name, &has_atomic_move));
if (!has_atomic_move) {
LOG_EVERY_POW_2(WARNING)
<< "Filesystem for OptimizedFunctionGraphInfo persistent cache at "
<< dir_name
<< " does not support atomic moves. Therefore the "
"persistent cache is racy if you have multiple optimizations "
"occurring simultaneously!";
}
}
std::string temp_file_name = file_name;
if (!env->CreateUniqueFileName(&temp_file_name, ".pb.tmp")) {
return absl::UnavailableError(
absl::StrCat("Could not create a unique file inside ", dir_name));
}
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(
env, temp_file_name, optimized_function_graph_proto_str));
TF_RETURN_IF_ERROR(env->RenameFile(temp_file_name, file_name));
const absl::Duration cache_writing_duration =
absl::Now() - cache_writing_start_time;
VLOG(3) << "Finished writing Tensorflow optimized graph into cache; took "
<< absl::ToInt64Milliseconds(cache_writing_duration)
<< " msecs, file name: " << file_name;
return absl::OkStatus();
}
absl::StatusOr<OptimizedFunctionGraphInfo> ReadFromCache(
const string& file_name, Env* env) {
absl::Time cache_reading_start_time = absl::Now();
OptimizedFunctionGraph optimized_function_graph_proto;
string optimized_function_graph_proto_str;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
env, file_name, &optimized_function_graph_proto_str));
optimized_function_graph_proto.ParseFromString(
optimized_function_graph_proto_str);
TF_ASSIGN_OR_RETURN(absl::StatusOr<OptimizedFunctionGraphInfo>
optimized_function_graph_info_restored,
OptimizedFunctionGraphInfo::FromProto(
std::move(optimized_function_graph_proto)));
const absl::Duration cache_reading_duration =
absl::Now() - cache_reading_start_time;
VLOG(3) << "Finished reading Tensorflow optimized graph from cache; took "
<< absl::ToInt64Milliseconds(cache_reading_duration) << " msecs";
return optimized_function_graph_info_restored;
}
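// Cache file name layout: <dir>/<job>_<task>_<plain_func_name>_<num_nodes>.
// Any trailing "_<uid>" suffix of the function name is stripped so repeated
// instantiations of the same function share one cache entry.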
string GetFileCacheName(const string& dir_name, const string& function_name,
const FunctionDef* fdef) {
string plain_func_name = function_name;
if (absl::StrContains(function_name, "_")) {
std::vector<string> func_name_tokens = absl::StrSplit(function_name, '_');
func_name_tokens.pop_back();
plain_func_name = absl::StrJoin(func_name_tokens, "_");
}
return absl::StrCat(dir_name, "/", tsl::port::JobName(), "_",
tsl::port::TaskId(), "_", plain_func_name, "_",
fdef->node_def_size());
}
Status GetGraphAndArgRets(const string& function_name, AttrSlice attrs,
core::RefCountPtr<FunctionRecord>&& fdef,
const FunctionLibraryDefinition* lib_def,
std::unique_ptr<Graph>* graph,
std::vector<Node*>* arg_nodes,
std::vector<Node*>* ret_nodes,
std::vector<string>* ret_node_names,
DataTypeVector* ret_types,
std::vector<string>* control_ret_node_names) {
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(std::move(fdef), attrs, lib_def, &fbody));
if (!fbody) {
LOG(ERROR) << "Failed to get FunctionBody for \"" << function_name << "\"";
return errors::Internal("Failed to construct FunctionBody for ",
function_name);
}
*graph = std::unique_ptr<Graph>(fbody->graph);
arg_nodes->reserve(fbody->arg_nodes.size());
std::copy(fbody->arg_nodes.begin(), fbody->arg_nodes.end(),
std::back_inserter(*arg_nodes));
ret_nodes->reserve(fbody->ret_nodes.size());
std::copy(fbody->ret_nodes.begin(), fbody->ret_nodes.end(),
std::back_inserter(*ret_nodes));
fbody->graph = nullptr;
ret_node_names->reserve(fbody->ret_nodes.size());
for (const Node* node : fbody->ret_nodes) {
ret_node_names->push_back(node->name());
}
for (const auto& ret_type : fbody->ret_types) {
ret_types->push_back(ret_type);
}
control_ret_node_names->reserve(fbody->control_ret_nodes.size());
for (const Node* node : fbody->control_ret_nodes) {
control_ret_node_names->push_back(node->name());
}
return absl::OkStatus();
}
}
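// Pins _Arg nodes to the caller-specified input devices. For _Retval nodes
// without explicit output devices, walks back through Identity chains to
// adopt the producer's device or colocation group, with special handling for
// resource outputs and specs matching multiple devices.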
Status PinArgsAndRets(const std::vector<string>& input_devices,
const std::vector<string>& output_devices,
const DeviceSet& device_set,
const std::vector<Node*>& arg_nodes,
const std::vector<Node*>& ret_nodes,
const FunctionLibraryDefinition* lib_def,
Device* default_device) {
for (Node* node : arg_nodes) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int64_t index = attr_value->i();
node->set_assigned_device_name(input_devices[index]);
VLOG(3) << "Setting device to " << input_devices[index] << " for node "
<< node->name();
}
for (Node* node : ret_nodes) {
if (output_devices.empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "T", &dtype));
VLOG(3) << "Trying to determine device for node " << node->name()
<< "[T=" << DataTypeString(dtype) << "]";
for (const auto& it : node->in_edges()) {
if (it->IsControlEdge()) continue;
Node* src_node = it->src();
const string* src_device = AssignedOrRequestedDeviceName(*src_node);
string colocation_group = "";
GetColocationGroup(src_node, &colocation_group);
VLOG(3) << "Considering src: " << src_node->name()
<< " src_device: " << *src_device
<< " colo group: " << colocation_group;
while (src_device->empty() && colocation_group.empty() &&
src_node->IsIdentity()) {
Node* input_node;
TF_RETURN_IF_ERROR(src_node->input_node(0, &input_node));
src_node = input_node;
src_device = AssignedOrRequestedDeviceName(*src_node);
GetColocationGroup(src_node, &colocation_group);
VLOG(3) << "Considering src: " << src_node->name()
<< " src_device: " << *src_device
<< " colo group: " << colocation_group;
}
const bool can_use_src_node_device =
!(dtype == DT_RESOURCE && IsFunctionCall(*lib_def, *src_node));
if (!colocation_group.empty()) {
AttrValue::ListValue colo_attr;
colo_attr.add_s(colocation_group);
std::vector<string> colo_slice = {colocation_group};
node->AddAttr(kColocationAttrName, colo_slice);
} else if (!src_device->empty() && can_use_src_node_device) {
if (dtype == DT_VARIANT && !src_node->IsArg()) {
continue;
}
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(*src_device, &parsed)) {
return errors::InvalidArgument(
"Failed to parse explicit device specification ", *src_device);
}
std::vector<Device*> matching_devices;
device_set.FindMatchingDevices(parsed, &matching_devices);
if (matching_devices.empty()) {
if (default_device != nullptr) {
matching_devices.push_back(default_device);
} else {
return errors::InvalidArgument(
"Unable to find any devices for spec ", *src_device);
}
} else if (matching_devices.size() != 1) {
bool on_same_task = true;
for (int i = 1; i < matching_devices.size(); ++i) {
if (!DeviceNameUtils::IsSameAddressSpace(
matching_devices.at(0)->parsed_name(),
matching_devices.at(i)->parsed_name())) {
on_same_task = false;
break;
}
}
if (on_same_task) {
continue;
}
if (default_device != nullptr) {
int colocated_on_default_device = 0;
for (int i = 0; i < matching_devices.size(); ++i) {
if (DeviceNameUtils::IsSameAddressSpace(
default_device->parsed_name(),
matching_devices.at(i)->parsed_name())) {
colocated_on_default_device++;
}
}
if (colocated_on_default_device == 1) {
continue;
}
}
string devices = "[";
for (Device* device : matching_devices) {
devices.append(device->name());
devices.append(", ");
}
if (devices.size() > 2) {
devices.resize(devices.size() - 2);
}
devices.append("]");
return errors::InvalidArgument(
*src_device,
"When FunctionLibraryRuntime::Options.output_devices are "
"not specified for a multi-device function, the device "
"specification on the output node must match exactly one "
"device. Matched devices are ",
devices);
}
VLOG(3) << "Setting output device to " << matching_devices[0]->name()
<< " for node " << SummarizeNode(*node);
node->set_assigned_device_name(matching_devices[0]->name());
} else if (!src_device->empty() && !can_use_src_node_device) {
VLOG(3) << "Did not set device for a resource output node "
<< SummarizeNode(*node);
}
}
} else {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int64_t index = attr_value->i();
DCHECK_GT(output_devices.size(), index);
VLOG(3) << "Setting output device to " << output_devices[index]
<< " for return at index " << index;
node->set_assigned_device_name(output_devices[index]);
}
}
return absl::OkStatus();
}
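// Per-function optimization pipeline: instantiate the function body, pin
// args/rets, run the function-optimization registry, PRE_PLACEMENT passes,
// the Placer, POST_PLACEMENT passes, the optional caller-supplied
// options.optimize_graph_fn, then POST_REWRITE_FOR_EXEC passes, and finally
// prune the library down to reachable definitions.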
absl::StatusOr<OptimizedFunctionGraphInfo> OptimizeFunctionGraph(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Device* default_device, Env* env,
OptimizedFunctionGraph::OptimizationSource optimization_source) {
const uint64_t graph_optimization_start_time_usecs = env->NowMicros();
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? input_lib_def : options.lib_def;
core::RefCountPtr<FunctionRecord> fdef = lib_def->FindRecord(function_name);
if (fdef == nullptr) {
return errors::InvalidArgument("Failed to find function \"", function_name,
"\" in function library: ", lib_def);
}
TF_RETURN_IF_ERROR(ValidateMultiDeviceOptions(fdef->fdef(), options));
std::unique_ptr<Graph> graph;
std::vector<Node*> arg_nodes, ret_nodes;
std::vector<string> ret_node_names;
DataTypeVector ret_types;
std::vector<string> control_ret_node_names;
TF_RETURN_IF_ERROR(GetGraphAndArgRets(
function_name, attrs, fdef.GetNewRef(), lib_def, &graph, &arg_nodes,
&ret_nodes, &ret_node_names, &ret_types, &control_ret_node_names));
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
function_name, kDebugGroupOpStacktrace, "before_opt", graph.get());
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
FunctionLibraryDefinition reachable_lib_def =
lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
if (options.graph_collector != nullptr) {
options.graph_collector->CollectRawGraph(graph_def);
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain, "initial",
graph.get(), &reachable_lib_def, false);
if (!options.xla_compile_device_type.empty()) {
for (Node* node : graph->op_nodes()) {
node->AddAttr("_xla_compile_device_type",
options.xla_compile_device_type);
if (default_device) {
node->set_assigned_device_name(default_device->name());
}
}
}
TF_RETURN_IF_ERROR(
SetArgShape(options.input_resource_dtypes_and_shapes, arg_nodes));
TF_RETURN_IF_ERROR(PinArgsAndRets(
options.input_devices, options.output_devices, dev_set, arg_nodes,
ret_nodes, lib_def,
options.config_proto.allow_soft_placement() ? default_device : nullptr));
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_bridge", graph.get(),
&reachable_lib_def, false);
graph->mutable_flib_def()->set_default_registry(&reachable_lib_def);
graph->mutable_flib_def()->Clear();
const bool should_run_optimization_passes = !options.is_component_function;
if (!should_run_optimization_passes) {
VLOG(1) << "Skipping function/graph optimization passes when instantiating "
"component function "
<< function_name;
}
std::unordered_map<string, string> node_name_to_control_ret;
bool control_rets_updated = false;
if (should_run_optimization_passes) {
FunctionOptimizationPass::FunctionOptions function_options{
options.xla_compile_device_type, options.allow_soft_placement};
TF_RETURN_IF_ERROR(FunctionOptimizationPassRegistry::Global().Run(
function_name, dev_set, options.config_proto, function_options, &graph,
&reachable_lib_def, &control_ret_node_names, &control_rets_updated));
}
if (control_rets_updated) {
for (const auto& control_ret : control_ret_node_names) {
node_name_to_control_ret.emplace(control_ret, control_ret);
}
} else {
for (const auto& control_ret : fdef->fdef().control_ret()) {
node_name_to_control_ret.emplace(control_ret.second, control_ret.first);
}
}
GraphOptimizationPassOptions optimization_options;
SessionOptions session_options;
session_options.env = env;
session_options.config = options.config_proto;
optimization_options.session_options = &session_options;
optimization_options.graph = &graph;
optimization_options.flib_def = &reachable_lib_def;
optimization_options.device_set = &dev_set;
optimization_options.is_function_graph = true;
optimization_options.composite_devices = &composite_devices;
optimization_options.default_function_device = default_device;
optimization_options.function_def = &fdef->fdef();
optimization_options.shape_inference_on_tfe_dialect_import =
options.shape_inference_on_tfe_dialect_import;
optimization_options.debug_filename_prefix = function_name;
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_pre_placement_passes", graph.get(),
&reachable_lib_def, false);
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::PRE_PLACEMENT, optimization_options));
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_placer", graph.get(),
&reachable_lib_def, false);
Placer placer(graph.get(), function_name, optimization_options.flib_def,
&dev_set, default_device,
options.config_proto.allow_soft_placement(),
options.config_proto.log_device_placement());
TF_RETURN_IF_ERROR(placer.Run(optimization_options));
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_post_placement_passes", graph.get(),
&reachable_lib_def, false);
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PLACEMENT, optimization_options));
}
if (options.optimize_graph_fn) {
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_graph_optimization", graph.get(),
&reachable_lib_def, false);
Status status = options.optimize_graph_fn(
std::move(ret_node_names), std::move(control_ret_node_names),
&reachable_lib_def, dev_set, cpu_device, &graph);
if (!status.ok()) {
LOG(WARNING) << "Ignoring multi-device function optimization failure: "
<< status;
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"after_graph_optimization", graph.get(),
&reachable_lib_def, false);
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_post_rewrite_for_exec_passes",
graph.get(), &reachable_lib_def, false);
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, optimization_options));
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"after_post_rewrite_for_exec_passes",
graph.get(), &reachable_lib_def, false);
graph->mutable_flib_def()->set_default_registry(nullptr);
graph->mutable_flib_def()->Clear();
FunctionLibraryDefinition pruned_lib_def =
reachable_lib_def.ReachableDefinitions(*graph);
return OptimizedFunctionGraphInfo(
function_name, std::move(graph), std::move(pruned_lib_def),
node_name_to_control_ret, ret_types, ret_nodes.size(),
env->NowMicros() - graph_optimization_start_time_usecs,
optimization_source);
}
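// File-cache wrapper around OptimizeFunctionGraph: when the directory named
// by kGraphCachingEnvVariableName is set, a cache hit restores the optimized
// graph from disk, while a miss runs the passes and writes the result back
// only if optimization took at least caching_threshold_duration.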
absl::StatusOr<OptimizedFunctionGraphInfo>
OptimizeFunctionGraphOrReadFromFileCache(
const string& function_name, AttrSlice attrs,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Device* default_device, Env* env,
absl::Duration caching_threshold_duration) {
const string dir_name = absl::StrCat(getenv(kGraphCachingEnvVariableName));
if (dir_name.empty() || options.is_component_function) {
return OptimizeFunctionGraph(function_name, attrs, options, dev_set,
input_lib_def, composite_devices, cpu_device,
default_device, env,
OptimizedFunctionGraph::JIT);
}
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? input_lib_def : options.lib_def;
const FunctionDef* fdef = lib_def->Find(function_name);
if (fdef == nullptr) {
return absl::AbortedError(absl::StrCat(
"Failed to find function ", function_name,
" in function library: ", lib_def->ToProto().DebugString()));
}
const string file_name = GetFileCacheName(dir_name, function_name, fdef);
if (env->FileExists(file_name).ok()) {
LOG(INFO)
<< "TensorFlow graph cache existed; reading from cache; function name: "
<< function_name << ", full cache file path: " << file_name;
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info =
ReadFromCache(file_name, env);
if (optimized_function_graph_info.ok()) {
metrics::UpdateFunctionGraphOptimizationSavingTime(
optimized_function_graph_info->optimization_duration_usecs,
metrics::GraphOptimizationSource::kJit);
metrics::IncrementFunctionGraphOptimizationCacheHitCount(
1, metrics::GraphOptimizationSource::kJit);
LOG(INFO)
<< "Successfully restored the Tensorflow optimized graph from "
"the cache for the function: "
<< function_name << ", saved optimized time: "
<< absl::ToInt64Milliseconds(absl::Microseconds(
optimized_function_graph_info->optimization_duration_usecs))
<< " msecs";
return optimized_function_graph_info;
}
metrics::IncrementFunctionGraphOptimizationCacheFailureCount(
1, metrics::GraphOptimizationSource::kJit);
LOG(ERROR)
<< "Reading from Tensorflow graph optimization cache failed. Continue "
"to run the Tensorflow graph optimization passes instead. Error: "
<< optimized_function_graph_info.status();
return OptimizeFunctionGraph(function_name, attrs, options, dev_set,
input_lib_def, composite_devices, cpu_device,
default_device, env,
OptimizedFunctionGraph::JIT);
}
metrics::IncrementFunctionGraphOptimizationCacheMissCount(
1, metrics::GraphOptimizationSource::kJit);
VLOG(3) << "No cache existed; run the optimization passes. function name:"
<< " " << function_name;
absl::Time optimization_start_time = absl::Now();
TF_ASSIGN_OR_RETURN(
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info,
OptimizeFunctionGraph(function_name, attrs, options, dev_set,
input_lib_def, composite_devices, cpu_device,
default_device, env, OptimizedFunctionGraph::JIT));
const absl::Duration graph_optimization_duration =
absl::Now() - optimization_start_time;
VLOG(3) << "Finished running the optimization passes; took "
<< absl::ToInt64Seconds(graph_optimization_duration)
<< " secs; function name: " << function_name;
if (graph_optimization_duration >= caching_threshold_duration) {
LOG(INFO)
<< "Writing the optimized TensorFlow graph into cache: function name: "
<< function_name << ", full cache file path: " << file_name;
Status s = WriteToCache(dir_name, file_name,
optimized_function_graph_info.value(), env);
if (!s.ok()) {
LOG(ERROR) << "Caching the Tensorflow graph optimization results failed; "
                    "continue without caching. Error message: "
<< s;
}
LOG(INFO) << "Successfully wrote the optimized Tensorflow graph into cache "
"for the function: "
<< function_name << ", graph optimization time ( / threshold): "
<< absl::ToInt64Milliseconds(graph_optimization_duration)
<< " / (" << absl::ToInt64Milliseconds(caching_threshold_duration)
<< ") msecs";
}
return optimized_function_graph_info;
}
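// Post-optimization lowering: replicates per-replica nodes onto composite
// devices, partitions the optimized graph into one subgraph per device, and
// runs the POST_PARTITIONING pass group over the partitions.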
absl::StatusOr<
std::unique_ptr<std::unordered_map<string, std::unique_ptr<Graph>>>>
PreprocessAndPartitionGraph(
const std::string& function_name,
OptimizedFunctionGraphInfo& input_optimized_graph,
const FunctionLibraryRuntime::InstantiateOptions& options,
const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
Env* env) {
std::unique_ptr<Graph>& graph = input_optimized_graph.function_graph;
TF_RETURN_IF_ERROR(ReplicatePerReplicaNodesInFunctionGraph(
options.composite_devices, graph.get()));
const FunctionLibraryDefinition* lib_def =
options.lib_def == nullptr ? input_lib_def : options.lib_def;
if (options.graph_collector != nullptr) {
GraphDef def;
graph->ToGraphDef(&def);
*def.mutable_library() = lib_def->ReachableDefinitions(def).ToProto();
options.graph_collector->CollectOptimizedGraph(def);
}
DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
"before_partition", graph.get(),
&input_optimized_graph.lib_def, VLOG_IS_ON(4));
auto device_name_to_subgraphs =
std::make_unique<std::unordered_map<string, std::unique_ptr<Graph>>>();
TF_RETURN_IF_ERROR(PartitionFunctionGraph(dev_set, std::move(graph),
device_name_to_subgraphs.get()));
for (const auto& pair : *device_name_to_subgraphs) {
std::string partitioned_func_name =
absl::StrCat(function_name, "_partition_" + pair.first);
const auto* optimized_subgraph = pair.second.get();
DEBUG_DATA_DUMPER()->DumpGraph(
partitioned_func_name, kDebugGroupMain, "before_partition_passes",
optimized_subgraph, &input_optimized_graph.lib_def, false);
}
GraphOptimizationPassOptions optimization_options;
SessionOptions session_options;
session_options.env = env;
session_options.config = options.config_proto;
optimization_options.session_options = &session_options;
optimization_options.flib_def = &(input_optimized_graph.lib_def);
optimization_options.is_function_graph = true;
optimization_options.graph = nullptr;
optimization_options.device_set = nullptr;
optimization_options.partition_graphs = device_name_to_subgraphs.get();
optimization_options.debug_filename_prefix = function_name;
if (cpu_device && std::is_same<decltype(cpu_device), LocalDevice>::value &&
cpu_device->tensorflow_cpu_worker_threads() != nullptr) {
session_options.config.set_intra_op_parallelism_threads(
cpu_device->tensorflow_cpu_worker_threads()->num_threads);
}
const bool should_run_optimization_passes = !options.is_component_function;
if (should_run_optimization_passes) {
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
}
for (const auto& pair : *device_name_to_subgraphs) {
std::string partitioned_func_name =
absl::StrCat(function_name, "_partition_" + pair.first);
const auto* optimized_subgraph = pair.second.get();
DEBUG_DATA_DUMPER()->DumpGraph(partitioned_func_name, kDebugGroupMain,
"after_partition_passes", optimized_subgraph,
&input_optimized_graph.lib_def, false);
}
return std::move(device_name_to_subgraphs);
}
} | #include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
constexpr absl::string_view kDevicePrefix = "/job:a/replica:0/task:0/device:";
void CreateCpuDeviceList(absl::string_view name_prefix, int num_devices,
std::vector<std::unique_ptr<Device>>& devices) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", num_devices});
TF_ASSERT_OK(
DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0", &devices));
}
void TestOptimizeFunctionGraphWithFunctionNotFound(bool load_from_cache) {
FunctionLibraryRuntime::InstantiateOptions opts;
opts.is_multi_device_function = true;
auto lib_def =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global());
std::vector<std::unique_ptr<Device>> devices;
CreateCpuDeviceList(kDevicePrefix, 1, devices);
DeviceSet device_set;
for (const auto& device : devices) {
device_set.AddDevice(device.get());
}
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info;
if (load_from_cache) {
optimized_function_graph_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), absl::ZeroDuration());
} else {
optimized_function_graph_info = OptimizeFunctionGraph(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), OptimizedFunctionGraph::AOT);
}
EXPECT_TRUE(absl::IsInvalidArgument(optimized_function_graph_info.status()))
<< "Actual status: " << optimized_function_graph_info.status();
EXPECT_TRUE(
absl::StrContains(optimized_function_graph_info.status().message(),
"Failed to find function"))
<< "Actual error message: "
<< optimized_function_graph_info.status().message();
}
TEST(OptimizeFunctionGraphTest,
OptimizeFunctionGraphReturnsErrorIfNoFunctionFound) {
TestOptimizeFunctionGraphWithFunctionNotFound(false);
}
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphReturnsCorrectResult) {
FunctionLibraryRuntime::InstantiateOptions opts;
opts.is_multi_device_function = true;
FunctionDefLibrary proto;
*(proto.add_function()) = test::function::FindDevice();
auto lib_def =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
std::vector<std::unique_ptr<Device>> devices;
CreateCpuDeviceList(kDevicePrefix, 3, devices);
DeviceSet device_set;
for (const auto& device : devices) {
device_set.AddDevice(device.get());
}
const absl::StatusOr<OptimizedFunctionGraphInfo> aot_result =
OptimizeFunctionGraph("FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(),
devices[1].get(), Env::Default(),
OptimizedFunctionGraph::AOT);
TF_EXPECT_OK(aot_result.status());
EXPECT_EQ(aot_result->name, "FindDevice");
EXPECT_EQ(aot_result->num_return_nodes, 1);
EXPECT_THAT(aot_result->ret_types, ElementsAre(DT_STRING));
EXPECT_GT(aot_result->optimization_duration_usecs, 0);
EXPECT_EQ(aot_result->optimization_source, OptimizedFunctionGraph::AOT);
}
TEST(OptimizeFunctionGraphTest, ReloadFromCacheReturnsErrorIfNoFunctionFound) {
TestOptimizeFunctionGraphWithFunctionNotFound(true);
}
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphAndWriteToCache) {
Env* env = Env::Default();
  const string temp_dir = "/tmp/testing_cache_directory";
EXPECT_TRUE(env->RecursivelyCreateDir(temp_dir).ok());
setenv(kGraphCachingEnvVariableName, temp_dir.c_str(), 1);
std::vector<string> empty_file_list;
  TF_ASSERT_OK(
      env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &empty_file_list));
  ASSERT_TRUE(empty_file_list.empty());
  FunctionLibraryRuntime::InstantiateOptions opts;
  opts.is_multi_device_function = true;
  FunctionDefLibrary proto;
  *(proto.add_function()) = test::function::FindDeviceWithUuid();
  auto lib_def =
      std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
  std::vector<std::unique_ptr<Device>> devices;
  CreateCpuDeviceList(kDevicePrefix, 3, devices);
  DeviceSet device_set;
  for (const auto& device : devices) {
    device_set.AddDevice(device.get());
  }
  absl::StatusOr<OptimizedFunctionGraphInfo> optimized_info =
      OptimizeFunctionGraphOrReadFromFileCache(
          "FindDevice_1234", {}, opts, device_set, lib_def.get(),
          {}, devices[0].get(), devices[1].get(),
          Env::Default(), absl::Hours(48));
  TF_ASSERT_OK(optimized_info.status());
  std::vector<string> file_list;
  TF_ASSERT_OK(
      env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &file_list));
  EXPECT_EQ(file_list.size(), 0);
  optimized_info = OptimizeFunctionGraphOrReadFromFileCache(
      "FindDevice_1234", {}, opts, device_set, lib_def.get(),
      {}, devices[0].get(), devices[1].get(),
      Env::Default(), absl::ZeroDuration());
  TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
optimized_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice_1234", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[1].get(),
Env::Default(), absl::ZeroDuration());
TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_GT(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
EXPECT_EQ(optimized_info->name, "FindDevice_1234");
EXPECT_EQ(optimized_info->num_return_nodes, 1);
EXPECT_THAT(optimized_info->ret_types, ElementsAre(DT_STRING));
int64_t undeleted_files;
int64_t undeleted_dirs;
TF_EXPECT_OK(
env->DeleteRecursively(temp_dir, &undeleted_files, &undeleted_dirs));
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
TF_ASSERT_OK(
env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &empty_file_list));
ASSERT_TRUE(empty_file_list.empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_function_graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_function_graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6163921-3eb0-4635-9477-0c8012ec8a36 | cpp | tensorflow/tensorflow | shape_refiner | tensorflow/core/common_runtime/shape_refiner.cc | tensorflow/core/common_runtime/shape_refiner_test.cc | #include "tensorflow/core/common_runtime/shape_refiner.h"
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
ShapeRefiner::ShapeRefiner(int graph_def_version,
const OpRegistryInterface* ops)
: graph_def_version_(graph_def_version),
ops_registry_(ops),
graph_runner_(Env::Default()) {}
ShapeRefiner::ShapeRefiner(const VersionDef& versions,
const OpRegistryInterface* ops)
: ShapeRefiner(versions.producer(), ops) {}
ShapeRefiner::~ShapeRefiner() {
const_tensor_map_.clear();
}
namespace {
constexpr char kArgOp[] = "_Arg";
constexpr char kRetvalOp[] = "_Retval";
}
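// Propagates shapes across a function boundary: _Arg nodes import shapes
// (and resource handle shapes/types) from the outer inference context, and
// _Retval nodes export them back, round-tripping through TensorShapeProto
// because the two contexts do not share shape handles.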
Status ShapeRefiner::InferShapesForFunctionSubNode(
const Node* node, InferenceContext* outer_context) {
TF_RETURN_IF_ERROR(AddNodeInternal(node, outer_context));
InferenceContext* node_context = CHECK_NOTNULL(GetContext(node));
if (StringPiece(node->type_string()) == kArgOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_inputs() <= index) {
return errors::Internal(
"Function instantiation included invalid input index: ", index,
" not in [0, ", outer_context->num_inputs(), ").");
}
if (outer_context->input(index).SameHandle(ShapeHandle())) {
VLOG(1) << "Function instantiation has undefined input shape at "
<< "index: " << index << " in the outer inference context.";
node_context->set_output(0, node_context->UnknownShape());
} else {
node_context->set_output(0, outer_context->input(index));
}
auto* resource = outer_context->input_handle_shapes_and_types(index);
if (resource) {
node_context->set_output_handle_shapes_and_types(0, *resource);
}
} else if (StringPiece(node->type_string()) == kRetvalOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index));
if (index < 0 || outer_context->num_outputs() <= index) {
return errors::Internal(
"Function instantiation included invalid output index: ", index,
" not in [0, ", outer_context->num_outputs(), ").");
}
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(node_context->input(0), &proto);
TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle));
outer_context->set_output(index, handle);
const std::vector<ShapeAndType>* resource =
node_context->input_handle_shapes_and_types(0);
if (resource) {
std::vector<ShapeAndType> copied_shapes_and_types;
for (auto& shape_and_type : *resource) {
ShapeHandle handle;
TensorShapeProto proto;
node_context->ShapeHandleToProto(shape_and_type.shape, &proto);
TF_RETURN_IF_ERROR(
outer_context->MakeShapeFromShapeProto(proto, &handle));
copied_shapes_and_types.push_back(
ShapeAndType(handle, shape_and_type.dtype, shape_and_type.type));
}
outer_context->set_output_handle_shapes_and_types(
index, copied_shapes_and_types);
}
}
return absl::OkStatus();
}
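// Runs shape inference over an instantiated function body. Instantiated
// graphs are cached in functions_ keyed by function name; nodes are visited
// so inputs are inferred before their consumers, and the temporary per-node
// contexts are erased afterwards.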
Status ShapeRefiner::InferShapesForFunction(const FunctionDef* function_def,
AttrSlice attributes,
InferenceContext* outer_context) {
const Graph* graph;
const string& fname = function_def->signature().name();
auto it = functions_.find(fname);
if (it != functions_.end()) {
graph = it->second.get();
} else {
InstantiationResult result;
TF_RETURN_IF_ERROR(InstantiateFunction(
*function_def, attributes,
[this](const string& op, const OpDef** sig) {
return this->function_library_->LookUpOpDef(op, sig);
},
&result));
Graph* new_graph = new Graph(function_library_);
GraphConstructorOptions options;
options.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
ConvertNodeDefsToGraph(options, result.nodes, new_graph));
functions_[fname].reset(new_graph);
graph = new_graph;
}
absl::flat_hash_set<const Node*> function_nodes;
Status inference_status = absl::OkStatus();
{
auto node_shape_inference_lambda = [this, &outer_context, &function_nodes,
&inference_status](const Node* node) {
if (!inference_status.ok()) return;
inference_status = InferShapesForFunctionSubNode(node, outer_context);
function_nodes.insert(node);
};
ReverseDFS(*graph, {}, node_shape_inference_lambda);
}
for (const Node* node : function_nodes) {
node_to_context_.erase(node);
}
return inference_status;
}
Status ShapeRefiner::AddNode(const Node* node) {
return AddNodeInternal(node, nullptr);
}
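// Builds an InferenceContext for the node, seeding each input from the
// producer's inferred output shape (unknown if the producer has not been
// added yet), then runs the op's registered shape function.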
Status ShapeRefiner::AddNodeInternal(
const Node* node, shape_inference::InferenceContext* outer_context) {
std::unique_ptr<InferenceContext> ic(new InferenceContext(
graph_def_version_, node->def(), node->op_def(),
std::vector<ShapeHandle>(node->num_inputs()), {}, {}, {}));
TF_RETURN_IF_ERROR(ic->construction_status());
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) continue;
if (e->dst_input() < 0) {
return tensorflow::errors::Internal(
"Index ", e->dst_input(), " is negative but not a control edge.");
}
const Node* input = e->src();
auto it = node_to_context_.find(input);
if (it == node_to_context_.end()) {
ic->SetInput(e->dst_input(), ic->UnknownShape());
continue;
}
InferenceContext* input_ic = it->second.get();
ic->SetInput(e->dst_input(), input_ic->output(e->src_output()));
const auto* in_v =
input_ic->output_handle_shapes_and_types(e->src_output());
if (in_v != nullptr) {
DataType input_type = e->src()->output_type(e->src_output());
DCHECK(input_type == DT_RESOURCE || input_type == DT_VARIANT);
ic->set_input_handle_shapes_and_types(e->dst_input(),
std::vector<ShapeAndType>(*in_v));
}
}
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(ops_registry_->LookUp(node->type_string(), &op_reg_data));
if (op_reg_data->shape_inference_fn == nullptr &&
require_shape_inference_fns_) {
return errors::InvalidArgument(
"No shape inference function exists for op '", node->type_string(),
"', did you forget to define it?");
}
TF_RETURN_IF_ERROR(RunShapeFn(node, op_reg_data, ic.get(), outer_context));
node_to_context_[node].swap(ic);
return absl::OkStatus();
}
Status ShapeRefiner::SetShape(const Node* node, int output_port,
ShapeHandle shape) {
auto c = GetContext(node);
if (c == nullptr) {
return errors::Internal("Could not find context for ", node->name());
}
if (output_port < 0 || output_port >= node->num_outputs()) {
return errors::InvalidArgument(
"output_port '", output_port, "' is out of range, ", "node '",
node->name(), "' has ", node->num_outputs(), " outputs");
}
if (node->num_outputs() > c->num_outputs()) {
TF_RETURN_IF_ERROR(c->ExpandOutputs(node->num_outputs()));
}
ShapeHandle existing_shape = c->output(output_port);
TF_RETURN_IF_ERROR(c->Merge(existing_shape, shape, &shape));
c->set_output(output_port, shape);
return absl::OkStatus();
}
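// Re-runs shape inference for a node whose inputs may have changed. With
// relax=false, input shapes are merged (kept as precise as possible); with
// relax=true, they are relaxed toward a common, possibly less precise shape.
// *refined reports whether anything observable changed.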
Status ShapeRefiner::UpdateNode(const Node* node, bool relax, bool* refined) {
auto it = node_to_context_.find(node);
if (it == node_to_context_.end()) {
*refined = true;
return AddNode(node);
}
InferenceContext* node_context = it->second.get();
TF_RETURN_IF_ERROR(node_context->construction_status());
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) continue;
int dst_input = e->dst_input();
int src_output = e->src_output();
Node* input = e->src();
auto iter = node_to_context_.find(input);
if (iter == node_to_context_.end()) {
return errors::FailedPrecondition(
"Input ", dst_input, " ('", input->name(), "') for '", node->name(),
"' was not previously added to ShapeRefiner.");
}
InferenceContext* c = iter->second.get();
DCHECK_GE(dst_input, 0);
ShapeHandle existing_input = node_context->input(dst_input);
if (!relax) {
if (node_context->MergeInput(dst_input, c->output(src_output))) {
if (!SameDefinedShape(node_context, node_context->input(dst_input),
existing_input)) {
*refined = true;
}
}
} else {
if (node_context->RelaxInput(dst_input, c->output(src_output))) {
if (!SameDefinedShape(node_context, node_context->input(dst_input),
existing_input)) {
*refined = true;
}
}
}
if (node_context->requested_input_tensor_as_partial_shape(dst_input)) {
*refined = true;
}
if (e->src()->output_type(src_output) == DT_RESOURCE) {
auto* outputs = c->output_handle_shapes_and_types(src_output);
if (!outputs) continue;
if (!relax &&
node_context->MergeInputHandleShapesAndTypes(dst_input, *outputs)) {
*refined = true;
} else if (relax) {
std::vector<ShapeAndType> existing_inputs;
const std::vector<ShapeAndType>* inputs =
node_context->input_handle_shapes_and_types(dst_input);
if (inputs) {
existing_inputs = *inputs;
}
if (node_context->RelaxInputHandleShapesAndMergeTypes(dst_input,
*outputs)) {
if (IsUpdatedShapesOrTypes(
node_context, existing_inputs,
*node_context->input_handle_shapes_and_types(dst_input))) {
*refined = true;
}
}
}
}
}
if (!*refined) {
return absl::OkStatus();
}
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(ops_registry_->LookUp(node->type_string(), &op_reg_data));
if (op_reg_data->shape_inference_fn == nullptr &&
require_shape_inference_fns_) {
return errors::InvalidArgument(
"No shape inference function exists for op '", node->type_string(),
"', did you forget to define it?");
}
if (!op_reg_data->shape_inference_fn) {
return absl::OkStatus();
}
return RunShapeFn(node, op_reg_data, node_context);
}
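// Attempts to constant-fold the tensor feeding input dst_idx of `node`.
// _Arg inputs may be satisfied from the outer context's input tensors, and
// results no larger than kMaxTensorSize bytes are memoized in
// const_tensor_map_.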
Status ShapeRefiner::EvaluateConstantTensorForEdge(
const Node* node, int dst_idx, bool* evaluated, Tensor* result,
InferenceContext* outer_context) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(node->input_edge(dst_idx, &input_edge));
const Node& src = *input_edge->src();
const int src_output = input_edge->src_output();
auto lookup = [&](const Node& node, int index) -> std::optional<Tensor> {
if (node.IsArg() && outer_context != nullptr) {
int index;
if (GetNodeAttr(node.def(), "index", &index).ok() && 0 <= index &&
index < outer_context->num_inputs()) {
const auto* tensor = outer_context->input_tensor(index);
outer_context->request_input_tensor(index);
if (tensor != nullptr) {
return *tensor;
}
}
}
auto it = const_tensor_map_.find({node.id(), index});
if (it != const_tensor_map_.end()) {
return it->second;
}
return std::optional<Tensor>();
};
std::optional<EvaluateConstantTensorRunner> runner;
if (!disable_constant_propagation_) {
runner = EvaluateConstantTensorRunner{
ops_registry_,
graph_def_version_,
&graph_runner_,
};
}
TF_ASSIGN_OR_RETURN(auto tensor, EvaluateConstantTensor(
src, src_output, *this, lookup, runner));
*evaluated = tensor.has_value();
if (tensor.has_value()) {
if (tensor->TotalBytes() <= kMaxTensorSize) {
const_tensor_map_.emplace(std::make_pair(src.id(), src_output), *tensor);
}
*result = *std::move(tensor);
}
return absl::OkStatus();
}
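// Convenience wrapper over EvaluateConstantTensorForEdge for edges that must
// carry a single int32 or int64 scalar, e.g. a strided-slice bound.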
Status ShapeRefiner::EvaluateConstantIntScalarEdge(
const Node* node, int dst_idx, bool* evaluated, int64_t* result,
shape_inference::InferenceContext* outer_context) {
Tensor scalar;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, evaluated,
&scalar, outer_context));
if (*evaluated) {
if (scalar.NumElements() != 1) {
return errors::InvalidArgument(
"EvaluateConstantIntScalarEdge called on non-scalar edge: ",
scalar.NumElements());
}
if (scalar.dtype() == DT_INT32) {
*result = scalar.scalar<int32>()();
} else {
if (scalar.dtype() != DT_INT64) {
return errors::InvalidArgument(
"EvaluateConstantIntScalarEdge called on non-integer edge: ",
scalar.dtype());
}
*result = scalar.scalar<int64_t>()();
}
}
return absl::OkStatus();
}
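// Derives a partial shape for the tensor feeding input `dst_idx` of `node`
// without necessarily evaluating it. A known-rank-0 input must be the scalar
// -1 (meaning "unknown shape"); the outputs of Cast, Shape, ShapeN, Pack,
// Concat/ConcatV2, StridedSlice and VariableShape are handled symbolically so
// that individual unknown dimensions stay unknown instead of invalidating the
// whole shape. Everything else falls back to constant evaluation.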
Status ShapeRefiner::ConstantPartialShape(
InferenceContext* target_context, const Node* node, int dst_idx,
ShapeHandle* result, shape_inference::InferenceContext* outer_context) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(node->input_edge(dst_idx, &input_edge));
InferenceContext* src_context = GetContext(input_edge->src());
if (src_context == nullptr) return errors::Internal("Missing src context");
ShapeHandle src_shape = src_context->output(input_edge->src_output());
if (src_context->Value(src_context->Rank(src_shape)) == 0) {
Tensor t;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, &evaluated,
&t, outer_context));
if (!evaluated) {
return errors::InvalidArgument(
"Received a shape scalar with unknown static value. A static value "
"of '-1' is required to represent an unknown shape.");
}
if (t.dims() == 0) {
if (t.dtype() == DT_INT32 && t.scalar<int32>()() == -1) {
*result = target_context->UnknownShape();
return absl::OkStatus();
} else if (t.dtype() == DT_INT64 && t.scalar<int64_t>()() == -1) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
}
return errors::InvalidArgument(
"Received an invalid shape scalar with a static value that is not "
"'-1': ",
t.DebugString());
}
TF_RETURN_IF_ERROR(src_context->WithRank(src_shape, 1, &src_shape));
const string& src_op = input_edge->src()->type_string();
if (src_context->Value(src_context->Dim(src_shape, 0)) == 0) {
*result = target_context->Scalar();
} else if (src_op == "Cast") {
Tensor t;
bool evaluated = false;
if (EvaluateConstantTensorForEdge(node, dst_idx, &evaluated, &t,
outer_context)
.ok()) {
if (evaluated &&
target_context->MakeShapeFromTensor(&t, src_shape, result).ok()) {
return absl::OkStatus();
}
}
ShapeHandle pre_cast_shape;
if (!ConstantPartialShape(target_context, input_edge->src(), 0,
&pre_cast_shape, outer_context)
.ok()) {
TF_RETURN_IF_ERROR(
target_context->MakeShapeFromTensor(nullptr, src_shape, result));
}
if (!target_context->RankKnown(pre_cast_shape)) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
auto* dest_type = input_edge->src()->attrs().Find("DstT");
if (dest_type == nullptr || dest_type->value_case() != AttrValue::kType ||
(dest_type->type() != DT_INT32 && dest_type->type() != DT_INT64)) {
*result = target_context->MakeShape(std::vector<DimensionHandle>(
target_context->Rank(pre_cast_shape), target_context->UnknownDim()));
return absl::OkStatus();
}
*result = pre_cast_shape;
} else if (src_op == "Shape") {
*result = src_context->input(0);
} else if (src_op == "ShapeN") {
*result = src_context->input(input_edge->src_output());
} else if (src_op == "Pack") {
std::vector<DimensionHandle> dims;
for (int i = 0; i < src_context->num_inputs(); ++i) {
int64_t size;
bool evaluated;
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(
input_edge->src(), i, &evaluated, &size, outer_context));
if (evaluated) {
dims.push_back(size < 0 ? target_context->UnknownDim()
: target_context->MakeDim(size));
} else {
dims.push_back(target_context->UnknownDim());
}
}
*result = target_context->MakeShape(dims);
} else if (src_op == "Concat" || src_op == "ConcatV2") {
*result = target_context->Scalar();
const int concat_dim =
src_op == "Concat" ? 0 : src_context->num_inputs() - 1;
for (int i = 0; i < src_context->num_inputs(); ++i) {
if (i == concat_dim) continue;
ShapeHandle sub_result;
TF_RETURN_IF_ERROR(ConstantPartialShape(target_context, input_edge->src(),
i, &sub_result, outer_context));
if (!target_context->RankKnown(sub_result)) {
*result = target_context->UnknownShape();
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
target_context->Concatenate(*result, sub_result, result));
}
} else if (src_op == "StridedSlice") {
TF_RETURN_IF_ERROR(PartialStridedSliceShape(input_edge->src(), src_context,
result, outer_context));
} else if (src_op == "VariableShape") {
auto* handle_data = src_context->input_handle_shapes_and_types(0);
if (handle_data != nullptr && !handle_data->empty()) {
*result = handle_data->at(0).shape;
} else {
*result = target_context->UnknownShape();
}
} else {
Tensor t;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(node, dst_idx, &evaluated,
&t, outer_context));
TF_RETURN_IF_ERROR(target_context->MakeShapeFromTensor(
evaluated ? &t : nullptr, src_shape, result));
}
return absl::OkStatus();
}
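// Computes the partial shape produced by slicing a shape vector. Only the
// simple case is handled: single-element begin/end/stride inputs, no
// ellipsis/new-axis/shrink-axis bits, and begin/end masks of 0 or 1;
// anything else conservatively yields an unknown shape.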
Status ShapeRefiner::PartialStridedSliceShape(
Node* slice_node, InferenceContext* ctx, ShapeHandle* result,
shape_inference::InferenceContext* outer_context) {
for (int i = 1; i <= 3; ++i) {
ShapeHandle input_shape = ctx->input(i);
if (ctx->Value(ctx->Dim(input_shape, 0)) != 1) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask;
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "begin_mask", &begin_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(slice_node->attrs(), "end_mask", &end_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "ellipsis_mask", &ellipsis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "new_axis_mask", &new_axis_mask));
TF_RETURN_IF_ERROR(
GetNodeAttr(slice_node->attrs(), "shrink_axis_mask", &shrink_axis_mask));
if (!(begin_mask == 0 || begin_mask == 1) ||
!(end_mask == 0 || end_mask == 1) || ellipsis_mask != 0 ||
new_axis_mask != 0 || shrink_axis_mask != 0) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
bool evaluated;
int64_t begin;
if (begin_mask == 1) {
begin = 0;
} else {
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 1, &evaluated,
&begin, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int64_t end;
if (end_mask == 1) {
end = std::numeric_limits<int64_t>::max();
} else {
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 2, &evaluated,
&end, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
}
int64_t stride;
TF_RETURN_IF_ERROR(EvaluateConstantIntScalarEdge(slice_node, 3, &evaluated,
&stride, outer_context));
if (!evaluated) {
*result = ctx->UnknownShape();
return absl::OkStatus();
}
ShapeHandle input;
TF_RETURN_IF_ERROR(
ConstantPartialShape(ctx, slice_node, 0, &input, outer_context));
TF_RETURN_IF_ERROR(ctx->Subshape(input, begin, end, stride, result));
return absl::OkStatus();
}
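// Runs the op's shape function (or whole-function inference for function
// calls, unless _disable_call_shape_inference is set), then re-runs it as
// long as it requests input tensors or tensors-as-shapes that can be newly
// materialized, since each materialized constant may sharpen the output.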
Status ShapeRefiner::RunShapeFn(const Node* node,
const OpRegistrationData* op_reg_data,
InferenceContext* c,
InferenceContext* outer_context) {
std::vector<const Tensor*> input_tensors(node->num_inputs(), nullptr);
std::vector<Tensor> real_tensors(node->num_inputs());
std::vector<bool> attempted_materialization(node->num_inputs());
std::vector<bool> attempted_tensor_as_shape_conversion(node->num_inputs());
std::vector<ShapeHandle> input_tensors_as_shapes;
c->set_input_tensors(input_tensors);
c->set_input_tensors_as_shapes(input_tensors_as_shapes);
auto run_inference_lambda = [&]() {
if (function_library_ && IsFunctionCall(*function_library_, *node)) {
bool disable_shape_inference;
if (!GetNodeAttr(AttrSlice(node->def()), "_disable_call_shape_inference",
&disable_shape_inference)
.ok() ||
!disable_shape_inference) {
NameAttrList function;
TF_RETURN_IF_ERROR(
NameAndAttrsFromFunctionCall(node->def(), &function));
const FunctionDef* function_def =
function_library_->Find(function.name());
if (function_def != nullptr) {
auto const_tensor_map_copy = const_tensor_map_;
const_tensor_map_.clear();
VLOG(4) << "Running shape inference for function \""
<< function.name() << "\".";
Status function_inference_status = InferShapesForFunction(
function_def, AttrSlice(&function.attr()), c);
const_tensor_map_ = const_tensor_map_copy;
VLOG(4) << "Shape inference for function \"" << function.name()
<< "\" returned status " << function_inference_status << ".";
return function_inference_status;
}
}
}
if (op_reg_data->shape_inference_fn) {
VLOG(4) << "Running shape inference function for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
TF_RETURN_IF_ERROR(c->Run(op_reg_data->shape_inference_fn));
} else {
VLOG(4) << "Unknown shape inference function for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
TF_RETURN_IF_ERROR(c->Run(shape_inference::UnknownShape));
}
VLOG(4) << "Shape inference passed for node \"" << node->name()
<< "\" of type \"" << node->type_string() << "\".";
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(run_inference_lambda());
bool rerun_shape_fn;
do {
rerun_shape_fn = false;
for (int i = 0; i < c->num_inputs(); ++i) {
if (!c->requested_input_tensor(i)) {
continue;
}
if (!attempted_materialization[i]) {
attempted_materialization[i] = true;
Tensor result;
bool evaluated = false;
TF_RETURN_IF_ERROR(EvaluateConstantTensorForEdge(
node, i, &evaluated, &result, outer_context));
if (evaluated) {
real_tensors[i] = result;
input_tensors[i] = &real_tensors[i];
rerun_shape_fn = true;
}
}
if (c->requested_input_tensor_as_partial_shape(i) &&
!attempted_tensor_as_shape_conversion[i]) {
attempted_tensor_as_shape_conversion[i] = true;
if (i >= input_tensors_as_shapes.size()) {
input_tensors_as_shapes.resize(i + 1);
}
ShapeHandle s;
TF_RETURN_IF_ERROR(ConstantPartialShape(c, node, i, &s, outer_context));
input_tensors_as_shapes[i] = s;
rerun_shape_fn = true;
}
}
if (rerun_shape_fn) {
c->set_input_tensors(input_tensors);
c->set_input_tensors_as_shapes(input_tensors_as_shapes);
TF_RETURN_IF_ERROR(run_inference_lambda());
}
} while (rerun_shape_fn);
return absl::OkStatus();
}
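// Two shapes are the "same defined shape" only when each dimension pair is
// either the identical handle or the same known non-negative value; shapes
// of unknown rank never compare equal across distinct handles.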
bool ShapeRefiner::SameDefinedShape(InferenceContext* c, ShapeHandle s0,
ShapeHandle s1) {
if (s0.SameHandle(s1)) {
return true;
}
if (c->Rank(s0) != c->Rank(s1)) {
return false;
}
if (!c->RankKnown(s0) && !c->RankKnown(s1)) {
return false;
}
for (int i = 0; i < c->Rank(s0); ++i) {
if (!c->Dim(s0, i).SameHandle(c->Dim(s1, i))) {
int64_t val0 = c->Value(c->Dim(s0, i));
int64_t val1 = c->Value(c->Dim(s1, i));
if (val0 < 0 || val1 < 0 || val0 != val1) {
return false;
}
}
}
return true;
}
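// Reports whether a (shape, dtype) list differs from `existing`, using
// SameDefinedShape for the shape comparison.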
bool ShapeRefiner::IsUpdatedShapesOrTypes(
InferenceContext* c, const std::vector<ShapeAndType>& existing,
const std::vector<ShapeAndType>& updated) {
if (existing.size() != updated.size()) {
return true;
}
for (int i = 0; i < existing.size(); i++) {
if (!SameDefinedShape(c, existing[i].shape, updated[i].shape) ||
existing[i].dtype != updated[i].dtype) {
return true;
}
}
return false;
}
} | #include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class ShapeRefinerTest : public ::testing::Test {
protected:
bool SameHandle(shape_inference::DimensionHandle a,
shape_inference::DimensionHandle b) {
return a.SameHandle(b);
}
bool SameHandle(shape_inference::ShapeHandle a,
shape_inference::ShapeHandle b) {
return a.SameHandle(b);
}
bool SameDefinedShape(shape_inference::InferenceContext* c,
shape_inference::ShapeHandle s0,
shape_inference::ShapeHandle s1) {
return ShapeRefiner::SameDefinedShape(c, s0, s1);
}
bool IsUpdatedShapesOrTypes(
shape_inference::InferenceContext* c,
const std::vector<shape_inference::ShapeAndType>& existing,
const std::vector<shape_inference::ShapeAndType>& updated) {
return ShapeRefiner::IsUpdatedShapesOrTypes(c, existing, updated);
}
static constexpr int64_t kMaxTensorSize = ShapeRefiner::kMaxTensorSize;
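  // Builds placeholder -> Shape -> StridedSlice -> `test_op` and checks that
  // the refiner reconstructs `expected` as the sliced shape. Graph-time shape
  // inference is disabled so the ShapeRefiner under test does all the work.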
void TestStridedSlice(const PartialTensorShape& input_shape, int begin,
int end, int stride, const char* expected,
int begin_mask = 0, int end_mask = 0,
int ellipsis_mask = 0, int shrink_axis_mask = 0,
StringPiece test_op = "TensorAsShapeInt32") {
Scope root = Scope::DisabledShapeInferenceScope();
auto placeholder =
ops::Placeholder(root, DT_INT32, ops::Placeholder::Shape(input_shape));
auto input = ops::Shape(root, placeholder);
auto begin_op = ops::Const(root, {begin});
auto end_op = ops::Const(root, {end});
auto stride_op = ops::Const(root, {stride});
auto slice = ops::StridedSlice(root, input, begin_op, end_op, stride_op,
ops::StridedSlice::BeginMask(begin_mask)
.EndMask(end_mask)
.EllipsisMask(ellipsis_mask)
.ShrinkAxisMask(shrink_axis_mask));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", test_op)
.Input(slice.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(placeholder.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(begin_op.node()));
TF_ASSERT_OK(m.AddNode(end_op.node()));
TF_ASSERT_OK(m.AddNode(stride_op.node()));
TF_ASSERT_OK(m.AddNode(slice.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ(ctx->DebugString(ctx->output(0)), expected);
}
};
namespace {
#define EXPECT_SHAPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
EXPECT_EQ(EXPECTED, ctx->DebugString(ctx->output(IDX))); \
} while (0);
#define EXPECT_RESOURCE_SINGLE_SHAPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
auto* v = ctx->output_handle_shapes_and_types(IDX); \
EXPECT_NE(v, nullptr); \
EXPECT_EQ(v->size(), 1); \
EXPECT_EQ(EXPECTED, ctx->DebugString((*v)[0].shape)); \
} while (0);
#define EXPECT_RESOURCE_SINGLE_TYPE(EXPECTED, M, OP, IDX) \
do { \
shape_inference::InferenceContext* ctx = M.GetContext(OP.node()); \
auto* v = ctx->output_handle_shapes_and_types(IDX); \
EXPECT_NE(v, nullptr); \
EXPECT_EQ(v->size(), 1); \
EXPECT_EQ(EXPECTED, (*v)[0].dtype); \
} while (0);
TEST_F(ShapeRefinerTest, Constant) {
Scope root = Scope::NewRootScope();
auto c = ops::Const(root, 42.0f);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(c.node()));
EXPECT_SHAPE("[]", m, c, 0);
}
TEST_F(ShapeRefinerTest, MatMul) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, {{1.0f}, {2.0f}});
auto b = ops::Const(root, {{1.0f, 2.0f}});
auto mm = ops::MatMul(root, a, b);
TF_ASSERT_OK(m.AddNode(a.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
TF_ASSERT_OK(m.AddNode(mm.node()));
EXPECT_SHAPE("[2,1]", m, a, 0);
EXPECT_SHAPE("[1,2]", m, b, 0);
EXPECT_SHAPE("[2,2]", m, mm, 0);
}
TEST_F(ShapeRefinerTest, BadShapes) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, {{1.0f}, {2.0f}});
auto b = ops::Const(root, {{1.0f}, {2.0f}});
auto mm = ops::MatMul(root, a, b);
TF_ASSERT_OK(m.AddNode(a.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
Status s = m.AddNode(mm.node());
ASSERT_FALSE(s.ok());
ASSERT_TRUE(absl::StrContains(s.message(),
"Dimensions must be equal, but are 1 and 2"));
}
TEST_F(ShapeRefinerTest, SetShape) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Scope root = Scope::NewRootScope();
auto a = ops::Placeholder(root, DT_FLOAT);
TF_ASSERT_OK(m.AddNode(a.node()));
auto ic = m.GetContext(a.node());
ASSERT_NE(nullptr, ic);
shape_inference::ShapeHandle h = ic->MakeShape({2, ic->UnknownDim()});
TF_ASSERT_OK(m.SetShape(a.node(), 0, h));
EXPECT_SHAPE("[2,?]", m, a, 0);
shape_inference::ShapeHandle h2 = ic->MakeShape({ic->UnknownDim(), 2});
TF_ASSERT_OK(m.SetShape(a.node(), 0, h2));
EXPECT_SHAPE("[2,2]", m, a, 0);
ASSERT_FALSE(m.SetShape(a.node(), 1, h).ok());
ASSERT_FALSE(m.SetShape(a.node(), -1, h).ok());
auto b = ops::Const(root, {{1.0f}, {2.0f}});
ASSERT_FALSE(m.SetShape(b.node(), 0, h).ok());
h = ic->MakeShape({3, ic->UnknownDim()});
ASSERT_FALSE(m.SetShape(a.node(), 0, h).ok());
}
namespace {
REGISTER_OP("TestOpWithNoShapeFn").Input("a: int32").Output("o: int32");
}
TEST_F(ShapeRefinerTest, MissingShapeInferenceFns) {
Scope root = Scope::NewRootScope();
auto a = ops::Const(root, 42);
Node* b;
TF_ASSERT_OK(NodeBuilder("b", "TestOpWithNoShapeFn")
.Input(a.node())
.Finalize(root.graph(), &b));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(a.node()));
EXPECT_FALSE(m.AddNode(b).ok());
m.set_require_shape_inference_fns(false);
TF_EXPECT_OK(m.AddNode(b));
}
TEST_F(ShapeRefinerTest, PropagateConstants) {
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Variable(root, {}, DT_INT32);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[?]", m, am, 0);
}
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Const(root, 1);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[3]", m, am, 0);
}
{
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto dim = ops::Const(root, 0);
auto am = ops::ArgMax(root, input, dim);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(dim.node()));
TF_ASSERT_OK(m.AddNode(am.node()));
EXPECT_SHAPE("[2]", m, am, 0);
}
}
TEST_F(ShapeRefinerTest, ExtractConstantSubgraphMultiOutput) {
{
Scope root = Scope::NewRootScope();
auto small = ops::Const(root, {static_cast<int32>(1), TensorShape({1, 1})});
auto large = ops::Const(
root, {static_cast<int32>(2), TensorShape({4, kMaxTensorSize / 2})});
Node* multi;
TF_ASSERT_OK(NodeBuilder("MI", "MultiIdentity")
.Input(std::vector<NodeBuilder::NodeOut>{small.node(),
large.node()})
.Attr("N", 2)
.Finalize(root.graph(), &multi));
Node* shape_v;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeVectorForAllElements")
.Input(multi, 0)
.Finalize(root.graph(), &shape_v));
auto add = ops::Add(root, Output(multi, 0), Output(multi, 1));
Node* shape_v2;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeVectorForAllElements")
.Input(add.node())
.Finalize(root.graph(), &shape_v2));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(small.node()));
TF_ASSERT_OK(m.AddNode(large.node()));
TF_ASSERT_OK(m.AddNode(multi));
TF_ASSERT_OK(m.AddNode(shape_v));
TF_ASSERT_OK(m.AddNode(add.node()));
TF_ASSERT_OK(m.AddNode(shape_v2));
shape_inference::InferenceContext* ctx = m.GetContext(shape_v2);
EXPECT_EQ(strings::StrCat("[", kMaxTensorSize * 2 * 3, "]"),
ctx->DebugString(ctx->output(0)));
}
}
namespace {
REGISTER_OP("TestOp")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->input_tensor(0)) {
if (c->input_tensor(1)) {
c->set_output(0, c->Matrix(10, 10));
return absl::OkStatus();
}
return shape_inference::ScalarShape(c);
}
return shape_inference::UnknownShape(c);
});
}
TEST_F(ShapeRefinerTest, InputTensorDependencies) {
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
Graph graph(OpRegistry::Global());
Node* node;
Tensor a(DT_FLOAT, TensorShape({}));
a.scalar<float>()() = 1.0;
Tensor b(DT_FLOAT, TensorShape({}));
b.scalar<float>()() = 2.0;
Node* input_a = test::graph::Constant(&graph, a);
Node* input_b = test::graph::Constant(&graph, b);
TF_ASSERT_OK(NodeBuilder("Test", "TestOp")
.Input(input_a)
.Input(input_b)
.Finalize(&graph, &node));
TF_ASSERT_OK(m.AddNode(input_a));
TF_ASSERT_OK(m.AddNode(input_b));
TF_ASSERT_OK(m.AddNode(node));
shape_inference::InferenceContext* ctx = m.GetContext(node);
EXPECT_EQ("[10,10]", ctx->DebugString(ctx->output(0)));
}
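// Test-only ops used below: ShapeData/ShapeDataInt64 turn the values of a
// constant input tensor into the output shape, ShapeVectorForAllElements
// emits a vector whose length is the sum of the input's elements, and
// MultiIdentity forwards N int32 inputs (with a CPU kernel registered so
// the constant-folding path can actually run it).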
namespace {
REGISTER_OP("ShapeData")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
std::vector<shape_inference::DimensionHandle> dims;
dims.reserve(shape_data->NumElements());
for (int i = 0; i < shape_data->NumElements(); ++i) {
dims.emplace_back(c->MakeDim(shape_data->flat<int32>()(i)));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("ShapeDataInt64")
.Input("a: int64")
.Output("o: int64")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
std::vector<shape_inference::DimensionHandle> dims;
dims.reserve(shape_data->NumElements());
for (int i = 0; i < shape_data->NumElements(); ++i) {
dims.emplace_back(c->MakeDim(shape_data->flat<int64_t>()(i)));
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("ShapeVectorForAllElements")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn([](shape_inference::InferenceContext* c) {
const Tensor* shape_data = c->input_tensor(0);
if (shape_data == nullptr) {
return shape_inference::UnknownShape(c);
}
int64_t total = 0;
for (int i = 0; i < shape_data->NumElements(); ++i) {
total += shape_data->flat<int32>()(i);
}
c->set_output(0, c->Vector(total));
return absl::OkStatus();
});
REGISTER_OP("MultiIdentity")
.Input("a: N * int32")
.Output("o: N * int32")
.Attr("N: int >= 1")
.SetShapeFn([](shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_inputs(); ++i) {
c->set_output(i, c->input(i));
}
return absl::OkStatus();
});
class MultiIdentity : public OpKernel {
public:
explicit MultiIdentity(OpKernelConstruction* c) : OpKernel(c) {}
void Compute(OpKernelContext* c) override {
for (int i = 0; i < c->num_inputs(); ++i) {
c->set_output(i, c->input(i));
}
}
};
REGISTER_KERNEL_BUILDER(Name("MultiIdentity").Device(DEVICE_CPU),
MultiIdentity);
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {2, 4}, DT_INT32);
auto shape = ops::Shape(root, input);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[4]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContentInt64) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root, {2, 4, static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT64);
auto attrs = ops::Shape::OutType(DT_INT64);
auto shape = ops::Shape(root, input, attrs);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeDataInt64")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[4]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateShapeAcrossTensorContentInt32Overflow) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root, {2, 4, static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT32);
auto shape = ops::Shape(root, input);
auto ones = ops::Const(root, {1});
auto sliced = ops::Slice(root, shape, ones, ones);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(sliced.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(ones.node()));
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(sliced.node()));
EXPECT_FALSE(m.AddNode(shape_data).ok());
}
TEST_F(ShapeRefinerTest, PropagateRankAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {2, 4, 3}, DT_INT32);
auto rank = ops::Rank(root, input);
auto identity = ops::Identity(root, rank);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(rank.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[3]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContent) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(root, {1, 2, 3, 4, 5}, DT_INT32);
auto size = ops::Size(root, input);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[120]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContentInt64) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root,
{1, 2, 3, 4, 5,
static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT64);
auto attrs = ops::Size::OutType(DT_INT64);
auto size = ops::Size(root, input, attrs);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeDataInt64")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[515396075280]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSizeAcrossTensorContentInt32Overflow) {
Scope root = Scope::NewRootScope();
auto input = ops::Variable(
root,
{1, 2, 3, 4, 5,
static_cast<int64_t>(std::numeric_limits<int32>::max()) * 2},
DT_INT32);
auto size = ops::Size(root, input);
auto identity = ops::Identity(root, size);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(identity.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(identity.node()));
EXPECT_FALSE(m.AddNode(shape_data).ok());
}
TEST_F(ShapeRefinerTest, PropagateShape) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto shape = ops::Shape(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(shape.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[3,2]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateSize) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto size = ops::Size(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(size.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(size.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[6]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateRank) {
Scope root = Scope::NewRootScope();
auto input = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
auto rank = ops::Rank(root, input);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(rank.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(rank.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[2]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, PropagateRange) {
Scope root = Scope::NewRootScope();
auto begin = ops::Const(root, 1);
auto limit = ops::Const(root, 11);
auto delta = ops::Const(root, 3);
auto range = ops::Range(root, begin, limit, delta);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(delta.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,4,7,10]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, NoPropagatePlaceholderWithDefault) {
Scope root = Scope::NewRootScope();
auto constant = ops::Const<int>(root, 2);
auto placeholder =
ops::PlaceholderWithDefault(root, constant, PartialTensorShape());
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(placeholder.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(constant.node()));
TF_ASSERT_OK(m.AddNode(placeholder.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ic = m.GetContext(shape_data);
EXPECT_EQ(ic->DebugString(ic->output(0)), "?");
}
TEST_F(ShapeRefinerTest, ConstantValueTwoInputsToSameNode) {
Scope root = Scope::NewRootScope();
auto begin_and_delta = ops::Const(root, 1);
auto limit = ops::Const(root, 4);
auto range = ops::Range(root, begin_and_delta, limit, begin_and_delta);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin_and_delta.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,2,3]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueVisitNodeTwice) {
Scope root = Scope::NewRootScope();
auto begin = ops::Const(root, 1);
auto limit = ops::Const(root, 8);
auto delta = ops::Const(root, 3);
auto d1 = ops::Add(root, begin, limit);
auto d2 = ops::Add(root, begin, delta);
auto flimit = ops::Sub(root, begin, d1);
auto fdelta = ops::Sub(root, begin, d2);
auto nl = ops::Abs(root, flimit);
auto nd = ops::Abs(root, fdelta);
auto range = ops::Range(root, begin, nl, nd);
Node* shape_data;
TF_ASSERT_OK(NodeBuilder("Test", "ShapeData")
.Input(range.node())
.Finalize(root.graph(), &shape_data));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(limit.node()));
TF_ASSERT_OK(m.AddNode(delta.node()));
TF_ASSERT_OK(m.AddNode(d1.node()));
TF_ASSERT_OK(m.AddNode(d2.node()));
TF_ASSERT_OK(m.AddNode(flimit.node()));
TF_ASSERT_OK(m.AddNode(fdelta.node()));
TF_ASSERT_OK(m.AddNode(nl.node()));
TF_ASSERT_OK(m.AddNode(nd.node()));
TF_ASSERT_OK(m.AddNode(range.node()));
TF_ASSERT_OK(m.AddNode(shape_data));
shape_inference::InferenceContext* ctx = m.GetContext(shape_data);
EXPECT_EQ("[1,4,7]", ctx->DebugString(ctx->output(0)));
}
namespace {
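// Shape functions for the ConstantValueAsShape_* tests: TensorAsShapeShapeFn
// interprets input 0 as a shape tensor, while PartialTensorAsShapeShapeFn
// only handles a single-element constant input and reports unknown otherwise.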
Status TensorAsShapeShapeFn(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
  TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(/*input_idx=*/0, &out));
c->set_output(0, out);
return absl::OkStatus();
}
Status PartialTensorAsShapeShapeFn(shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle out;
const Tensor* t = c->input_tensor(0);
if (t == nullptr || t->NumElements() != 1) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(TensorShape({t->flat<int32>()(0)}), &out));
c->set_output(0, out);
return absl::OkStatus();
}
REGISTER_OP("PartialTensorAsShapeInt32")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn(PartialTensorAsShapeShapeFn);
REGISTER_OP("TensorAsShapeInt32")
.Input("a: int32")
.Output("o: int32")
.SetShapeFn(TensorAsShapeShapeFn);
REGISTER_OP("TensorAsShapeInt64")
.Input("a: int64")
.Output("o: int64")
.SetShapeFn(TensorAsShapeShapeFn);
REGISTER_OP("NonConstScalarInt32")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("NonConstScalarInt64")
.Output("o: int64")
.SetDoNotOptimize()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("WithEmptyVectorShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Vector(0));
return absl::OkStatus();
});
REGISTER_OP("WithPartialShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(
0, c->MakeShape({1, shape_inference::InferenceContext::kUnknownDim, 3,
shape_inference::InferenceContext::kUnknownDim, 5}));
return absl::OkStatus();
});
REGISTER_OP("WithPartialShape2")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(
0,
c->MakeShape({6, shape_inference::InferenceContext::kUnknownDim, 8}));
return absl::OkStatus();
});
REGISTER_OP("WithUnknownShape")
.Output("o: int32")
.SetDoNotOptimize()
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_EmptyVector) {
Scope root = Scope::NewRootScope();
Node* input;
TF_ASSERT_OK(
NodeBuilder("in", "WithEmptyVectorShape").Finalize(root.graph(), &input));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(input)
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_Shape) {
for (int pass = 0; pass < 2; ++pass) {
Scope root = Scope::NewRootScope();
Node* input;
TF_ASSERT_OK(
NodeBuilder("in", pass == 0 ? "WithPartialShape" : "WithUnknownShape")
.Finalize(root.graph(), &input));
auto shape = ops::Shape(root, Output(input));
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(shape.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input));
TF_ASSERT_OK(m.AddNode(shape.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
if (pass == 0) {
EXPECT_EQ("[1,?,3,?,5]", ctx->DebugString(ctx->output(0)));
} else {
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
}
}
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInt32) {
Scope root = Scope::DisabledShapeInferenceScope();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
InputList inputs{
Input(ops::Const<int32>(root, 10)),
Input(ops::Const<int32>(root, 20)),
Input(Output(scalar_non_const)),
Input(ops::Const<int32>(root, 40)),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,20,?,40]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInt64) {
Scope root = Scope::DisabledShapeInferenceScope();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt64")
.Finalize(root.graph(), &scalar_non_const));
InputList inputs{
Input(ops::Const<int64_t>(root, int64_t{10})),
Input(ops::Const<int64_t>(root, int64_t{20})),
Input(Output(scalar_non_const)),
Input(ops::Const<int64_t>(root, int64_t{1} << 40)),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,20,?,1099511627776]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackUnknownDim) {
Scope root = Scope::NewRootScope();
InputList inputs{
Input(ops::Const<int64_t>(root, int64_t{10})),
Input(ops::Const<int64_t>(root, int64_t{-1})),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[10,?]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_PackInvalidInput) {
Scope root = Scope::NewRootScope();
InputList inputs{
Input(ops::Const<int64_t>(root, {int64_t{10}, int64_t{20}})),
Input(ops::Const<int64_t>(root, {int64_t{10}, int64_t{21}})),
};
auto pack = ops::Stack(root, inputs);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt64")
.Input(pack.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
for (const auto& input : inputs) {
TF_ASSERT_OK(m.AddNode(input.node()));
}
TF_ASSERT_OK(m.AddNode(pack.node()));
EXPECT_TRUE(absl::StrContains(m.AddNode(result).message(), "but is rank 2"));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_Concat) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* partial_1;
Node* partial_2;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
auto const_input = ops::Const(root, {9, 10, 11});
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
const_input,
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("[1,?,3,?,5,6,?,8,9,10,11]", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_ConcatWithUnknown) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
Node* partial_1;
Node* partial_2;
Node* unknown;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
TF_ASSERT_OK(NodeBuilder("in", "WithUnknownShape").Finalize(g, &unknown));
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
ops::Shape(root, Output(unknown)),
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
TF_ASSERT_OK(m.AddNode(unknown));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_ConcatInvalidDimValue) {
Scope root = Scope::DisabledShapeInferenceScope();
Graph* g = root.graph();
Node* scalar_non_const;
TF_ASSERT_OK(NodeBuilder("in", "NonConstScalarInt32")
.Finalize(root.graph(), &scalar_non_const));
Node* partial_1;
Node* partial_2;
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape").Finalize(g, &partial_1));
TF_ASSERT_OK(NodeBuilder("in", "WithPartialShape2").Finalize(g, &partial_2));
auto const_input = ops::Const(root, {9, -2, 11});
OutputList concat_inputs{
ops::Shape(root, Output(partial_1)),
ops::Shape(root, Output(partial_2)),
const_input,
};
auto concat_dim = ops::Const(root, 0);
auto concat = ops::Concat(root, concat_inputs, concat_dim);
TF_ASSERT_OK(root.status());
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(concat.node())
.Finalize(g, &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(partial_1));
TF_ASSERT_OK(m.AddNode(partial_2));
for (const auto& o : concat_inputs) {
TF_ASSERT_OK(m.AddNode(o.node()));
}
TF_ASSERT_OK(m.AddNode(concat_dim.node()));
TF_ASSERT_OK(m.AddNode(concat.node()));
EXPECT_EQ("Invalid value in tensor used for shape: -2",
m.AddNode(result).message());
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSlice) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/2,
      /*end=*/5,
      /*stride=*/1,
      /*expected=*/"[3,?,5]");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceNegativeStride) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/10,
      /*end=*/0,
      /*stride=*/-1,
      /*expected=*/"[5,?,3,?]");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceMasks) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/3,
      /*end=*/4,
      /*stride=*/1,
      /*expected=*/"[1,?,3,?,5]",
      /*begin_mask=*/1,
      /*end_mask=*/1);
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceInvalidMask) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3},
      /*begin=*/2,
      /*end=*/3,
      /*stride=*/1,
      /*expected=*/"[?,?,?]",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/1);
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceWithShrinkAxis) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/2,
      /*end=*/3,
      /*stride=*/1,
      /*expected=*/"[3]",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/0,
      /*shrink_axis_mask=*/1,
      /*test_op=*/"PartialTensorAsShapeInt32");
}
TEST_F(ShapeRefinerTest,
ConstantValueAsShape_StridedSliceWithShrinkAxisOnUnknownDim) {
  TestStridedSlice(
      /*input_shape=*/{1, -1, 3, -1, 5},
      /*begin=*/1,
      /*end=*/2,
      /*stride=*/1,
      /*expected=*/"?",
      /*begin_mask=*/0,
      /*end_mask=*/0,
      /*ellipsis_mask=*/0,
      /*shrink_axis_mask=*/1,
      /*test_op=*/"PartialTensorAsShapeInt32");
}
TEST_F(ShapeRefinerTest, ConstantValueAsShape_StridedSliceMulti) {
Scope root = Scope::DisabledShapeInferenceScope();
auto input = ops::Placeholder(root, DT_INT32);
auto begin = ops::Const(root, {0, 0});
auto end = ops::Const(root, {2, 2});
auto stride = ops::Const(root, {1, 1});
auto slice = ops::StridedSlice(root, input, begin, end, stride);
Node* result;
TF_ASSERT_OK(NodeBuilder("test", "TensorAsShapeInt32")
.Input(slice.node())
.Finalize(root.graph(), &result));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(input.node()));
TF_ASSERT_OK(m.AddNode(begin.node()));
TF_ASSERT_OK(m.AddNode(end.node()));
TF_ASSERT_OK(m.AddNode(stride.node()));
TF_ASSERT_OK(m.AddNode(slice.node()));
TF_ASSERT_OK(m.AddNode(result));
shape_inference::InferenceContext* ctx = m.GetContext(result);
EXPECT_EQ(ctx->DebugString(ctx->output(0)), "?");
}
namespace {
REGISTER_OP("Dummy");
}
TEST_F(ShapeRefinerTest, SameDefinedShape) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* test;
TF_CHECK_OK(NodeBuilder("test", "Dummy").Finalize(g, &test));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
m.set_require_shape_inference_fns(false);
TF_ASSERT_OK(m.AddNode(test));
shape_inference::InferenceContext* ctx = m.GetContext(test);
auto unknown = ctx->UnknownShape();
auto unknown_b = ctx->UnknownShape();
auto s_1_2 = ctx->MakeShape({1, 2});
auto s_1_2_b = ctx->MakeShape({1, 2});
auto s_2_2 = ctx->MakeShape({2, 2});
auto s_unknown_2 = ctx->MakeShape({-1, 2});
auto s_unknown_2_b = ctx->MakeShape({-1, 2});
EXPECT_TRUE(SameDefinedShape(ctx, unknown, unknown));
EXPECT_FALSE(SameDefinedShape(ctx, unknown, unknown_b));
EXPECT_FALSE(SameDefinedShape(ctx, unknown, s_1_2));
EXPECT_TRUE(SameDefinedShape(ctx, s_1_2, s_1_2_b));
EXPECT_FALSE(SameDefinedShape(ctx, s_1_2, s_2_2));
EXPECT_TRUE(SameDefinedShape(ctx, s_unknown_2, s_unknown_2));
EXPECT_FALSE(SameDefinedShape(ctx, s_unknown_2, s_unknown_2_b));
}
TEST_F(ShapeRefinerTest, IsUpdatedShapesOrTypes) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* test;
TF_CHECK_OK(NodeBuilder("test", "Dummy").Finalize(g, &test));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
m.set_require_shape_inference_fns(false);
TF_ASSERT_OK(m.AddNode(test));
shape_inference::InferenceContext* ctx = m.GetContext(test);
shape_inference::ShapeHandle unknown = ctx->UnknownShape();
std::vector<shape_inference::ShapeAndType> t0{
{ctx->MakeShape({1, 2, 3}), DT_FLOAT},
{unknown, DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t1{
{ctx->MakeShape({1, 2, 3}), DT_FLOAT},
{unknown, DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t2{
{ctx->MakeShape({1, 2, 4}), DT_FLOAT},
{ctx->UnknownShape(), DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
std::vector<shape_inference::ShapeAndType> t3{
{ctx->MakeShape({1, 2, 3}), DT_INT32},
{ctx->UnknownShape(), DT_INVALID},
{ctx->MakeShape({4, 3, 2, 1}), DT_INT32}};
EXPECT_FALSE(IsUpdatedShapesOrTypes(ctx, t0, t1));
EXPECT_TRUE(IsUpdatedShapesOrTypes(ctx, t0, t2));
EXPECT_TRUE(IsUpdatedShapesOrTypes(ctx, t0, t3));
}
TEST_F(ShapeRefinerTest, IncrementalUpdates) {
Scope root = Scope::NewRootScope();
Graph* g = root.graph();
Node* queue;
TF_CHECK_OK(NodeBuilder("queue", "FIFOQueueV2")
.Attr("component_types", {DT_FLOAT})
.Finalize(g, &queue));
Node* dequeue;
TF_CHECK_OK(NodeBuilder("dequeue", "QueueDequeueV2")
.Attr("component_types", {DT_FLOAT})
.Input(queue)
.Finalize(g, &dequeue));
ShapeRefiner m(TF_GRAPH_DEF_VERSION, OpRegistry::Global());
TF_ASSERT_OK(m.AddNode(queue));
TF_ASSERT_OK(m.AddNode(dequeue));
shape_inference::InferenceContext* ctx = m.GetContext(dequeue);
EXPECT_EQ("?", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shape_inference::ShapeHandle shp = ctx->MakeShape({3, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
bool refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/false, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[3,7]", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shp = ctx->MakeShape({2, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/true, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[?,7]", ctx->DebugString(ctx->output(0)));
ctx = m.GetContext(queue);
shp = ctx->MakeShape({shape_inference::InferenceContext::kUnknownDim, 7});
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp, DT_FLOAT}});
refined = false;
  TF_ASSERT_OK(m.UpdateNode(dequeue, /*relax=*/true, &refined));
EXPECT_TRUE(refined);
ctx = m.GetContext(dequeue);
EXPECT_EQ("[?,7]", ctx->DebugString(ctx->output(0)));
EXPECT_TRUE(SameHandle(ctx->Dim(ctx->output(0), 0), ctx->Dim(shp, 0)));
ctx = m.GetContext(queue);
shape_inference::ShapeHandle shp2 = shp;
ctx->set_output_handle_shapes_and_types(
0, std::vector<shape_inference::ShapeAndType>{{shp2, DT_FLOAT}});
refined = false;
TF_ASSERT_OK(m.UpdateNode(dequeue, false, &refined));
EXPECT_FALSE(refined);
EXPECT_TRUE(SameHandle(ctx->Dim(shp, 0), ctx->Dim(shp2, 0)));
}
void TestSimpleFunctionInference(bool enable_function_inference) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Const(root, {{1.0f, 2.0f}});
auto x2 = test::function::Call(&root, "x2", "XTimesTwo", {x});
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
if (enable_function_inference) {
m.set_function_library_for_shape_inference(&f_lib);
}
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
EXPECT_SHAPE("[1,2]", m, x, 0);
if (enable_function_inference) {
EXPECT_SHAPE("[1,2]", m, x2, 0);
} else {
EXPECT_SHAPE("?", m, x2, 0);
}
}
TEST_F(ShapeRefinerTest, SimpleFunctionShapeInference_Disabled) {
  TestSimpleFunctionInference(/*enable_function_inference=*/false);
}
TEST_F(ShapeRefinerTest, SimpleFunctionShapeInference) {
  TestSimpleFunctionInference(/*enable_function_inference=*/true);
}
TEST_F(ShapeRefinerTest, FunctionShapeInferenceFallback) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Const(root, {{.0f, .0f}});
auto x2 = test::function::Call(&root, "x2", "XTimesTwo", {x});
FunctionDefLibrary empty_f_lib_proto;
FunctionLibraryDefinition empty_f_lib(OpRegistry::Global(),
empty_f_lib_proto);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&empty_f_lib);
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
EXPECT_SHAPE("[1,2]", m, x, 0);
EXPECT_SHAPE("?", m, x2, 0);
}
TEST_F(ShapeRefinerTest, ChainedFunctionShapeInferenceWithMultipleInputs) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
*(f_lib_proto.add_function()) = test::function::XTimes16();
*(f_lib_proto.add_function()) = test::function::WXPlusB();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto w = ops::Const(root, {{.0f}, {.0f}, {.0f}});
auto x = ops::Const(root, {{.0f, .0f, .0f}});
auto b = ops::Const(root, {{.0f}});
auto wxplusb = test::function::Call(&root, "wxplusb", "WXPlusB", {w, x, b});
auto wxplusb16 =
test::function::Call(&root, "wxplusb16", "XTimes16", {wxplusb});
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&f_lib);
TF_ASSERT_OK(m.AddNode(w.node()));
TF_ASSERT_OK(m.AddNode(x.node()));
TF_ASSERT_OK(m.AddNode(b.node()));
TF_ASSERT_OK(m.AddNode(wxplusb.node()));
TF_ASSERT_OK(m.AddNode(wxplusb16.node()));
EXPECT_SHAPE("[3,1]", m, w, 0);
EXPECT_SHAPE("[1,3]", m, x, 0);
EXPECT_SHAPE("[1,1]", m, b, 0);
EXPECT_SHAPE("[3,3]", m, wxplusb, 0);
EXPECT_SHAPE("[3,3]", m, wxplusb16, 0);
}
TEST_F(ShapeRefinerTest, FunctionShapeInferenceWorksForResourceHandles) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::Swap();
FunctionLibraryDefinition f_lib(OpRegistry::Global(), f_lib_proto);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x1 = ops::VarHandleOp(root, DataType::DT_FLOAT, TensorShape({128, 256}));
auto x2 = ops::VarHandleOp(root, DataType::DT_DOUBLE, TensorShape({1024}));
auto swap = test::function::Call(&root, "swap", "Swap", {x1, x2});
EXPECT_EQ(swap.node()->num_outputs(), 2);
ShapeRefiner m(TF_GRAPH_DEF_VERSION, &f_lib);
m.set_function_library_for_shape_inference(&f_lib);
TF_ASSERT_OK(m.AddNode(x1.node()));
TF_ASSERT_OK(m.AddNode(x2.node()));
TF_ASSERT_OK(m.AddNode(swap.node()));
EXPECT_EQ(m.GetContext(swap.node())->num_outputs(), 2);
EXPECT_RESOURCE_SINGLE_SHAPE("[128,256]", m, x1, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[1024]", m, x2, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[1024]", m, swap, 0);
EXPECT_RESOURCE_SINGLE_SHAPE("[128,256]", m, swap, 1);
EXPECT_RESOURCE_SINGLE_TYPE(DataType::DT_DOUBLE, m, swap, 0);
EXPECT_RESOURCE_SINGLE_TYPE(DataType::DT_FLOAT, m, swap, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/shape_refiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/shape_refiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37982d4d-cc04-490b-a3c7-68edcd3f1dc4 | cpp | tensorflow/tensorflow | gpu_bfc_allocator | tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc | tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc | #include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "xla/tsl/framework/bfc_allocator.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace {
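// Returns the allow-growth setting to use: TF_FORCE_GPU_ALLOW_GROWTH, when
// set to "true" or "false", overrides the value from the config (with a
// warning if they disagree); any other value falls back to the config.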
bool GetAllowGrowthValue(bool orig_value) {
const char* force_allow_growth_string =
std::getenv("TF_FORCE_GPU_ALLOW_GROWTH");
if (force_allow_growth_string == nullptr) {
return orig_value;
}
if (strcmp("false", force_allow_growth_string) == 0) {
if (orig_value) {
LOG(WARNING)
<< "Overriding orig_value setting because the"
<< " TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original"
<< " config value was " << orig_value << ".";
}
return false;
} else if (strcmp("true", force_allow_growth_string) == 0) {
if (!orig_value) {
LOG(WARNING)
<< "Overriding orig_value setting because the"
<< " TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original"
<< " config value was " << orig_value << ".";
}
return true;
}
LOG(ERROR)
<< "The TF_FORCE_GPU_ALLOW_GROWTH environment variable is set but could"
<< " not be parsed: \"" << force_allow_growth_string << "\". Valid"
<< " values are \"true\" or \"false\". Using original config value"
<< " of " << orig_value << ".";
return orig_value;
}
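// GPU garbage collection is controlled by TF_ENABLE_GPU_GARBAGE_COLLECTION
// in the same way, except that it defaults to enabled when the variable is
// unset or unparseable.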
bool GetGarbageCollectionValue() {
const char* enable_gpu_garbage_collection =
std::getenv("TF_ENABLE_GPU_GARBAGE_COLLECTION");
if (enable_gpu_garbage_collection == nullptr) {
return true;
}
if (strcmp("false", enable_gpu_garbage_collection) == 0) {
return false;
} else if (strcmp("true", enable_gpu_garbage_collection) == 0) {
return true;
}
LOG(ERROR)
<< "The TF_ENABLE_GPU_GARBAGE_COLLECTION environment variable is set but"
<< " could not be parsed: \"" << enable_gpu_garbage_collection << "\"."
<< " Valid values are \"true\" or \"false\"."
<< " Using the default value \"true\".";
return true;
}
}
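// The BFCAllocator::Options are assembled by an immediately-invoked lambda so
// that the environment-variable overrides above are resolved exactly once,
// before the base-class constructor runs.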
GPUBFCAllocator::GPUBFCAllocator(
std::unique_ptr<tsl::SubAllocator> sub_allocator, size_t total_memory,
const std::string& name, const Options& opts)
: BFCAllocator(std::move(sub_allocator), total_memory, name, [&] {
BFCAllocator::Options o;
o.allow_growth = GetAllowGrowthValue(opts.allow_growth);
o.allow_retry_on_failure = opts.allow_retry_on_failure;
if (opts.garbage_collection.has_value()) {
o.garbage_collection = *opts.garbage_collection;
} else {
o.garbage_collection = GetGarbageCollectionValue();
}
o.fragmentation_fraction = opts.fragmentation_fraction;
return o;
}()) {}
} | #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include <algorithm>
#include <optional>
#include <vector>
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/lib/gtl/inlined_vector.h"
#include "xla/tsl/lib/random/simple_philox.h"
#include "tensorflow/core/common_runtime/device/device_mem_allocator.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tensorflow/core/protobuf/bfc_memory_map.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
using stream_executor::GPUMachineManager;
using tensorflow::BinSummary;
using tensorflow::DeviceMemAllocator;
using tensorflow::GPUBFCAllocator;
using tensorflow::GPUOptions;
using tensorflow::TypedAllocator;
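// Asserts that the allocator reports the expected allocation count, bytes
// currently in use, peak bytes in use, and largest single allocation.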
void CheckStats(Allocator* a, int64_t num_allocs, int64_t bytes_in_use,
int64_t peak_bytes_in_use, int64_t largest_alloc_size) {
std::optional<AllocatorStats> stats = a->GetStats();
EXPECT_TRUE(stats);
if (!stats) {
return;
}
LOG(INFO) << "Alloc stats: " << std::endl << stats->DebugString();
EXPECT_EQ(stats->bytes_in_use, bytes_in_use);
EXPECT_EQ(stats->peak_bytes_in_use, peak_bytes_in_use);
EXPECT_EQ(stats->num_allocs, num_allocs);
EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
}
class GPUBFCAllocatorTest
: public ::testing::TestWithParam<std::unique_ptr<SubAllocator> (*)(
size_t)> {};
std::unique_ptr<SubAllocator> CreateGPUMemAllocator(size_t) {
PlatformDeviceId gpu_id(0);
return absl::WrapUnique(new DeviceMemAllocator(
GPUMachineManager()->ExecutorForDevice(gpu_id.value()).value(), gpu_id,
stream_executor::MemoryType::kDevice, {}, {}));
}
std::unique_ptr<SubAllocator> CreateSubAllocator(
size_t virtual_address_space_size = 1ull << 32) {
return CreateGPUMemAllocator(virtual_address_space_size);
}
auto TestSuiteValues() { return ::testing::Values(&CreateGPUMemAllocator); }
TEST_P(GPUBFCAllocatorTest, NoDups) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
CheckStats(&a, 0, 0, 0, 0);
std::vector<void*> ptrs;
for (int s = 1; s < 1024; s++) {
void* raw = a.AllocateRaw(1, s);
ptrs.push_back(raw);
}
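  // 1023 allocations of sizes 1..1023 bytes, each rounded up to a multiple of
  // 256 bytes (cf. the AllocatedVsRequested test below):
  //   256*256 + 256*512 + 256*768 + 255*1024 = 654336 bytes in use.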
CheckStats(&a, 1023, 654336, 654336, 1024);
std::sort(ptrs.begin(), ptrs.end());
for (size_t i = 1; i < ptrs.size(); i++) {
ASSERT_NE(ptrs[i], ptrs[i - 1]);
size_t req_size = a.RequestedSize(ptrs[i - 1]);
ASSERT_GT(req_size, 0);
ASSERT_GE(static_cast<char*>(ptrs[i]) - static_cast<char*>(ptrs[i - 1]),
req_size);
}
for (size_t i = 0; i < ptrs.size(); i++) {
a.DeallocateRaw(ptrs[i]);
}
CheckStats(&a, 1023, 0, 654336, 1024);
}
TEST_P(GPUBFCAllocatorTest, AllocationsAndDeallocations) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rand(&philox);
std::vector<void*> initial_ptrs;
for (int s = 1; s < 256; s++) {
size_t size = std::min<size_t>(
std::max<size_t>(rand.Rand32() % 1048576, 100), 1048576);
void* raw = a.AllocateRaw(1, size);
initial_ptrs.push_back(raw);
}
std::vector<void*> existing_ptrs;
for (size_t i = 0; i < initial_ptrs.size(); i++) {
if (i % 2 == 1) {
a.DeallocateRaw(initial_ptrs[i]);
} else {
existing_ptrs.push_back(initial_ptrs[i]);
}
}
void* out_of_memory_ptr = a.AllocateRaw(1, (1 << 30) + 1);
CHECK_EQ(out_of_memory_ptr, nullptr);
for (int s = 1; s < 256; s++) {
size_t size = std::min<size_t>(
std::max<size_t>(rand.Rand32() % 1048576, 100), 1048576);
void* raw = a.AllocateRaw(1, size);
existing_ptrs.push_back(raw);
}
std::sort(existing_ptrs.begin(), existing_ptrs.end());
for (size_t i = 1; i < existing_ptrs.size(); i++) {
CHECK_NE(existing_ptrs[i], existing_ptrs[i - 1]);
size_t req_size = a.RequestedSize(existing_ptrs[i - 1]);
ASSERT_GT(req_size, 0);
ASSERT_GE(static_cast<char*>(existing_ptrs[i]) -
static_cast<char*>(existing_ptrs[i - 1]),
req_size);
}
for (size_t i = 0; i < existing_ptrs.size(); i++) {
a.DeallocateRaw(existing_ptrs[i]);
}
}
TEST_P(GPUBFCAllocatorTest, ExerciseCoalescing) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
CheckStats(&a, 0, 0, 0, 0);
float* first_ptr = TypedAllocator::Allocate<float>(&a, 1024, {});
a.DeallocateRaw(first_ptr);
CheckStats(&a, 1, 0, 4096, 4096);
for (int i = 0; i < 1024; ++i) {
float* t1 = TypedAllocator::Allocate<float>(&a, 1024, {});
int64_t* t2 = TypedAllocator::Allocate<int64_t>(&a, 1048576, {});
double* t3 = TypedAllocator::Allocate<double>(&a, 2048, {});
float* t4 = TypedAllocator::Allocate<float>(&a, 10485760, {});
a.DeallocateRaw(t1);
a.DeallocateRaw(t2);
a.DeallocateRaw(t3);
a.DeallocateRaw(t4);
}
CheckStats(&a, 4097, 0,
1024 * sizeof(float) + 1048576 * sizeof(int64_t) +
2048 * sizeof(double) + 10485760 * sizeof(float),
10485760 * sizeof(float));
float* first_ptr_after = TypedAllocator::Allocate<float>(&a, 1024, {});
EXPECT_EQ(first_ptr, first_ptr_after);
a.DeallocateRaw(first_ptr_after);
}
TEST_P(GPUBFCAllocatorTest, AllocateZeroBufSize) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
float* ptr = TypedAllocator::Allocate<float>(&a, 0, {});
EXPECT_EQ(nullptr, ptr);
}
TEST_P(GPUBFCAllocatorTest, TracksSizes) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
EXPECT_EQ(true, a.TracksAllocationSizes());
}
TEST_P(GPUBFCAllocatorTest, AllocatedVsRequested) {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
float* t1 = TypedAllocator::Allocate<float>(&a, 1, {});
EXPECT_EQ(4, a.RequestedSize(t1));
EXPECT_EQ(256, a.AllocatedSize(t1));
a.DeallocateRaw(t1);
}
TEST_P(GPUBFCAllocatorTest, TestCustomMemoryLimit) {
GPUBFCAllocator a(GetParam()(1ull << 32), 2 << 20, "GPU_0_bfc", {});
float* first_ptr = TypedAllocator::Allocate<float>(&a, 1 << 6, {});
float* second_ptr = TypedAllocator::Allocate<float>(&a, 2 << 20, {});
EXPECT_NE(nullptr, first_ptr);
EXPECT_EQ(nullptr, second_ptr);
a.DeallocateRaw(first_ptr);
}
TEST_P(GPUBFCAllocatorTest, AllocationsAndDeallocationsWithGrowth) {
GPUOptions options;
options.set_allow_growth(true);
GPUBFCAllocator a(GetParam()(1ull << 32), 1LL << 31, "GPU_0_bfc", {});
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rand(&philox);
const int32_t max_mem = 1 << 27;
std::vector<void*> initial_ptrs;
for (int s = 1; s < 10; s++) {
size_t size = std::min<size_t>(
std::max<size_t>(rand.Rand32() % max_mem, 100), max_mem);
void* raw = a.AllocateRaw(1, size);
initial_ptrs.push_back(raw);
}
std::vector<void*> existing_ptrs;
for (size_t i = 0; i < initial_ptrs.size(); i++) {
if (i % 2 == 1) {
a.DeallocateRaw(initial_ptrs[i]);
} else {
existing_ptrs.push_back(initial_ptrs[i]);
}
}
const int32_t max_mem_2 = 1 << 26;
for (int s = 1; s < 10; s++) {
size_t size = std::min<size_t>(
std::max<size_t>(rand.Rand32() % max_mem_2, 100), max_mem_2);
void* raw = a.AllocateRaw(1, size);
existing_ptrs.push_back(raw);
}
std::sort(existing_ptrs.begin(), existing_ptrs.end());
for (size_t i = 1; i < existing_ptrs.size(); i++) {
CHECK_NE(existing_ptrs[i], existing_ptrs[i - 1]);
size_t req_size = a.RequestedSize(existing_ptrs[i - 1]);
ASSERT_GT(req_size, 0);
ASSERT_GE(static_cast<char*>(existing_ptrs[i]) -
static_cast<char*>(existing_ptrs[i - 1]),
req_size);
}
for (size_t i = 0; i < existing_ptrs.size(); i++) {
a.DeallocateRaw(existing_ptrs[i]);
}
std::optional<AllocatorStats> stats = a.GetStats();
if (stats) {
LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
}
}
TEST_P(GPUBFCAllocatorTest, DISABLED_AllocatorReceivesZeroMemory) {
GPUBFCAllocator a(GetParam()(1ul << 62), 1UL << 60, "GPU_0_bfc", {});
GPUBFCAllocator b(GetParam()(1ul << 62), 1UL << 60, "GPU_0_bfc", {});
void* amem = a.AllocateRaw(1, 1);
void* bmem = b.AllocateRaw(1, 1 << 30);
a.DeallocateRaw(amem);
b.DeallocateRaw(bmem);
}
INSTANTIATE_TEST_SUITE_P(GPUBFCAllocatorTestSuite, GPUBFCAllocatorTest,
TestSuiteValues());
static void BM_Allocation(::testing::benchmark::State& state) {
GPUBFCAllocator a(CreateSubAllocator(1ul << 36), 1uLL << 33, "GPU_0_bfc", {});
std::vector<size_t> sizes = {256, 4096, 16384, 524288,
512, 1048576, 10485760, 104857600,
1048576000, 2048576000};
int size_index = 0;
for (auto s : state) {
size_t bytes = sizes[size_index++ % sizes.size()];
void* p = a.AllocateRaw(1, bytes);
a.DeallocateRaw(p);
}
}
BENCHMARK(BM_Allocation);
static void BM_AllocationThreaded(::testing::benchmark::State& state) {
int num_threads = state.range(0);
int sub_iters = 500;
for (auto s : state) {
state.PauseTiming();
GPUBFCAllocator a(CreateSubAllocator(1ul << 36), 1uLL << 33, "GPU_0_bfc",
{});
thread::ThreadPool pool(Env::Default(), "test", num_threads);
std::atomic_int_fast32_t count(sub_iters);
mutex done_lock;
condition_variable done;
bool done_flag = false;
state.ResumeTiming();
for (int t = 0; t < num_threads; t++) {
pool.Schedule([&a, &count, &done_lock, &done, &done_flag, sub_iters]() {
std::vector<int> sizes = {256, 4096, 16384, 524288,
512, 1048576, 10485760, 104857600};
int size_index = 0;
for (int i = 0; i < sub_iters; i++) {
int bytes = sizes[size_index++ % sizes.size()];
void* p = a.AllocateRaw(1, bytes);
a.DeallocateRaw(p);
if (count.fetch_sub(1) == 1) {
mutex_lock l(done_lock);
done_flag = true;
done.notify_all();
break;
}
}
});
}
mutex_lock l(done_lock);
if (!done_flag) {
done.wait(l);
}
}
}
BENCHMARK(BM_AllocationThreaded)->Arg(1)->Arg(4)->Arg(16);
static void BM_AllocationDelayed(::testing::benchmark::State& state) {
int delay = state.range(0);
GPUBFCAllocator a(CreateSubAllocator(1ull << 32), 1 << 30, "GPU_0_bfc", {});
std::vector<int> sizes = {256, 4096, 16384, 4096, 512, 1024, 1024};
int size_index = 0;
std::vector<void*> ptrs;
ptrs.reserve(delay);
for (int i = 0; i < delay; i++) {
ptrs.push_back(nullptr);
}
int pindex = 0;
for (auto s : state) {
if (ptrs[pindex] != nullptr) {
a.DeallocateRaw(ptrs[pindex]);
ptrs[pindex] = nullptr;
}
int bytes = sizes[size_index++ % sizes.size()];
void* p = a.AllocateRaw(1, bytes);
ptrs[pindex] = p;
pindex = (pindex + 1) % ptrs.size();
}
for (int i = 0; i < ptrs.size(); i++) {
if (ptrs[i] != nullptr) {
a.DeallocateRaw(ptrs[i]);
}
}
}
BENCHMARK(BM_AllocationDelayed)->Arg(1)->Arg(10)->Arg(100)->Arg(1000);
}
class GPUBFCAllocatorPrivateMethodsTest
: public ::testing::TestWithParam<std::unique_ptr<SubAllocator> (*)(
size_t)> {
protected:
void SetUp() override { CHECK_EQ(unsetenv("TF_FORCE_GPU_ALLOW_GROWTH"), 0); }
void TestBinDebugInfo() {
GPUBFCAllocator a(GetParam()(1ull << 32), 1 << 30, "GPU_0_bfc", {});
std::vector<void*> initial_ptrs;
std::vector<size_t> initial_ptrs_allocated_sizes;
const int kNumTestSizes = 5;
const int kNumChunksPerSize = 2;
for (int i = 0; i < kNumTestSizes; i++) {
for (int j = 0; j < kNumChunksPerSize; j++) {
size_t size = 256 << i;
void* raw = a.AllocateRaw(1, size);
ASSERT_NE(raw, nullptr);
initial_ptrs.push_back(raw);
initial_ptrs_allocated_sizes.push_back(a.AllocatedSize(raw));
}
}
std::array<BFCAllocator::BinDebugInfo, BFCAllocator::kNumBins> bin_infos;
{
absl::MutexLock l(&a.mutex_);
bin_infos = a.get_bin_debug_info();
}
{
MemoryDump md = a.RecordMemoryMap();
EXPECT_EQ(md.chunk_size(), 1 + (kNumTestSizes * kNumChunksPerSize));
for (int i = 0; i < BFCAllocator::kNumBins; i++) {
const BFCAllocator::BinDebugInfo& bin_info = bin_infos[i];
const BinSummary& bin_summary = md.bin_summary(i);
if (i < kNumTestSizes) {
const size_t requested_size = 2 * (256 << i);
EXPECT_EQ(requested_size,
a.RequestedSize(initial_ptrs[2 * i]) +
a.RequestedSize(initial_ptrs[2 * i + 1]));
size_t allocated_size = initial_ptrs_allocated_sizes[2 * i] +
initial_ptrs_allocated_sizes[2 * i + 1];
EXPECT_EQ(bin_info.total_bytes_in_use, allocated_size);
EXPECT_EQ(bin_summary.total_bytes_in_use(), allocated_size);
EXPECT_EQ(bin_info.total_bytes_in_bin, allocated_size);
EXPECT_EQ(bin_summary.total_bytes_in_bin(), allocated_size);
EXPECT_EQ(bin_info.total_requested_bytes_in_use, requested_size);
EXPECT_EQ(bin_info.total_chunks_in_use, kNumChunksPerSize);
EXPECT_EQ(bin_summary.total_chunks_in_use(), kNumChunksPerSize);
EXPECT_EQ(bin_info.total_chunks_in_bin, kNumChunksPerSize);
EXPECT_EQ(bin_summary.total_chunks_in_bin(), kNumChunksPerSize);
} else {
EXPECT_EQ(bin_info.total_bytes_in_use, 0);
EXPECT_EQ(bin_summary.total_bytes_in_use(), 0);
EXPECT_EQ(bin_info.total_requested_bytes_in_use, 0);
EXPECT_EQ(bin_info.total_chunks_in_use, 0);
EXPECT_EQ(bin_summary.total_chunks_in_use(), 0);
if (i == BFCAllocator::kNumBins - 1) {
EXPECT_GT(bin_info.total_bytes_in_bin, 0);
EXPECT_GT(bin_summary.total_bytes_in_bin(), 0);
EXPECT_EQ(bin_info.total_chunks_in_bin, 1);
EXPECT_EQ(bin_summary.total_chunks_in_bin(), 1);
} else {
EXPECT_EQ(bin_info.total_bytes_in_bin, 0);
EXPECT_EQ(bin_summary.total_bytes_in_bin(), 0);
EXPECT_EQ(bin_info.total_chunks_in_bin, 0);
EXPECT_EQ(bin_summary.total_chunks_in_bin(), 0);
}
}
}
}
for (size_t i = 1; i < initial_ptrs.size(); i += 2) {
a.DeallocateRaw(initial_ptrs[i]);
initial_ptrs[i] = nullptr;
}
{
absl::MutexLock l(&a.mutex_);
bin_infos = a.get_bin_debug_info();
}
for (int i = 0; i < BFCAllocator::kNumBins; i++) {
const BFCAllocator::BinDebugInfo& bin_info = bin_infos[i];
if (i < 5) {
size_t requested_size = 256 << i;
EXPECT_EQ(requested_size, a.RequestedSize(initial_ptrs[2 * i]));
EXPECT_EQ(bin_info.total_bytes_in_use,
initial_ptrs_allocated_sizes[2 * i]);
EXPECT_GE(bin_info.total_bytes_in_bin,
initial_ptrs_allocated_sizes[2 * i]);
EXPECT_EQ(bin_info.total_requested_bytes_in_use, requested_size);
EXPECT_EQ(bin_info.total_chunks_in_use, 1);
EXPECT_GE(bin_info.total_chunks_in_bin, 1);
} else {
EXPECT_EQ(bin_info.total_bytes_in_use, 0);
EXPECT_EQ(bin_info.total_requested_bytes_in_use, 0);
EXPECT_EQ(bin_info.total_chunks_in_use, 0);
}
}
}
void TestForceAllowGrowth() {
unsetenv("TF_FORCE_GPU_ALLOW_GROWTH");
GPUBFCAllocator::Options opts;
opts.allow_growth = true;
GPUBFCAllocator unset_flag_allocator(GetParam()(1ull << 32), 1LL << 31,
"GPU_0_bfc", opts);
EXPECT_EQ(GPUBFCAllocator::RoundedBytes(size_t{2 << 20}),
unset_flag_allocator.curr_region_allocation_bytes_);
setenv("TF_FORCE_GPU_ALLOW_GROWTH", "unparseable", 1);
GPUBFCAllocator unparsable_flag_allocator(GetParam()(1ull << 32), 1LL << 31,
"GPU_1_bfc", opts);
EXPECT_EQ(GPUBFCAllocator::RoundedBytes(size_t{2 << 20}),
unparsable_flag_allocator.curr_region_allocation_bytes_);
setenv("TF_FORCE_GPU_ALLOW_GROWTH", "true", 1);
opts.allow_growth = false;
GPUBFCAllocator force_allow_growth_allocator(GetParam()(1ull << 32),
1LL << 31, "GPU_2_bfc", opts);
EXPECT_EQ(GPUBFCAllocator::RoundedBytes(size_t{2 << 20}),
force_allow_growth_allocator.curr_region_allocation_bytes_);
setenv("TF_FORCE_GPU_ALLOW_GROWTH", "false", 1);
opts.allow_growth = true;
GPUBFCAllocator force_no_allow_growth_allocator(
GetParam()(1ull << 32), 1LL << 31, "GPU_3_bfc", opts);
EXPECT_EQ(GPUBFCAllocator::RoundedBytes(1LL << 31),
force_no_allow_growth_allocator.curr_region_allocation_bytes_);
}
};
TEST_P(GPUBFCAllocatorPrivateMethodsTest, BinDebugInfo) { TestBinDebugInfo(); }
TEST_P(GPUBFCAllocatorPrivateMethodsTest, ForceAllowGrowth) {
TestForceAllowGrowth();
}
INSTANTIATE_TEST_SUITE_P(GPUBFCAllocatorPrivateMethodTestSuite,
GPUBFCAllocatorPrivateMethodsTest, TestSuiteValues());
class GPUBFCAllocatorTest_SubAllocatorSpecific : public ::testing::Test {};
TEST_F(GPUBFCAllocatorTest_SubAllocatorSpecific,
PhysicalAllocatorOomsFragmentation) {
GPUBFCAllocator::Options options;
options.allow_growth = true;
constexpr size_t k512MiB = 512ull << 20;
  GPUBFCAllocator a(CreateGPUMemAllocator(/*ignored*/ 0), k512MiB,
                    "GPU_0_bfc", options);
const size_t size = 1LL << 22;
std::vector<void*> initial_ptrs;
for (size_t s = 0; s < 128; s++) {
void* raw = a.AllocateRaw(1, size);
initial_ptrs.push_back(raw);
}
for (int i = 0; i < 127; ++i) {
a.DeallocateRaw(initial_ptrs[i]);
}
void* big_alloc = a.AllocateRaw(1, k512MiB - size);
EXPECT_EQ(big_alloc, nullptr);
}
class GPUBFCAllocatorPrivateMethodsTest_SubAllocatorSpecific
: public ::testing::Test {
protected:
void SetUp() override { CHECK_EQ(unsetenv("TF_FORCE_GPU_ALLOW_GROWTH"), 0); }
void TestRegionDeallocation() {
GPUBFCAllocator::Options options;
options.allow_growth = true;
    GPUBFCAllocator a(CreateGPUMemAllocator(/*ignored*/ 0), 1LL << 31,
                      "GPU_0_bfc", options);
const size_t size = 1LL << 22;
std::vector<void*> initial_ptrs;
for (size_t s = 0; s < 128; s++) {
void* raw = a.AllocateRaw(1, size);
initial_ptrs.push_back(raw);
}
{
absl::MutexLock l(&a.mutex_);
EXPECT_LT(1, a.region_manager_.regions().size());
}
for (size_t i = 0; i < initial_ptrs.size() - 1; i++) {
a.DeallocateRaw(initial_ptrs[i]);
}
EXPECT_EQ(true, a.DeallocateFreeRegions(0));
{
absl::MutexLock l(&a.mutex_);
EXPECT_EQ(1, a.region_manager_.regions().size());
}
size_t num_chunks_in_bins = 0;
for (int i = 0; i < BFCAllocator::kNumBins; i++) {
BFCAllocator::Bin* bin = a.BinFromIndex(i);
num_chunks_in_bins += bin->free_chunks.size();
}
EXPECT_EQ(1, num_chunks_in_bins);
}
};
TEST_F(GPUBFCAllocatorPrivateMethodsTest_SubAllocatorSpecific,
TestRegionDeallocation) {
TestRegionDeallocation();
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49b48a75-9413-4452-9358-84307792442c | cpp | tensorflow/tensorflow | gpu_debug_allocator | tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc | tensorflow/core/common_runtime/gpu/gpu_debug_allocator_test.cc | #include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#define MASK_WORDS 2
#define MASK_BYTES (MASK_WORDS * sizeof(int64_t))
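// Every allocation is bracketed by two guard masks written to device memory:
//
//   [ MASK_BYTES before_mask | user buffer | MASK_BYTES after_mask ]
//
// AllocateRaw returns a pointer just past the header mask, and CheckHeader /
// CheckFooter verify on deallocation that neither guard was overwritten.
//
// A minimal usage sketch (allocator wiring and sizes are hypothetical):
//   GPUDebugAllocator a(base_allocator, platform_device_id);
//   void* p = a.AllocateRaw(/*alignment=*/256, /*num_bytes=*/1024);
//   // ... use p ...
//   a.DeallocateRaw(p);  // CHECK-fails if a guard mask was clobbered.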
namespace tensorflow {
namespace {
int64_t* NewMask(int64_t word) {
int64_t* m = new int64_t[MASK_WORDS];
for (int i = 0; i < MASK_WORDS; ++i) {
m[i] = word;
}
return m;
}
int64_t* before_mask = NewMask(0xabababababababab);
int64_t* after_mask = NewMask(0xcdcdcdcdcdcdcdcd);
bool CheckMask(se::StreamExecutor* exec, void* ptr, int64_t* mask) {
se::DeviceMemory<int64_t> gpu_ptr{se::DeviceMemoryBase{ptr, MASK_BYTES}};
int64_t tmp[MASK_WORDS];
absl::Status result = exec->SynchronousMemcpyD2H(gpu_ptr, MASK_BYTES, tmp);
if (!result.ok()) {
LOG(FATAL) << "Could not copy debug mask, " << result;
}
bool ok = true;
for (int i = 0; i < MASK_WORDS; ++i) {
ok &= (mask[i] == tmp[i]);
if (!ok) {
LOG(ERROR) << "i=" << i
<< " mask=" << reinterpret_cast<const void*>(mask[i])
<< " field=" << reinterpret_cast<const void*>(tmp[i]);
}
}
return ok;
}
void InitMask(se::StreamExecutor* exec, void* ptr, int64_t* mask) {
se::DeviceMemory<int64_t> gpu_ptr{se::DeviceMemoryBase{ptr, MASK_BYTES}};
absl::Status result = exec->SynchronousMemcpyH2D(mask, MASK_BYTES, &gpu_ptr);
if (!result.ok()) {
LOG(FATAL) << "Could not copy debug mask, " << result;
}
}
}
GPUDebugAllocator::GPUDebugAllocator(Allocator* allocator,
tsl::PlatformDeviceId platform_device_id)
: base_allocator_(allocator) {
stream_exec_ = se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
GPUDebugAllocator::~GPUDebugAllocator() { delete base_allocator_; }
void* GPUDebugAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
num_bytes += (2 * MASK_BYTES);
void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
if (allocated_ptr == nullptr) return allocated_ptr;
void* rv = static_cast<char*>(allocated_ptr) + MASK_BYTES;
InitMask(stream_exec_, allocated_ptr, before_mask);
size_t req_size = base_allocator_->RequestedSize(allocated_ptr);
InitMask(stream_exec_,
static_cast<char*>(allocated_ptr) + req_size - MASK_BYTES,
after_mask);
return rv;
}
void GPUDebugAllocator::DeallocateRaw(void* ptr) {
if (ptr != nullptr) {
CHECK(CheckHeader(ptr)) << "before_mask has been overwritten";
CHECK(CheckFooter(ptr)) << "after_mask has been overwritten";
ptr = static_cast<void*>(static_cast<char*>(ptr) - MASK_BYTES);
}
base_allocator_->DeallocateRaw(ptr);
}
bool GPUDebugAllocator::TracksAllocationSizes() const { return true; }
size_t GPUDebugAllocator::RequestedSize(const void* ptr) const {
auto req_size = base_allocator_->RequestedSize(static_cast<const char*>(ptr) -
MASK_BYTES);
return req_size - 2 * MASK_BYTES;
}
size_t GPUDebugAllocator::AllocatedSize(const void* ptr) const {
return base_allocator_->AllocatedSize(static_cast<const char*>(ptr) -
MASK_BYTES);
}
int64_t GPUDebugAllocator::AllocationId(const void* ptr) const {
return base_allocator_->AllocationId(static_cast<const char*>(ptr) -
MASK_BYTES);
}
std::optional<tsl::AllocatorStats> GPUDebugAllocator::GetStats() {
return base_allocator_->GetStats();
}
bool GPUDebugAllocator::ClearStats() { return base_allocator_->ClearStats(); }
bool GPUDebugAllocator::CheckHeader(void* ptr) {
return CheckMask(stream_exec_, static_cast<char*>(ptr) - MASK_BYTES,
before_mask);
}
bool GPUDebugAllocator::CheckFooter(void* ptr) {
char* original_ptr = static_cast<char*>(ptr) - MASK_BYTES;
size_t req_size = base_allocator_->RequestedSize(original_ptr);
return CheckMask(stream_exec_, original_ptr + req_size - MASK_BYTES,
after_mask);
}
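// GPUNanResetAllocator fills each buffer with NaNs both when it is handed out
// and when it is returned, so reads of uninitialized or freed GPU memory show
// up as NaNs instead of stale values.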
GPUNanResetAllocator::GPUNanResetAllocator(
Allocator* allocator, tsl::PlatformDeviceId platform_device_id)
: base_allocator_(allocator) {
stream_exec_ = se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
GPUNanResetAllocator::~GPUNanResetAllocator() { delete base_allocator_; }
void* GPUNanResetAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
void* allocated_ptr = base_allocator_->AllocateRaw(alignment, num_bytes);
if (allocated_ptr == nullptr) return allocated_ptr;
size_t req_size = base_allocator_->RequestedSize(allocated_ptr);
std::vector<float> nans((req_size + sizeof(float) - 1) / sizeof(float),
std::nanf(""));
se::DeviceMemory<float> nan_ptr{
se::DeviceMemoryBase{static_cast<float*>(allocated_ptr), req_size}};
absl::Status result =
stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);
if (!result.ok()) {
LOG(ERROR) << "Could not initialize to NaNs, " << result;
}
return allocated_ptr;
}
void GPUNanResetAllocator::DeallocateRaw(void* ptr) {
if (ptr != nullptr) {
size_t req_size = base_allocator_->RequestedSize(ptr);
std::vector<float> nans((req_size + sizeof(float) - 1) / sizeof(float),
std::nanf(""));
se::DeviceMemory<float> nan_ptr{
se::DeviceMemoryBase{static_cast<float*>(ptr), req_size}};
absl::Status result =
stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);
if (!result.ok()) {
LOG(ERROR) << "Could not initialize to NaNs, " << result;
}
}
base_allocator_->DeallocateRaw(ptr);
}
size_t GPUNanResetAllocator::RequestedSize(const void* ptr) const {
return base_allocator_->RequestedSize(ptr);
}
size_t GPUNanResetAllocator::AllocatedSize(const void* ptr) const {
return base_allocator_->AllocatedSize(ptr);
}
std::optional<tsl::AllocatorStats> GPUNanResetAllocator::GetStats() {
return base_allocator_->GetStats();
}
bool GPUNanResetAllocator::ClearStats() {
return base_allocator_->ClearStats();
}
} | #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#include "tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h"
#include <algorithm>
#include <vector>
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/lib/gtl/inlined_vector.h"
#include "tensorflow/core/common_runtime/device/device_mem_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace {
se::StreamExecutor* ExecutorForPlatformDeviceId(
tsl::PlatformDeviceId platform_device_id) {
return se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
}
TEST(GPUDebugAllocatorTest, OverwriteDetection_None) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
for (int s : {8}) {
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], s * sizeof(int64_t), &gpu_array_ptr));
EXPECT_TRUE(a.CheckHeader(gpu_array));
EXPECT_TRUE(a.CheckFooter(gpu_array));
a.DeallocateRaw(gpu_array);
}
}
TEST(GPUDebugAllocatorTest, OverwriteDetection_Header) {
for (int s : {8, 211}) {
EXPECT_DEATH(
{
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(
absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{
se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(int64_t),
&gpu_array_ptr));
se::DeviceMemory<int64_t> gpu_hdr_ptr{
se::DeviceMemoryBase{gpu_array - 1}};
float pi = 3.1417;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(&pi, sizeof(float),
&gpu_hdr_ptr));
a.DeallocateRaw(gpu_array);
},
"");
}
}
TEST(GPUDebugAllocatorTest, OverwriteDetection_Footer) {
for (int s : {8, 22}) {
EXPECT_DEATH(
{
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(
absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<int64_t> cpu_array(s);
memset(&cpu_array[0], 0, cpu_array.size() * sizeof(int64_t));
int64_t* gpu_array =
TypedAllocator::Allocate<int64_t>(&a, cpu_array.size(), {});
se::DeviceMemory<int64_t> gpu_array_ptr{
se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(int64_t),
&gpu_array_ptr));
se::DeviceMemory<int64_t> gpu_ftr_ptr{
se::DeviceMemoryBase{gpu_array + s}};
float pi = 3.1417;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(&pi, sizeof(float),
&gpu_ftr_ptr));
a.DeallocateRaw(gpu_array);
},
"");
}
}
TEST(GPUDebugAllocatorTest, ResetToNan) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUNanResetAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<float> cpu_array(1024);
std::vector<float> cpu_array_result(1024);
float* gpu_array = TypedAllocator::Allocate<float>(&a, cpu_array.size(), {});
se::DeviceMemory<float> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array.size() * sizeof(float), &cpu_array[0]));
for (float f : cpu_array) {
ASSERT_FALSE(std::isfinite(f));
}
cpu_array[0] = 1.0;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(float), &gpu_array_ptr));
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
ASSERT_EQ(1.0, cpu_array_result[0]);
a.DeallocateRaw(gpu_array);
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
for (float f : cpu_array_result) {
ASSERT_FALSE(std::isfinite(f));
}
}
TEST(GPUDebugAllocatorTest, ResetToNanWithHeaderFooter) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUNanResetAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
std::vector<float> cpu_array(1024);
std::vector<float> cpu_array_result(1024);
float* gpu_array = TypedAllocator::Allocate<float>(&a, cpu_array.size(), {});
se::DeviceMemory<float> gpu_array_ptr{se::DeviceMemoryBase{gpu_array}};
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array.size() * sizeof(float), &cpu_array[0]));
for (float f : cpu_array) {
ASSERT_FALSE(std::isfinite(f));
}
cpu_array[0] = 1.0;
TF_CHECK_OK(stream_exec->SynchronousMemcpyH2D(
&cpu_array[0], cpu_array.size() * sizeof(float), &gpu_array_ptr));
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
ASSERT_EQ(1.0, cpu_array_result[0]);
a.DeallocateRaw(gpu_array);
TF_CHECK_OK(stream_exec->SynchronousMemcpyD2H(
gpu_array_ptr, cpu_array_result.size() * sizeof(float),
&cpu_array_result[0]));
for (float f : cpu_array_result) {
ASSERT_FALSE(std::isfinite(f));
}
}
TEST(GPUDebugAllocatorTest, TracksSizes) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
EXPECT_EQ(true, a.TracksAllocationSizes());
}
TEST(GPUDebugAllocatorTest, AllocatedVsRequested) {
const tsl::PlatformDeviceId platform_device_id(0);
auto stream_exec = ExecutorForPlatformDeviceId(platform_device_id);
GPUDebugAllocator a(
new GPUBFCAllocator(absl::WrapUnique(new DeviceMemAllocator(
stream_exec, platform_device_id,
stream_executor::MemoryType::kDevice, {}, {})),
1 << 30, "", {}),
platform_device_id);
float* t1 = TypedAllocator::Allocate<float>(&a, 1, {});
EXPECT_EQ(4, a.RequestedSize(t1));
EXPECT_EQ(256, a.AllocatedSize(t1));
a.DeallocateRaw(t1);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_debug_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
890fb4c1-2de7-4aa0-846d-d9612a5dfe98 | cpp | tensorflow/tensorflow | gpu_serving_device_selector | tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.cc | tensorflow/core/common_runtime/gpu/gpu_serving_device_selector_test.cc | #include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
constexpr int64_t kDefaultEstimateNs = 1;
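// Time source used for all load estimates. Tests replace it through
// OverwriteNowNsFunctionForTest to obtain deterministic timestamps.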
ABSL_CONST_INIT int64_t (*NowNs)() = +[]() -> int64_t {
return absl::GetCurrentTimeNanos();
};
using DeviceStates = GpuServingDeviceSelector::DeviceStates;
GpuServingDeviceSelector::GpuServingDeviceSelector(
const int num_devices,
std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy)
: device_states_(num_devices),
device_selector_policy_(std::move(device_selector_policy)),
req_id_counter_(0) {}
tsl::DeviceReservation GpuServingDeviceSelector::ReserveDevice(
absl::string_view program_fingerprint) {
absl::MutexLock lock(&mu_);
DeviceStates device_states;
device_states.states = absl::Span<const DeviceState>(device_states_);
auto [it, emplaced] =
execution_info_.try_emplace(program_fingerprint, ExecutionInfo());
const int device_index =
device_selector_policy_->SelectDevice(program_fingerprint, device_states);
ServingDeviceSelector::EnqueueHelper(
device_states_.at(device_index), device_index, it->second,
      program_fingerprint, 0, req_id_counter_++, 1, 0, NowNs());
return tsl::DeviceReservation(device_index, this);
}
void GpuServingDeviceSelector::FreeDeviceReservation(
const tsl::DeviceReservation& reservation) {
Completed(reservation.device_index(), false);
}
void GpuServingDeviceSelector::Enqueue(int32_t index_on_host,
absl::string_view fingerprint) {
if (fingerprint.empty()) {
LOG(ERROR) << "Empty fingerprint.";
return;
}
absl::MutexLock lock(&mu_);
auto [it, emplaced] =
execution_info_.try_emplace(fingerprint, ExecutionInfo());
DeviceState& device_state = device_states_.at(index_on_host);
  ServingDeviceSelector::EnqueueHelper(device_state, index_on_host, it->second,
                                       fingerprint, 0, -1, 1, 0, NowNs());
int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs();
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
total_estimated_time_ns);
}
void GpuServingDeviceSelector::Completed(int32_t index_on_host,
bool had_error) {
absl::MutexLock lock(&mu_);
DeviceState& device_state = device_states_.at(index_on_host);
  ServingDeviceSelector::CompletedHelper(device_state, index_on_host, 0,
                                         min_exec_time_, had_error, NowNs());
int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs();
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
total_estimated_time_ns);
}
int64_t GpuServingDeviceSelector::TotalEstimatedTimeTillIdleNs() {
int64_t total_gpu_load_ns = 0;
for (const auto& device_state : device_states_) {
    total_gpu_load_ns += ServingDeviceSelector::EstimateTimeTillIdleNs(
        device_state, 0, min_exec_time_.value_or(kDefaultEstimateNs), NowNs());
}
return total_gpu_load_ns;
}
void GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
int64_t (*now_ns)()) {
NowNs = now_ns;
}
}
} | #include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
class ServingDeviceSelectorTestHelper {
public:
ServingDeviceSelectorTestHelper() {
GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(NowNs);
now_ns_ = 0;
}
~ServingDeviceSelectorTestHelper() {
GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
absl::GetCurrentTimeNanos);
}
static void ElapseNs(int64_t ns) { now_ns_ += ns; }
static int64_t NowNs() { return now_ns_; }
private:
static int64_t now_ns_;
};
int64_t ServingDeviceSelectorTestHelper::now_ns_ = 0;
namespace {
TEST(GpuServingDeviceSelector, Basic) {
GpuServingDeviceSelector selector(2,
std::make_unique<tsl::RoundRobinPolicy>());
const std::string program_fingerprint = "TensorFlow";
tsl::DeviceReservation reservation =
selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 0);
reservation = selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 1);
reservation = selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 0);
}
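// The fingerprints below are named after their simulated run times. The first
// enqueue/complete round lets the selector record how long each program takes;
// the TotalGpuLoadNs checks in the second round then match the sum of the
// outstanding estimates (16 + 8 + 4 + 2 ms = 30e6 ns once all four are
// queued), shrinking as each device completes.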
TEST(GpuServingDeviceSelector, DefaultPolicyOnlyEnqueueCall) {
ServingDeviceSelectorTestHelper helper;
auto policy = std::make_unique<tsl::RoundRobinPolicy>();
auto serving_device_selector =
std::make_unique<tensorflow::gpu::GpuServingDeviceSelector>(
4, std::move(policy));
serving_device_selector->Enqueue(3, "16ms");
serving_device_selector->Enqueue(2, "8ms");
serving_device_selector->Enqueue(1, "4ms");
serving_device_selector->Enqueue(0, "2ms");
serving_device_selector->Enqueue(3, "16ms");
serving_device_selector->Enqueue(2, "8ms");
serving_device_selector->Enqueue(1, "4ms");
serving_device_selector->Enqueue(0, "2ms");
helper.ElapseNs(2e6);
serving_device_selector->Completed(0, false);
helper.ElapseNs(2e6);
serving_device_selector->Completed(0, false);
serving_device_selector->Completed(1, false);
helper.ElapseNs(4e6);
serving_device_selector->Completed(1, false);
serving_device_selector->Completed(2, false);
helper.ElapseNs(8e6);
serving_device_selector->Completed(2, false);
serving_device_selector->Completed(3, false);
helper.ElapseNs(16e6);
serving_device_selector->Completed(3, false);
serving_device_selector->Enqueue(3, "16ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
16e6);
serving_device_selector->Enqueue(2, "8ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
24e6);
serving_device_selector->Enqueue(1, "4ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
28e6);
serving_device_selector->Enqueue(0, "2ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
30e6);
helper.ElapseNs(2e6);
serving_device_selector->Completed(0, false);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
22e6);
helper.ElapseNs(2e6);
serving_device_selector->Completed(1, false);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
16e6);
helper.ElapseNs(4e6);
serving_device_selector->Completed(2, false);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
8e6);
helper.ElapseNs(8e6);
serving_device_selector->Completed(3, false);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
0e6);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_serving_device_selector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c1d79aab-385a-4f6f-bd49-cd1c141acb1c | cpp | tensorflow/tensorflow | gpu_device | tensorflow/core/common_runtime/gpu/gpu_device.cc | tensorflow/core/common_runtime/gpu/gpu_device_test.cc | #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#if (defined(PLATFORM_GOOGLE) && defined(TF_PLATFORM_LINUX_X86_64))
#define TF_GPU_USE_PJRT
#endif
#if TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#endif
#define EIGEN_USE_GPU
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_split.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "tensorflow/core/common_runtime/device/device_event_mgr.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_id_utils.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
#endif
#ifdef TF_GPU_USE_PJRT
#include "tensorflow/compiler/jit/flags.h"
#include "xla/pjrt/gpu/gpu_helpers.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/stream_executor/integrations/device_host_allocator.h"
#endif
#include "xla/stream_executor/gpu/gpu_stream.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/scoped_annotation.h"
#include "tensorflow/core/profiler/lib/scoped_memory_debug_annotation.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/dso_loader.h"
#ifdef TF_GPU_USE_PJRT
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#endif
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/stream_executor_util.h"
#if !defined(PLATFORM_GOOGLE)
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/cuda_config.h"
#endif
#endif
namespace tensorflow {
namespace {
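// Looks up the stream priority configured for a TF device id by walking the
// per-virtual-device priority lists in GPUOptions; devices without an
// explicit entry get priority 0.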
int GetPriority(const int tf_device_id, const GPUOptions& options) {
int id = tf_device_id;
int i = 0;
int priority = 0;
while (i < options.experimental().virtual_devices_size()) {
const int size =
options.experimental().virtual_devices().Get(i).priority_size();
if (id >= size) {
id -= size;
} else {
priority =
options.experimental().virtual_devices().Get(i).priority().Get(id);
break;
}
i++;
}
return priority;
}
}
#if GOOGLE_CUDA
typedef cudaStream_t gpuStream_t;
typedef cudaDeviceProp gpuDeviceProp_t;
#define EIGEN_GPU_SCRATCH_SIZE (Eigen::kGpuScratchSize)
using se::gpu::ScopedActivateContext;
#elif TENSORFLOW_USE_ROCM
typedef hipStream_t gpuStream_t;
typedef hipDeviceProp_t gpuDeviceProp_t;
#define EIGEN_GPU_SCRATCH_SIZE (Eigen::kGpuScratchSize)
using se::gpu::ScopedActivateContext;
#endif
static_assert(std::is_pointer_v<gpuStream_t>,
"Gpu stream handle must be a pointer");
class EigenGpuStreamDevice : public ::Eigen::StreamInterface {
public:
EigenGpuStreamDevice()
: scratch_(nullptr),
semaphore_(nullptr),
context_(nullptr),
device_(this) {}
~EigenGpuStreamDevice() override {}
void Reinitialize(OpKernelContext* context, gpuStream_t gpu_stream,
tsl::PlatformDeviceId platform_device_id,
::tensorflow::Allocator* alloc, char* scratch) {
if (LogMemory::IsEnabled()) {
operation_ = context->op_kernel().name() + "/EigenAllocator";
step_id_ = context->step_id();
}
context_ = context;
scratch_ = scratch;
semaphore_ =
reinterpret_cast<unsigned int*>(scratch + Eigen::kGpuScratchSize);
stream_ = gpu_stream;
allocator_ = alloc;
device_prop_ = &Eigen::GetGpuDeviceProperties(platform_device_id.value());
}
const gpuStream_t& stream() const override { return stream_; }
const gpuDeviceProp_t& deviceProperties() const override {
return *device_prop_;
}
void* allocate(size_t num_bytes) const override {
    void* ret = allocator_->AllocateRaw(32 /* alignment */, num_bytes);
if (ret == nullptr) {
if (context_) {
context_->SetStatus(errors::ResourceExhausted(
strings::StrCat("Ran out of GPU memory when allocating ", num_bytes,
" bytes for ", operation_)));
} else {
LOG(FATAL)
<< "EigenAllocator for GPU ran out of memory when allocating "
<< num_bytes << ". See error logs for more detailed info.";
}
}
if (LogMemory::IsEnabled() && ret != nullptr) {
LogMemory::RecordRawAllocation(operation_, step_id_, num_bytes, ret,
allocator_);
}
return ret;
}
void deallocate(void* buffer) const override {
if (LogMemory::IsEnabled() && buffer != nullptr) {
LogMemory::RecordRawDeallocation(operation_, step_id_, buffer, allocator_,
true);
}
AsyncFreeData* afData =
new AsyncFreeData(allocator_, buffer, operation_, step_id_);
#if GOOGLE_CUDA
cudaError_t err = cudaStreamAddCallback(stream_, asyncLogFree, afData, 0);
CHECK_EQ(err, cudaSuccess);
#elif TENSORFLOW_USE_ROCM
hipError_t err = hipStreamAddCallback(stream_, asyncLogFree, afData, 0);
CHECK_EQ(err, hipSuccess);
#endif
allocator_->DeallocateRaw(buffer);
}
void* scratchpad() const override { return scratch_; }
unsigned int* semaphore() const override { return semaphore_; }
const Eigen::GpuDevice& device() const { return device_; }
private:
struct AsyncFreeData {
AsyncFreeData(::tensorflow::Allocator* a, void* p, const string& o,
const int64_t s)
: allocator_(a), address_(p), operation_(o), step_id_(s) {}
::tensorflow::Allocator* allocator_;
void* address_;
const string operation_;
const int64_t step_id_;
};
#if GOOGLE_CUDA
static void CUDART_CB asyncLogFree(gpuStream_t stream, cudaError_t status,
void* userData)
#elif TENSORFLOW_USE_ROCM
static void asyncLogFree(gpuStream_t stream, hipError_t status,
void* userData)
#endif
{
AsyncFreeData* data = static_cast<AsyncFreeData*>(userData);
if (LogMemory::IsEnabled()) {
LogMemory::RecordRawDeallocation(data->operation_, data->step_id_,
data->address_, data->allocator_, false);
}
delete data;
}
string operation_;
int64_t step_id_;
gpuStream_t stream_;
const gpuDeviceProp_t* device_prop_;
::tensorflow::Allocator* allocator_;
mutable char* scratch_;
mutable unsigned int* semaphore_;
OpKernelContext* context_;
Eigen::GpuDevice device_;
EigenGpuStreamDevice(const EigenGpuStreamDevice&) = delete;
void operator=(const EigenGpuStreamDevice&) = delete;
};
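// Process-wide registry of stream groups, keyed by (tf_device_id,
// stream_group_within_gpu), so that all BaseGPUDevice instances created for
// the same virtual device share a single set of compute and copy streams.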
class BaseGPUDevice::StreamGroupFactory {
public:
std::pair<BaseGPUDevice::StreamGroup*, bool> Emplace(
tsl::TfDeviceId tf_device_id, int stream_group_within_gpu,
BaseGPUDevice::StreamGroup stream_group) {
mutex_lock guard(lock_);
auto insert_result = streams_.emplace(
key_type(tf_device_id.value(), stream_group_within_gpu),
std::move(stream_group));
return std::make_pair(&insert_result.first->second, insert_result.second);
}
BaseGPUDevice::StreamGroup* GetOrCreate(tsl::TfDeviceId tf_device_id,
int stream_group_within_gpu,
se::StreamExecutor* executor,
const GPUOptions& options) {
mutex_lock guard(lock_);
StreamGroup* group =
&streams_[key_type(tf_device_id.value(), stream_group_within_gpu)];
if (!group->compute) {
int priority = GetPriority(tf_device_id.value(), options);
group->priority = priority;
group->compute = GetInitializedStream(executor, priority);
VLOG(2) << "Created stream[" << stream_group_within_gpu
<< "] = " << group->compute << " with priority: " << priority;
#if TENSORFLOW_USE_ROCM
group->nccl = GetInitializedStream(executor, priority);
VLOG(2) << "Created nccl_stream[" << stream_group_within_gpu
<< "] = " << group->nccl;
group->compute->WaitFor(group->nccl).IgnoreError();
group->nccl->WaitFor(group->compute).IgnoreError();
#endif
group->host_to_device = GetInitializedStream(executor, priority);
VLOG(2) << "Created host_to_device_stream[" << stream_group_within_gpu
<< "] = " << group->host_to_device;
group->device_to_host = GetInitializedStream(executor, priority);
VLOG(2) << "Created device_to_host_stream[" << stream_group_within_gpu
<< "] = " << group->device_to_host;
int num_d2d_streams =
options.experimental().num_dev_to_dev_copy_streams();
if (num_d2d_streams == 0) num_d2d_streams = 1;
if (num_d2d_streams < 1 || num_d2d_streams > 4) {
LOG(ERROR)
<< "Illegal GPUOptions.experimental.num_dev_to_dev_copy_streams="
<< num_d2d_streams << " set to 1 instead.";
num_d2d_streams = 1;
}
for (int i = 0; i < num_d2d_streams; ++i) {
se::Stream* stream = GetInitializedStream(executor, priority);
group->device_to_device.push_back(stream);
VLOG(2) << "Created device_to_device_stream[" << stream_group_within_gpu
<< "] = " << group->device_to_device.back();
}
}
return group;
}
static StreamGroupFactory& Global() {
static StreamGroupFactory* instance = new StreamGroupFactory();
return *instance;
}
void TestOnlyReset() {
mutex_lock guard(lock_);
for (auto& item : streams_) {
auto& stream = item.second;
if (stream.compute) {
#ifndef TF_GPU_USE_PJRT
delete stream.compute;
#endif
stream.compute = nullptr;
}
#if TENSORFLOW_USE_ROCM
if (stream.nccl) {
#ifndef TF_GPU_USE_PJRT
delete stream.nccl;
#endif
stream.nccl = nullptr;
}
#endif
if (stream.host_to_device) {
#ifndef TF_GPU_USE_PJRT
delete stream.host_to_device;
#endif
stream.host_to_device = nullptr;
}
if (stream.device_to_host) {
#ifndef TF_GPU_USE_PJRT
delete stream.device_to_host;
#endif
stream.device_to_host = nullptr;
}
while (!stream.device_to_device.empty()) {
auto back = stream.device_to_device.back();
if (back) {
#ifndef TF_GPU_USE_PJRT
delete back;
#endif
}
stream.device_to_device.pop_back();
}
}
streams_.clear();
}
std::optional<tsl::TfDeviceId> FindTfDeviceId(se::Stream* compute) const {
for (const auto& item : streams_) {
if (item.second.compute == compute) {
return tsl::TfDeviceId(std::get<0>(item.first));
}
}
return std::nullopt;
}
private:
se::Stream* GetInitializedStream(se::StreamExecutor* executor, int priority) {
auto stream_or_status = executor->CreateStream(priority);
if (!stream_or_status.ok()) {
LOG(ERROR) << "Failed to create stream: " << stream_or_status.status();
return nullptr;
}
auto stream_ptr = stream_or_status->get();
allocated_streams_.emplace_back(std::move(stream_or_status.value()));
return stream_ptr;
}
mutex lock_;
using key_type = std::tuple<int, int>;
std::map<key_type, StreamGroup> streams_;
std::vector<std::unique_ptr<se::Stream>> allocated_streams_;
StreamGroupFactory() = default;
StreamGroupFactory(const StreamGroupFactory&) = delete;
void operator=(const StreamGroupFactory&) = delete;
};
BaseGPUDevice::BaseGPUDevice(const SessionOptions& options, const string& name,
Bytes memory_limit, const DeviceLocality& locality,
tsl::TfDeviceId tf_device_id,
const string& physical_device_desc,
Allocator* gpu_allocator, Allocator* cpu_allocator,
bool sync_every_op)
: LocalDevice(options, Device::BuildDeviceAttributes(name, DEVICE_GPU,
memory_limit, locality,
physical_device_desc)),
gpu_allocator_(gpu_allocator),
cpu_allocator_(cpu_allocator),
scoped_allocator_mgr_(new ScopedAllocatorMgr(name)),
tf_device_id_(tf_device_id),
sync_every_op_(sync_every_op),
stream_merge_options_(
options.config.gpu_options().experimental().stream_merge_options()) {
set_xla_global_id(Fingerprint32(name) % std::numeric_limits<int32_t>::max());
#ifdef TF_GPU_USE_PJRT
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_fns{
UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()};
pjrt_device_context_ = core::RefCountPtr<DeviceContext>(
      new PjRtDeviceContext(shape_fns, /*use_pjrt_tensor_buffer=*/true));
#endif
GPUProcessState::singleton()->EnableGPUDevice();
}
BaseGPUDevice::~BaseGPUDevice() {
delete accelerator_device_info_;
if (scratch_) gpu_allocator_->DeallocateRaw(scratch_);
device_context_->Unref();
}
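// Lazily allocates the per-device Eigen scratch area plus one trailing
// unsigned int used as Eigen's semaphore, and zero-fills it on the device.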
Status BaseGPUDevice::InitScratchBuffers() {
mutex_lock l(scratch_init_mutex_);
if (!scratch_) {
DCHECK(stream_);
size_t scratch_buffer_size = Eigen::kGpuScratchSize + sizeof(unsigned int);
profiler::ScopedMemoryDebugAnnotation op_annotation("ScratchBuffer");
void* scratch_buffer = gpu_allocator_->AllocateRaw(
Allocator::kAllocatorAlignment, scratch_buffer_size);
if (scratch_buffer == nullptr) {
return errors::FailedPrecondition(
"Failed to allocate scratch buffer for device ",
tf_device_id_.value());
}
se::DeviceMemory<char> mem(
se::DeviceMemoryBase(scratch_buffer, scratch_buffer_size));
TF_RETURN_IF_ERROR(executor_->SynchronousMemZero(
&mem, Eigen::kGpuScratchSize + sizeof(unsigned int)));
scratch_ = static_cast<char*>(scratch_buffer);
}
return OkStatus();
}
#ifdef TF_GPU_USE_PJRT
Status BaseGPUDevice::Init(const SessionOptions& options,
xla::LocalDeviceState* xla_local_device_state) {
#else
Status BaseGPUDevice::Init(const SessionOptions& options) {
#endif
auto executor_status = DeviceIdUtil::ExecutorForTfDeviceId(
DEVICE_GPU, se::GPUMachineManager(), tf_device_id_);
if (!executor_status.status().ok()) {
return errors::Internal("Failed to get StreamExecutor for device ",
tf_device_id_.value());
}
executor_ = executor_status.value();
#ifdef TF_GPU_USE_PJRT
CHECK(xla_local_device_state != nullptr);
StreamGroup stream_group;
stream_group.priority =
GetPriority(tf_device_id_.value(), options.config.gpu_options());
stream_group.compute = xla_local_device_state->compute_stream();
stream_group.host_to_device = xla_local_device_state->host_to_device_stream();
stream_group.device_to_host = xla_local_device_state->GetDeviceToHostStream();
std::vector<se::Stream*> d2d_streams_from_local_devices_state =
xla_local_device_state->GetDeviceToDeviceStreams();
for (se::Stream* stream : d2d_streams_from_local_devices_state) {
stream_group.device_to_device.push_back(stream);
}
std::pair<StreamGroup*, bool> emplace_result =
StreamGroupFactory::Global().Emplace(
          tf_device_id_, /*stream_group_within_gpu=*/0, stream_group);
if (!emplace_result.second) {
LOG(WARNING) << "StreamGroup for tf_device_id: " << tf_device_id_.value()
<< " already exists. This usually only happens in unit tests.";
}
stream_ = emplace_result.first;
#else
stream_ = StreamGroupFactory::Global().GetOrCreate(
      tf_device_id_, /*stream_group_within_gpu=*/0, executor_,
      options.config.gpu_options());
#endif
AllocatorAttributes attr;
attr.set_on_host(true);
attr.set_gpu_compatible(true);
Allocator* host_memory_allocator = GetAllocator(attr);
device_context_ =
      new GPUDeviceContext(0 /* stream_id */, stream_->compute,
#if TENSORFLOW_USE_ROCM
stream_->nccl,
#endif
stream_->host_to_device, stream_->device_to_host,
stream_->device_to_device, host_memory_allocator);
em_ = EventMgrFactory::Singleton()->GetEventMgr(executor_,
options.config.gpu_options());
GPUKernelTracker::Params tracker_params(
options.config.gpu_options().experimental().kernel_tracker_max_interval(),
options.config.gpu_options().experimental().kernel_tracker_max_bytes(),
options.config.gpu_options().experimental().kernel_tracker_max_pending());
timestamped_allocator_ =
options.config.gpu_options().experimental().timestamped_allocator();
pending_cap_ = tracker_params.max_pending;
if (timestamped_allocator_ ||
(tracker_params.max_interval > 0 || tracker_params.max_bytes > 0 ||
tracker_params.max_pending > 0)) {
SharedCounter* timing_counter = nullptr;
if (timestamped_allocator_) {
timing_counter =
GPUProcessState::singleton()->GPUAllocatorCounter(tf_device_id_);
DCHECK(timing_counter);
}
kernel_tracker_.reset(new GPUKernelTracker(
tracker_params, Env::Default(), stream_->compute, timing_counter,
timestamped_allocator_ ? gpu_allocator_ : nullptr, em_));
}
accelerator_device_info_ = new DeviceBase::AcceleratorDeviceInfo;
accelerator_device_info_->stream = stream_->compute;
accelerator_device_info_->default_context = device_context_;
#ifdef TF_GPU_USE_PJRT
accelerator_device_info_->pjrt_context = pjrt_device_context_.get();
bool use_pjrt =
GetXlaOpsCommonFlags()->tf_xla_use_device_api.IsEnabledForGpu();
accelerator_device_info_->use_pjrt_tensor_buffer =
use_pjrt && static_cast<PjRtDeviceContext*>(pjrt_device_context_.get())
->use_pjrt_tensor_buffer();
#endif
accelerator_device_info_->event_mgr = em_;
tsl::PlatformDeviceId platform_device_id;
TF_RETURN_IF_ERROR(
GpuIdManager::TfToPlatformDeviceId(tf_device_id_, &platform_device_id));
accelerator_device_info_->gpu_id = platform_device_id.value();
set_tensorflow_accelerator_device_info(accelerator_device_info_);
string gpu_thread_mode;
TF_RETURN_IF_ERROR(
ReadStringFromEnvVar("TF_GPU_THREAD_MODE", "global", &gpu_thread_mode));
gpu_thread_mode = absl::AsciiStrToLower(gpu_thread_mode);
if (gpu_thread_mode != "global") {
int64_t gpu_thread_count = -1;
TF_RETURN_IF_ERROR(
ReadInt64FromEnvVar("TF_GPU_THREAD_COUNT", 2, &gpu_thread_count));
if (gpu_thread_mode == "gpu_private") {
thread_pool_.reset(new thread::ThreadPool(
options.env, ThreadOptions(),
strings::StrCat("gpu_private_", tf_device_id_.value()),
static_cast<int32>(gpu_thread_count),
!options.config.experimental().disable_thread_spinning(),
nullptr));
set_tensorflow_device_thread_pool(thread_pool_.get());
} else if (gpu_thread_mode == "gpu_shared") {
static thread::ThreadPool* thread_pool = new thread::ThreadPool(
options.env, ThreadOptions(), "gpu_shared",
static_cast<int32>(gpu_thread_count),
!options.config.experimental().disable_thread_spinning(),
nullptr);
set_tensorflow_device_thread_pool(thread_pool);
} else {
string error_message =
strings::StrCat("Invalid gpu_thread_mode: ", gpu_thread_mode);
LOG(WARNING) << error_message;
return errors::InvalidArgument(error_message);
}
}
TF_ASSIGN_OR_RETURN(
node_file_writer_,
NodeFileWriter::GetNodeFileWriterIfEnabled(name(), env()));
if (node_file_writer_) {
LOG(INFO) << "Writing NodeDefs to file: " << node_file_writer_->filename();
}
return OkStatus();
}
string BaseGPUDevice::ComputeOpKernelDebugString(const OpKernel& op_kernel,
const int& stream_id) {
return strings::StrCat(op_kernel.name(), " op ", op_kernel.type_string(),
" on GPU ", tf_device_id_.value(), " stream[",
stream_id, "]");
}
std::optional<tsl::TfDeviceId> BaseGPUDevice::FindTfDeviceId(
se::Stream* compute) {
return StreamGroupFactory::Global().FindTfDeviceId(compute);
}
namespace {
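// Helpers for selectively logging op inputs/outputs. The comma-separated op
// type names in the TF_GPU_DEBUG_OPS_TO_LOG environment variable select
// which kernels get their tensors dumped to the INFO log.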
const absl::flat_hash_set<std::string>* GetOpsToLogFromEnv() {
auto* result = new absl::flat_hash_set<std::string>;
const char* env = getenv("TF_GPU_DEBUG_OPS_TO_LOG");
if (!env) {
return result;
}
std::vector<absl::string_view> ops = absl::StrSplit(env, ',');
LOG(INFO) << "Will log inputs & outputs from the following ops: ";
for (absl::string_view op : ops) {
result->insert(std::string(op));
LOG(INFO) << " |" << op << "|";
}
return result;
}
bool ShouldLogInputsAndOutputs(OpKernel* op_kernel) {
static const absl::flat_hash_set<std::string>& ops_to_log =
*GetOpsToLogFromEnv();
return ops_to_log.count(op_kernel->type_string());
}
}
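// Debug-only helper: synchronously copies a device tensor to host memory,
// blocking the host until the copy completes. Not for use on any
// performance-sensitive path.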
Tensor BaseGPUDevice::CopyGpuTensorToHostDebugOnly(const Tensor& gpu_tensor) {
Tensor host_tensor(gpu_tensor.dtype(), gpu_tensor.shape());
auto stream = device_context_->stream();
CHECK(stream
->Memcpy(host_tensor.data(),
se::DeviceMemoryBase(gpu_tensor.data(),
gpu_tensor.TotalBytes()),
gpu_tensor.TotalBytes())
.ok());
CHECK(stream->BlockHostUntilDone().ok());
return host_tensor;
}
void BaseGPUDevice::LogInputs(OpKernel* op_kernel, OpKernelContext* context) {
LOG(INFO) << "Inputs for " << op_kernel->name() << " (total "
<< context->num_inputs() << "):";
for (int i = 0; i < context->num_inputs(); i++) {
if (!context->has_input(i)) {
LOG(INFO) << "input # " << i << " is absent";
continue;
}
Tensor input = context->input_memory_type(i) == DEVICE_MEMORY
? CopyGpuTensorToHostDebugOnly(context->input(i))
: context->input(i);
LOG(INFO) << "input # " << i;
LOG(INFO) << input.DebugString(-1);
}
LOG(INFO) << "";
}
void BaseGPUDevice::LogOutputs(OpKernel* op_kernel, OpKernelContext* context) {
if (!context->status().ok()) {
LOG(INFO) << op_kernel->name()
<< " failed: " << context->status().message();
return;
}
LOG(INFO) << "Outputs for " << op_kernel->name() << " (total "
            << context->num_outputs() << "):";
for (int i = 0; i < context->num_outputs(); i++) {
Tensor* output_ptr = context->mutable_output(i);
if (output_ptr == nullptr) {
LOG(INFO) << "output # " << i << " is null";
continue;
}
Tensor output = context->output_memory_type(i) == DEVICE_MEMORY
? CopyGpuTensorToHostDebugOnly(*output_ptr)
: *output_ptr;
LOG(INFO) << "output # " << i;
LOG(INFO) << output.DebugString(-1);
}
LOG(INFO) << "";
}
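// Runs a kernel on this device's compute stream. Optionally throttles on the
// kernel tracker's pending cap, logs inputs/outputs for ops selected via
// TF_GPU_DEBUG_OPS_TO_LOG, and synchronizes after every op when
// sync_every_op_ is set.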
void BaseGPUDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
GPUDeviceContext* gpu_device_context = device_context_;
if (context->op_device_context() != nullptr) {
gpu_device_context =
static_cast<GPUDeviceContext*>(context->op_device_context());
}
se::Stream* stream = gpu_device_context->stream();
const auto stream_id = gpu_device_context->stream_id();
const bool vlog_1 = VLOG_IS_ON(1);
if (vlog_1) {
VLOG(1) << "GpuDevice::ComputeHelper "
<< ComputeOpKernelDebugString(*op_kernel, stream_id);
}
if (kernel_tracker_.get()) {
context->set_record_memory_consumption(true);
if (pending_cap_ > 0) {
kernel_tracker_->PauseWhilePendingExceeds(pending_cap_);
}
}
ScopedActivateContext scoped_activation{stream->parent()};
profiler::ScopedMemoryDebugAnnotation op_annotation(
op_kernel->name_view().data(), context->step_id());
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
}
op_kernel->Compute(context);
if (should_log_inputs_and_outputs) {
LogOutputs(op_kernel, context);
}
if (context->status().ok()) {
if (sync_every_op_) {
context->SetStatus(GPUUtil::SyncAll(this));
if (vlog_1) {
VLOG(1) << "GpuDevice::ComputeHelper finished "
<< ComputeOpKernelDebugString(*op_kernel, stream_id);
}
} else if (vlog_1) {
VLOG(1) << "GpuDevice::ComputeHelper scheduled "
<< ComputeOpKernelDebugString(*op_kernel, stream_id);
}
if (kernel_tracker_) {
GPUKernelTracker* tracker = kernel_tracker_.get();
DCHECK(tracker);
uint64 queued_count = tracker->MaybeQueue(context);
if (queued_count > 0) {
em_->ThenExecute(stream, [tracker, queued_count]() {
tracker->RecordTerminated(queued_count);
});
}
}
if (node_file_writer_) {
Status s = node_file_writer_->RecordNodeExecution(op_kernel, context);
if (!s.ok()) {
LOG(ERROR) << s;
context->SetStatus(s);
}
}
} else {
if (vlog_1) {
VLOG(1) << "GpuDevice::ComputeHelper failed to schedule "
<< ComputeOpKernelDebugString(*op_kernel, stream_id);
}
}
}
Status BaseGPUDevice::Sync() {
DCHECK_NE(stream_, nullptr);
return stream_->compute->BlockHostUntilDone();
}
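// Schedules an asynchronous kernel on the compute stream, wrapping `done` so
// outputs are logged when the op type is selected for debug logging.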
void BaseGPUDevice::ComputeAsync(AsyncOpKernel* op_kernel,
OpKernelContext* context,
AsyncOpKernel::DoneCallback done) {
GPUDeviceContext* gpu_device_context = device_context_;
if (context->op_device_context() != nullptr) {
gpu_device_context =
static_cast<GPUDeviceContext*>(context->op_device_context());
}
se::Stream* stream = gpu_device_context->stream();
const auto stream_id = gpu_device_context->stream_id();
VLOG(1) << "GpuDevice::ComputeAsync " << op_kernel->name() << " op "
<< op_kernel->type_string() << " on GPU" << tf_device_id_
<< " stream[" << stream_id << "]";
bool should_log_inputs_and_outputs = ShouldLogInputsAndOutputs(op_kernel);
if (should_log_inputs_and_outputs) {
LogInputs(op_kernel, context);
AsyncOpKernel::DoneCallback parent_done = done;
done = [this, parent_done, should_log_inputs_and_outputs, op_kernel,
context]() {
LogOutputs(op_kernel, context);
parent_done();
};
}
ScopedActivateContext scoped_activation{stream->parent()};
op_kernel->ComputeAsync(context, std::move(done));
}
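// Copies `from` to this device unless the allocator attributes pin it to
// host memory, in which case the tensor is aliased directly. `done` receives
// the final status; the returned Status reports only synchronous setup
// failures.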
Status BaseGPUDevice::MaybeCopyTensorToGPU(
const AllocatorAttributes& alloc_attrs, const Tensor& from, Tensor* to,
StatusCallback done) {
if (alloc_attrs.on_host()) {
*to = from;
done(OkStatus());
return OkStatus();
} else {
if (!DMAHelper::CanUseDMA(&from)) {
Status err = errors::Internal("GPU copy from non-DMA ",
DataTypeString(from.dtype()), " tensor");
done(err);
return err;
}
AllocationAttributes allocation_attr;
uint64 safe_alloc_frontier = 0;
std::function<uint64()> freed_by_func = [this, &safe_alloc_frontier]() {
safe_alloc_frontier = SafeAllocFrontier(safe_alloc_frontier);
return safe_alloc_frontier;
};
if (timestamped_allocator_) {
allocation_attr.freed_by_func = &freed_by_func;
}
auto* copy = new Tensor(GetAllocator(alloc_attrs), from.dtype(),
from.shape(), allocation_attr);
if (!copy->IsInitialized()) {
delete copy;
Status err = errors::ResourceExhausted(
"OOM when allocating tensor of shape ", from.shape().DebugString(),
" and type ", DataTypeString(from.dtype()));
done(err);
return err;
}
auto wrapped_done = [to, copy, done = std::move(done)](const Status& s) {
if (s.ok()) {
*to = std::move(*copy);
}
delete copy;
done(s);
};
profiler::ScopedAnnotation annotation("MakeTensorFromProto");
device_context_->CopyCPUTensorToDevice(
&from, this, copy, std::move(wrapped_done),
        !timestamped_allocator_ /*sync_dst_compute*/);
return OkStatus();
}
}
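// Parses a TensorProto into host memory and then copies it to the device.
// DT_VARIANT tensors are copied element-by-element via VariantDeviceCopy;
// all copies are waited on before returning.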
Status BaseGPUDevice::MakeTensorFromProto(const TensorProto& tensor_proto,
const AllocatorAttributes alloc_attrs,
Tensor* tensor) {
AllocatorAttributes attr;
attr.set_on_host(true);
attr.set_gpu_compatible(true);
Allocator* host_alloc = GetAllocator(attr);
Tensor parsed(tensor_proto.dtype());
if (!parsed.FromProto(host_alloc, tensor_proto)) {
return errors::InvalidArgument("Cannot parse tensor from proto: ",
tensor_proto.DebugString());
}
profiler::ScopedMemoryDebugAnnotation op_annotation(
"MakeTensorFromProto", "dynamic", parsed.dtype(),
[&parsed]() { return parsed.shape().DebugString(); });
if (parsed.dtype() == DT_VARIANT) {
const Variant* from = parsed.flat<Variant>().data();
int numa_node = attributes().locality().numa_node();
Tensor copy(cpu_allocator(numa_node), DT_VARIANT, parsed.shape());
Variant* copy_variant = copy.flat<Variant>().data();
std::list<Notification> notifications;
Status copy_status;
auto copier = [this, &alloc_attrs, ¬ifications, ©_status](
const Tensor& from, Tensor* to) {
notifications.emplace_back();
Notification& n = *notifications.rbegin();
return MaybeCopyTensorToGPU(alloc_attrs, from, to,
[&n, ©_status](const Status& s) {
if (copy_status.ok()) {
copy_status.Update(s);
}
n.Notify();
});
};
Status s;
for (int64_t ix = 0; ix < parsed.NumElements(); ++ix) {
s = VariantDeviceCopy(VariantDeviceCopyDirection::HOST_TO_DEVICE,
from[ix], ©_variant[ix], copier);
if (!s.ok()) {
break;
}
}
for (auto& n : notifications) {
n.WaitForNotification();
}
if (!s.ok()) {
return s;
}
*tensor = std::move(copy);
return copy_status;
} else {
Notification n;
Status status;
TF_RETURN_IF_ERROR(MaybeCopyTensorToGPU(alloc_attrs, parsed, tensor,
[&n, &status](const Status& s) {
status = s;
n.Notify();
}));
n.WaitForNotification();
return status;
}
}
void BaseGPUDevice::CopyTensorInSameDevice(const Tensor* input_tensor,
Tensor* output_tensor,
const DeviceContext* device_context,
StatusCallback done) {
GPUUtil::CopyGPUTensorToSameGPU(static_cast<Device*>(this), device_context,
input_tensor, output_tensor, std::move(done));
}
ConcretePerOpGpuDevice::ConcretePerOpGpuDevice()
: stream_device_(std::make_unique<EigenGpuStreamDevice>()) {}
void ConcretePerOpGpuDevice::Reinitialize(OpKernelContext* context,
void* gpu_stream,
tsl::TfDeviceId tf_device_id,
Allocator* base_allocator,
char* scratch) {
tsl::PlatformDeviceId platform_device_id;
TF_CHECK_OK(
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id));
static_cast<EigenGpuStreamDevice*>(stream_device_.get())
->Reinitialize(context, static_cast<gpuStream_t>(gpu_stream),
platform_device_id, base_allocator, scratch);
}
void ConcretePerOpGpuDevice::Reinitialize(
OpKernelContext* context, void* gpu_stream,
tsl::PlatformDeviceId platform_device_id, Allocator* base_allocator,
char* scratch) {
static_cast<EigenGpuStreamDevice*>(stream_device_.get())
->Reinitialize(context, static_cast<gpuStream_t>(gpu_stream),
platform_device_id, base_allocator, scratch);
}
const Eigen::GpuDevice& ConcretePerOpGpuDevice::device() const {
return static_cast<EigenGpuStreamDevice*>(stream_device_.get())->device();
}
namespace {
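// Validates the experimental virtual-device configuration: memory fractions,
// device counts, per-device ordinals, and (on CUDA/ROCm) stream priorities
// against the ranges supported by each physical GPU.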
Status VerifyVirtualDeviceSettings(
const size_t num_gpus_to_use, const GPUOptions& gpu_options,
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order,
const std::vector<tsl::PlatformDeviceId>& valid_platform_device_ids,
const std::map<int, std::pair<int, int>>& supported_priority_ranges) {
const auto& virtual_devices = gpu_options.experimental().virtual_devices();
CHECK(!virtual_devices.empty());
if (gpu_options.per_process_gpu_memory_fraction() > 0) {
return errors::InvalidArgument(
"It's invalid to set per_process_gpu_memory_fraction when "
"virtual_devices is set.");
}
if (num_gpus_to_use < virtual_devices.size()) {
return errors::Unknown(
"Not enough GPUs to create virtual devices."
" num_gpus_to_use: ",
num_gpus_to_use, " #virtual_devices: ", virtual_devices.size());
}
if (!gpu_options.visible_device_list().empty() &&
visible_gpu_order.size() != virtual_devices.size()) {
return errors::InvalidArgument(
"The number of GPUs in visible_device_list doesn't match the number "
"of elements in the virtual_devices list.",
" #GPUs in visible_device_list: ", visible_gpu_order.size(),
" virtual_devices.size(): ", virtual_devices.size());
}
if (valid_platform_device_ids.size() != virtual_devices.size()) {
return errors::Unknown(
"The number of valid GPUs doesn't match the number of elements in "
"the virtual_devices list.",
" #valid GPUs: ", valid_platform_device_ids.size(),
" virtual_devices.size(): ", virtual_devices.size());
}
for (int i = 0; i < virtual_devices.size(); ++i) {
if (virtual_devices.Get(0).device_ordinal().empty() !=
virtual_devices.Get(i).device_ordinal().empty()) {
return errors::InvalidArgument(
"Device ordinals must be set for all virtual devices or none. But "
"the device_ordinal is specified for ",
i, " while previous devices didn't have any set.");
}
}
if (!virtual_devices.Get(0).device_ordinal().empty()) {
for (int i = 0; i < virtual_devices.size(); ++i) {
const size_t memory_limit_mb_size =
virtual_devices.Get(i).memory_limit_mb().size();
const size_t device_ordinal_size =
virtual_devices.Get(i).device_ordinal().size();
if (memory_limit_mb_size != device_ordinal_size) {
return errors::InvalidArgument(
"Number of virtual device ordinals specified doesn't "
"match with number of memory_limit_mb specified for GPU# ",
i, " memory_limit_mb size: ", memory_limit_mb_size,
" and device_ordinal size: ", device_ordinal_size);
}
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
bool priority_exists = !virtual_devices.Get(0).priority().empty();
for (int i = 0; i < virtual_devices.size(); ++i) {
const auto& memory_limit_mb = virtual_devices.Get(i).memory_limit_mb();
const auto& priority = virtual_devices.Get(i).priority();
if (!priority_exists) {
if (!priority.empty()) {
return errors::InvalidArgument(
"Priority must be set for all virtual devices or none. But the "
"priority is specified for ",
i,
" while previous devices didn't "
"have any set.");
}
}
if (priority_exists && memory_limit_mb.size() != priority.size()) {
return errors::InvalidArgument(
"Number of virtual device priorities specified doesn't "
"match with number of memory_limit_mb specified for GPU# ",
i, " memory_limit_mb size: ", memory_limit_mb.size(),
" and priority size: ", priority.size());
}
const int gpu_id = valid_platform_device_ids[i].value();
auto it = supported_priority_ranges.find(gpu_id);
if (it == supported_priority_ranges.end()) {
return errors::Internal(
"Failed to find supported priority range for GPU"
" device ",
gpu_id);
}
const std::pair<int, int>& priority_range = it->second;
for (int p : priority) {
if (p > priority_range.first || p < priority_range.second) {
return errors::InvalidArgument(
"Priority ", p,
" is outside the range of supported priorities "
"[",
priority_range.second, ",", priority_range.first,
"] for virtual device ", i, " on GPU# ", gpu_id);
}
}
}
#endif
return OkStatus();
}
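// Returns how much device memory to leave unallocated for system uses (CUDA
// context, libraries, etc.). The reservation grows with newer compute
// capabilities and is doubled for non-optimized builds.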
int64_t MinSystemMemory(int64_t available_memory, int cc_major) {
int64_t min_system_memory;
if (available_memory < (1LL << 31)) {
min_system_memory = 225 * 1024 * 1024;
} else {
if (cc_major <= 6) {
min_system_memory = 500 * 1024 * 1024;
} else if (cc_major <= 7) {
min_system_memory = 1050 * 1024 * 1024;
} else if (cc_major <= 8) {
min_system_memory = 1536 * 1024 * 1024;
} else {
min_system_memory = 1800 * 1024 * 1024;
}
}
#if defined(__GNUC__) && defined(__OPTIMIZE__)
  // Optimized GCC/Clang build: no extra headroom needed.
#elif !defined(__GNUC__) && defined(NDEBUG)
  // Optimized non-GCC (e.g. MSVC) build: no extra headroom needed.
#else
  // Non-optimized builds consume more device memory; double the reservation.
  min_system_memory *= 2;
#endif
#if defined(ANDROID_TEGRA)
  // Tegra shares system RAM with the GPU, so reserve a full 1 GB.
  min_system_memory = 1 << 30;
#endif
VLOG(5) << "available_memory = " << available_memory;
VLOG(5) << "min_system_memory = " << min_system_memory;
return min_system_memory;
}
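// Computes the memory limit for a single virtual device on the given GPU,
// honoring per_process_gpu_memory_fraction, gpu_system_memory_size_in_mb,
// and the TF_DEVICE_MIN_SYS_MEMORY_IN_MB override.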
Status SingleVirtualDeviceMemoryLimit(const GPUOptions& gpu_options,
tsl::PlatformDeviceId platform_device_id,
int64_t* memory_limit) {
int64_t total_memory = 0;
int64_t available_memory = 0;
se::StreamExecutor* se = se::GPUMachineManager()
->ExecutorForDevice(platform_device_id.value())
.value();
if (!se->DeviceMemoryUsage(&available_memory, &total_memory)) {
return errors::Unknown("Failed to query available memory for GPU ",
platform_device_id.value());
}
int64_t allocated_memory = 0;
const double per_process_gpu_memory_fraction =
gpu_options.per_process_gpu_memory_fraction();
se::CudaComputeCapability cc =
se->GetDeviceDescription().cuda_compute_capability();
if ((per_process_gpu_memory_fraction > 1.0 ||
gpu_options.experimental().use_unified_memory()) &&
!cc.IsAtLeast(se::CudaComputeCapability::PASCAL_)) {
return errors::Internal(
"Unified memory on GPUs with compute capability lower than 6.0 "
"(pre-Pascal class GPUs) does not support oversubscription.");
}
if (per_process_gpu_memory_fraction == 0) {
allocated_memory = available_memory;
const int64_t min_system_memory =
MinSystemMemory(available_memory, cc.major);
if (min_system_memory < allocated_memory) {
allocated_memory -= min_system_memory;
}
} else {
allocated_memory = total_memory * per_process_gpu_memory_fraction;
}
const auto maybe_update_allocated_memory = [&](int64_t reserved_mb) {
int64_t allowable_reserved_memory = reserved_mb * 1024 * 1024;
if (allowable_reserved_memory <= available_memory) {
allocated_memory = available_memory - allowable_reserved_memory;
VLOG(1) << "Setting the GPU reserved bytes to "
<< strings::HumanReadableNumBytes(allocated_memory) << " MBytes";
} else {
LOG(WARNING) << "The requested reserved device memory "
<< strings::HumanReadableNumBytes(allowable_reserved_memory)
<< " is larger than the available memory of "
<< strings::HumanReadableNumBytes(available_memory)
<< ". The request is ignored.";
}
};
if (gpu_options.experimental().gpu_system_memory_size_in_mb() > 0) {
maybe_update_allocated_memory(
gpu_options.experimental().gpu_system_memory_size_in_mb());
}
const char* force_device_reserved_bytes =
std::getenv("TF_DEVICE_MIN_SYS_MEMORY_IN_MB");
if (force_device_reserved_bytes != nullptr &&
strcmp(force_device_reserved_bytes, "") != 0) {
int64_t reserved_mb;
if (!strings::safe_strto64(force_device_reserved_bytes, &reserved_mb) ||
reserved_mb < 0) {
LOG(WARNING) << "The requested reserved device memory "
<< force_device_reserved_bytes
<< " is invalid. The request will be ignored.";
} else {
maybe_update_allocated_memory(reserved_mb);
}
}
*memory_limit = allocated_memory;
return OkStatus();
}
}
void BaseGPUDevice::ReinitializeDevice(OpKernelContext* context,
PerOpGpuDevice* device, int stream_id,
Allocator* allocator) {
ConcretePerOpGpuDevice* concrete_device =
static_cast<ConcretePerOpGpuDevice*>(device);
DCHECK(concrete_device);
DCHECK_EQ(stream_id, 0);
const gpuStream_t gpu_stream = reinterpret_cast<gpuStream_t>(
stream_->compute->platform_specific_handle().stream);
concrete_device->Reinitialize(context, gpu_stream, tf_device_id_, allocator,
scratch_);
}
PerOpGpuDevice* BaseGPUDevice::MakeGpuDevice() {
return new ConcretePerOpGpuDevice();
}
Status BaseGPUDevice::ReinitializeGpuDevice(OpKernelContext* context,
PerOpGpuDevice* device,
DeviceContext* dc,
Allocator* allocator) {
TF_RETURN_IF_ERROR(InitScratchBuffers());
if (dc) {
const GPUDeviceContext* gpu_dc = static_cast<GPUDeviceContext*>(dc);
const int stream_id = gpu_dc->stream_id();
VLOG(1) << " eigen_gpu_device(" << dc << ") => stream[" << stream_id
<< "]";
CHECK_EQ(stream_id, 0);
ReinitializeDevice(context, device, stream_id, allocator);
} else {
ReinitializeDevice(context, device, 0, allocator);
}
return OkStatus();
}
Allocator* BaseGPUDevice::GetScopedAllocator(AllocatorAttributes attr,
int64_t step_id) {
if (attr.scope_id > 0) {
return scoped_allocator_mgr_->GetContainer(step_id)->GetInstance(
attr.scope_id);
}
LOG(FATAL) << "Unexpected call to BaseGPUDevice::GetScopedAllocator "
<< "attr.scope_id = " << attr.scope_id;
return gpu_allocator_;
}
const int BaseGPUDeviceFactory::InterconnectMap::kSameDeviceStrength = 1000;
const int BaseGPUDeviceFactory::InterconnectMap::kStreamExecutorStrength = 1;
Status BaseGPUDeviceFactory::CacheDeviceIds() {
if (!cached_device_ids_.empty()) {
return OkStatus();
}
TF_RETURN_IF_ERROR(se::ValidateGPUMachineManager());
se::Platform* gpu_manager = se::GPUMachineManager();
if (gpu_manager == nullptr) {
return OkStatus();
}
int device_count = gpu_manager->VisibleDeviceCount();
if (device_count <= 0) {
return OkStatus();
}
std::vector<tsl::PlatformDeviceId> visible_gpu_order(device_count);
std::iota(visible_gpu_order.begin(), visible_gpu_order.end(), 0);
TF_RETURN_IF_ERROR(GetValidDeviceIds(visible_gpu_order, &cached_device_ids_));
return OkStatus();
}
Status BaseGPUDeviceFactory::ListPhysicalDevices(std::vector<string>* devices) {
TF_RETURN_IF_ERROR(CacheDeviceIds());
for (tsl::PlatformDeviceId platform_device_id : cached_device_ids_) {
const string device_name =
strings::StrCat("/physical_device:GPU:", platform_device_id.value());
devices->push_back(device_name);
}
return OkStatus();
}
Status BaseGPUDeviceFactory::GetDeviceDetails(
int device_index, std::unordered_map<string, string>* details) {
TF_RETURN_IF_ERROR(CacheDeviceIds());
  if (device_index < 0 || device_index >= cached_device_ids_.size()) {
return errors::Internal("Invalid device index: ", device_index);
}
tsl::PlatformDeviceId platform_device_id = cached_device_ids_[device_index];
TF_RETURN_IF_ERROR(se::ValidateGPUMachineManager());
se::Platform* gpu_manager = se::GPUMachineManager();
if (gpu_manager == nullptr) {
return errors::Internal("Cannot get GPUMachineManager");
}
auto desc_status =
gpu_manager->DescriptionForDevice(platform_device_id.value());
if (!desc_status.ok()) {
return desc_status.status();
}
auto desc = std::move(desc_status).value();
(*details)["device_name"] = desc->name();
#if GOOGLE_CUDA
(*details)["compute_capability"] = desc->cuda_compute_capability().ToString();
#endif
return OkStatus();
}
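// Creates one TF GPU device per requested (virtual) device: validates the
// visible device list, enables peer access, queries stream priority ranges,
// splits physical GPUs into virtual devices, and (under TF_GPU_USE_PJRT)
// creates or reuses a PJRT GPU client sharing the same streams and
// allocators.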
Status BaseGPUDeviceFactory::CreateDevices(
const SessionOptions& options, const string& name_prefix,
std::vector<std::unique_ptr<Device>>* devices) {
TF_RETURN_IF_ERROR(se::ValidateGPUMachineManager());
se::Platform* gpu_manager = se::GPUMachineManager();
if (gpu_manager == nullptr) {
return OkStatus();
}
if (gpu_manager->VisibleDeviceCount() <= 0) {
return OkStatus();
}
size_t num_gpus_to_use = INT_MAX;
auto iter = options.config.device_count().find("GPU");
if (iter != options.config.device_count().end()) {
num_gpus_to_use = iter->second;
}
const auto& gpu_options = options.config.gpu_options();
bool populate_pjrt_gpu_client_creation_info =
gpu_options.experimental().populate_pjrt_gpu_client_creation_info();
#ifdef TF_GPU_USE_PJRT
absl::StatusOr<PjRtGpuClientCreationInfo*> obtained_info =
GetPjRtGpuClientCreationInfo();
if (obtained_info.ok() && obtained_info.value() != nullptr) {
populate_pjrt_gpu_client_creation_info = false;
VLOG(3) << "Previous GetPjRtGpuClientCreationInfo exists, setting "
"populate_pjrt_gpu_client_creation_info to false.";
} else {
VLOG(3)
<< "Previous GetPjRtGpuClientCreationInfo does not exist. Will create.";
}
#endif
std::vector<tsl::PlatformDeviceId> visible_gpu_order;
std::vector<tsl::PlatformDeviceId> valid_platform_device_ids;
VLOG(3) << "CreateGPUDevice: num_gpus_to_use: " << num_gpus_to_use
<< ", visible_device_list: " << gpu_options.visible_device_list()
<< ", populate_pjrt_gpu_client_creation_info: "
<< populate_pjrt_gpu_client_creation_info;
if (num_gpus_to_use > 0) {
TF_RETURN_IF_ERROR(tsl::ParseVisibleDeviceList(
gpu_options.visible_device_list(), gpu_manager->VisibleDeviceCount(),
&visible_gpu_order));
bool new_gpu_found = false;
for (int i = 0; i < visible_gpu_order.size(); ++i) {
int visible_gpu_id = visible_gpu_order[i].value();
if (visible_gpu_initialized_[visible_gpu_id]) {
continue;
}
visible_gpu_initialized_[visible_gpu_id] = true;
new_gpu_found = true;
}
if (new_gpu_found && visible_gpu_order.size() > 1) {
TF_RETURN_IF_ERROR(EnablePeerAccess(visible_gpu_order));
}
TF_RETURN_IF_ERROR(
GetValidDeviceIds(visible_gpu_order, &valid_platform_device_ids));
}
if (num_gpus_to_use > valid_platform_device_ids.size()) {
num_gpus_to_use = valid_platform_device_ids.size();
}
std::map<int, std::pair<int, int>> supported_priority_ranges;
if (!valid_platform_device_ids.empty()) {
int original_device = 0;
#if GOOGLE_CUDA
cudaError_t err = cudaGetDevice(&original_device);
if (err != cudaSuccess) {
return errors::Internal("cudaGetDevice() failed. Status: ",
cudaGetErrorString(err));
}
#elif TENSORFLOW_USE_ROCM
hipError_t err = hipGetDevice(&original_device);
if (err != hipSuccess) {
return errors::Internal("hipGetDevice() failed. Status: ",
hipGetErrorString(err));
}
#endif
for (tsl::PlatformDeviceId platform_device_id : valid_platform_device_ids) {
#if GOOGLE_CUDA
err = cudaSetDevice(platform_device_id.value());
if (err != cudaSuccess) {
return errors::Internal(
"cudaSetDevice() on GPU:", platform_device_id.value(),
" failed. Status: ", cudaGetErrorString(err));
}
      int priority_low, priority_high;
      err = cudaDeviceGetStreamPriorityRange(&priority_low, &priority_high);
      if (err != cudaSuccess) {
        return errors::Internal(
            "cudaDeviceGetStreamPriorityRange() on GPU:",
            platform_device_id.value(),
            " failed. Status: ", cudaGetErrorString(err));
      }
      VLOG(1) << "Cuda stream priority range on GPU("
              << platform_device_id.value() << "): " << priority_high << ","
              << priority_low;
supported_priority_ranges.insert(
std::make_pair(platform_device_id.value(),
std::make_pair(priority_low, priority_high)));
#elif TENSORFLOW_USE_ROCM
err = hipSetDevice(platform_device_id.value());
if (err != hipSuccess) {
return errors::Internal(
"hipSetDevice() on GPU:", platform_device_id.value(),
" failed. Status: ", hipGetErrorString(err));
}
err = hipFree(nullptr);
if (err != hipSuccess) {
return errors::Internal("ROCm runtime implicit initialization on GPU:",
platform_device_id.value(),
" failed. Status: ", hipGetErrorString(err));
}
int priority_low, priority_high;
err = hipDeviceGetStreamPriorityRange(&priority_low, &priority_high);
      if (err != hipSuccess) {
        return errors::Internal(
            "hipDeviceGetStreamPriorityRange() on GPU:",
            platform_device_id.value(),
            " failed. Status: ", hipGetErrorString(err));
      }
      VLOG(1) << "HIP stream priority range on GPU("
              << platform_device_id.value() << "): " << priority_high << ","
              << priority_low;
supported_priority_ranges.insert(
std::make_pair(platform_device_id.value(),
std::make_pair(priority_low, priority_high)));
#endif
}
if (std::find(valid_platform_device_ids.begin(),
valid_platform_device_ids.end(),
original_device) == valid_platform_device_ids.end()) {
original_device = valid_platform_device_ids[0].value();
}
#if GOOGLE_CUDA
err = cudaSetDevice(original_device);
if (err != cudaSuccess) {
return errors::Internal("cudaSetDevice() on GPU:", original_device,
" failed. Status: ", cudaGetErrorString(err));
}
#elif TENSORFLOW_USE_ROCM
err = hipSetDevice(original_device);
if (err != hipSuccess) {
return errors::Internal("hipSetDevice() on GPU:", original_device,
" failed. Status: ", hipGetErrorString(err));
}
#endif
#if GOOGLE_CUDA
int cuda_major_version = CUDART_VERSION / 1000;
int cuda_minor_version = (CUDART_VERSION / 10) % 10;
VLOG(1) << "TensorFlow compiled with CUDA " << cuda_major_version << "."
<< cuda_minor_version << " and cuDNN " << CUDNN_MAJOR << "."
<< CUDNN_MINOR << "." << CUDNN_PATCHLEVEL;
#endif
}
std::vector<InterconnectMap> interconnect_maps;
TF_RETURN_IF_ERROR(
GetInterconnectMaps(visible_gpu_order, gpu_manager, &interconnect_maps));
for (const InterconnectMap& im : interconnect_maps) {
VLOG(1) << "Device interconnect " << im.name << " with strength "
<< im.strength << " edge matrix:";
string line_buf = " ";
for (int i = 0; i < visible_gpu_order.size(); ++i) {
strings::StrAppend(&line_buf, visible_gpu_order[i].value(), " ");
}
VLOG(1) << line_buf;
for (int i = 0; i < visible_gpu_order.size(); ++i) {
line_buf = strings::StrCat(visible_gpu_order[i].value(), ": ");
tsl::PlatformDeviceId gpu_id_i = visible_gpu_order[i];
for (int j = 0; j < visible_gpu_order.size(); ++j) {
tsl::PlatformDeviceId gpu_id_j = visible_gpu_order[j];
if (im.directed_links.find({gpu_id_i, gpu_id_j}) !=
im.directed_links.end()) {
line_buf.append("Y ");
} else {
line_buf.append("N ");
}
}
VLOG(1) << line_buf;
}
}
const auto& virtual_devices = gpu_options.experimental().virtual_devices();
if (!virtual_devices.empty()) {
TF_RETURN_IF_ERROR(VerifyVirtualDeviceSettings(
num_gpus_to_use, gpu_options, visible_gpu_order,
valid_platform_device_ids, supported_priority_ranges));
num_gpus_to_use = virtual_devices.size();
CHECK(gpu_options.visible_device_list().empty() ||
valid_platform_device_ids == visible_gpu_order);
}
if (num_gpus_to_use == 0) {
return OkStatus();
}
struct TfDeviceSpec {
tsl::PlatformDeviceId platform_device_id;
int64_t memory_limit_bytes;
int index;
int device_ordinal;
TfDeviceSpec(tsl::PlatformDeviceId platform_device_id,
int64_t memory_limit_bytes, int index, int device_ordinal)
: platform_device_id(platform_device_id),
memory_limit_bytes(memory_limit_bytes),
index(index),
device_ordinal(device_ordinal) {}
};
std::vector<TfDeviceSpec> tf_device_specs;
constexpr int64_t kMegaByte = 1ll << 20;
for (int i = 0; i < num_gpus_to_use; ++i) {
const tsl::PlatformDeviceId platform_device_id =
valid_platform_device_ids[i];
if (virtual_devices.empty() ||
virtual_devices.Get(i).memory_limit_mb_size() == 0) {
int64_t single_virtual_device_memory_limit = 0;
TF_RETURN_IF_ERROR(
SingleVirtualDeviceMemoryLimit(gpu_options, platform_device_id,
&single_virtual_device_memory_limit));
const int num_virtual_devices =
gpu_options.experimental().num_virtual_devices_per_gpu();
if (num_virtual_devices > 1) {
const int64_t memory_limit_per_virtual_device =
single_virtual_device_memory_limit / num_virtual_devices;
for (int j = 0; j < num_virtual_devices; ++j) {
tf_device_specs.emplace_back(
platform_device_id, memory_limit_per_virtual_device,
tf_device_specs.size(), 0);
}
} else {
tf_device_specs.emplace_back(
platform_device_id, single_virtual_device_memory_limit,
tf_device_specs.size(), 0);
}
} else {
const GPUOptions::Experimental::VirtualDevices& virtual_devices_for_gpu =
virtual_devices.Get(i);
for (int j = 0; j < virtual_devices_for_gpu.memory_limit_mb().size();
j++) {
tf_device_specs.emplace_back(
platform_device_id,
static_cast<int64_t>(virtual_devices_for_gpu.memory_limit_mb(j)) *
kMegaByte,
            tf_device_specs.size(),
            j < virtual_devices_for_gpu.device_ordinal().size()
                ? virtual_devices_for_gpu.device_ordinal(j)
                : 0);
}
}
}
std::sort(tf_device_specs.begin(), tf_device_specs.end(),
[](const TfDeviceSpec& a, const TfDeviceSpec& b) {
if (a.device_ordinal < b.device_ordinal) {
return true;
} else if (a.device_ordinal > b.device_ordinal) {
return false;
}
DCHECK_EQ(a.device_ordinal, b.device_ordinal);
DCHECK(std::addressof(a) == std::addressof(b) ||
a.index != b.index);
if (a.index < b.index) {
return true;
}
return false;
});
for (int di = 0; di < tf_device_specs.size(); ++di) {
tsl::TfDeviceId tf_device_id(di);
TF_RETURN_IF_ERROR(GpuIdManager::InsertTfPlatformDeviceIdPair(
tf_device_id, tf_device_specs[di].platform_device_id));
}
LocalityMap device_localities;
TF_RETURN_IF_ERROR(GetDeviceLocalities(
tf_device_specs.size(), interconnect_maps, &device_localities));
#ifdef TF_GPU_USE_PJRT
std::vector<se::MultiDeviceAdapter::AllocatorInfo> allocator_id_stream_tuples;
allocator_id_stream_tuples.reserve(tf_device_specs.size());
std::map<int, std::unique_ptr<xla::LocalDeviceState>> local_device_states;
std::set<int> allowed_devices;
if (!gpu_options.visible_device_list().empty()) {
for (const TfDeviceSpec& tf_device_spec : tf_device_specs) {
allowed_devices.insert(tf_device_spec.platform_device_id.value());
}
}
TF_ASSIGN_OR_RETURN(
xla::LocalClient * xla_client,
xla::GetGpuXlaClient(
          /*platform_name=*/std::nullopt,
allowed_devices.empty() ? std::nullopt
: std::make_optional(allowed_devices)));
bool should_create_new_pjrt_client = true;
xla::PjRtStreamExecutorClient* pjrt_se_client = nullptr;
auto obtained_pjrt_client = GetPjRtClient(DeviceType(DEVICE_GPU));
if (obtained_pjrt_client.ok()) {
pjrt_se_client = tensorflow::down_cast<xla::PjRtStreamExecutorClient*>(
*obtained_pjrt_client);
if (pjrt_se_client->addressable_device_count() == tf_device_specs.size()) {
should_create_new_pjrt_client = false;
} else {
LOG(WARNING) << "A PjRt GPU Client was previously created, but the "
"addressable device count: "
<< pjrt_se_client->addressable_device_count()
<< " is not equal to tf_device_specs size: "
<< tf_device_specs.size()
<< ". This usually only happens in unit tests and we will "
"create a new PjRt GPU Client.";
}
}
#endif
GPUProcessState* process_state = GPUProcessState::singleton();
for (int di = 0; di < tf_device_specs.size(); ++di) {
tsl::TfDeviceId tf_device_id(di);
std::vector<tsl::TfDeviceId> peer_gpu_ids;
size_t num_tf_gpus = tf_device_specs.size();
peer_gpu_ids.reserve(num_tf_gpus);
for (int id = 0; id < num_tf_gpus; ++id) {
tsl::TfDeviceId peer_tf_device_id(id);
      if (peer_tf_device_id != tf_device_id) {
peer_gpu_ids.push_back(peer_tf_device_id);
}
}
int64_t memory_limit = tf_device_specs[di].memory_limit_bytes;
Allocator* gpu_allocator = process_state->GetGPUAllocator(
options.config.gpu_options(), tf_device_id, memory_limit, peer_gpu_ids);
if (gpu_allocator == nullptr) {
return absl::InternalError(absl::StrCat(
"Failed to get memory allocator for TF GPU ", tf_device_id.value(),
" with ", memory_limit, " bytes of memory."));
}
auto it = device_localities.find(tf_device_id);
if (it == device_localities.end()) {
return errors::Internal("Failed to find DeviceLocality for GPU device ",
tf_device_id.value());
}
#ifdef TF_GPU_USE_PJRT
const auto executor_status = DeviceIdUtil::ExecutorForTfDeviceId(
DEVICE_GPU, gpu_manager, tf_device_id);
if (!executor_status.status().ok()) {
return absl::InternalError(absl::StrCat(
"Failed to get StreamExecutor for device ", tf_device_id.value()));
}
xla::LocalDeviceState* local_device_state = nullptr;
if (should_create_new_pjrt_client ||
populate_pjrt_gpu_client_creation_info) {
VLOG(3) << "should_create_new_pjrt_client="
<< should_create_new_pjrt_client
<< ", populate_pjrt_gpu_client_creation_info="
<< populate_pjrt_gpu_client_creation_info
<< " for device ordinal " << di;
const int priority = GetPriority(tf_device_id.value(), gpu_options);
xla::LocalDeviceState::StreamOptions stream_options;
int num_d2d_streams =
gpu_options.experimental().num_dev_to_dev_copy_streams();
if (num_d2d_streams == 0) num_d2d_streams = 1;
if (num_d2d_streams < 1 || num_d2d_streams > 4) {
LOG(ERROR)
<< "Illegal GPUOptions.experimental.num_dev_to_dev_copy_streams="
<< num_d2d_streams << " set to 1 instead.";
num_d2d_streams = 1;
}
stream_options.num_device_to_device_streams = num_d2d_streams;
stream_options.priority = priority;
auto emplace_result = local_device_states.emplace(
di, std::make_unique<xla::LocalDeviceState>(
executor_status.value(), xla_client,
xla::LocalDeviceState::kComputeSynchronized,
                  /*max_inflight_computations=*/32,
                  /*allow_event_reuse=*/true, /*use_callback_stream=*/true,
                  /*device_ordinal=*/di, stream_options));
if (!emplace_result.second) {
LOG(ERROR) << "Creating LocalDeviceState for device ordinal: " << di
<< " already exists! Returning an error";
return absl::InternalError(absl::StrCat(
"GPU local device state for tf_device_id: ", tf_device_id.value(),
" already exists."));
}
local_device_state = emplace_result.first->second.get();
} else {
VLOG(3) << "should_create_new_pjrt_client="
<< should_create_new_pjrt_client << " for device ordinal " << di
<< ". Re-using local_device_state";
auto* pjrt_se_client =
tensorflow::down_cast<xla::PjRtStreamExecutorClient*>(
*obtained_pjrt_client);
local_device_state = &(pjrt_se_client->device_state(di));
}
TF_RETURN_IF_ERROR(CreateGPUDevice(
options, name_prefix, tf_device_id,
it->second,
local_device_state, gpu_allocator, devices));
if (should_create_new_pjrt_client ||
populate_pjrt_gpu_client_creation_info) {
auto gpu_allocator_ptr = std::unique_ptr<Allocator>(gpu_allocator);
allocator_id_stream_tuples.emplace_back(
std::move(gpu_allocator_ptr), local_device_state->compute_stream(), 0,
tf_device_id.value());
}
}
if (local_device_states.empty()) {
return OkStatus();
}
if (should_create_new_pjrt_client || populate_pjrt_gpu_client_creation_info) {
VLOG(3) << "should_create_new_pjrt_client=" << should_create_new_pjrt_client
<< ", populate_pjrt_gpu_client_creation_info="
<< populate_pjrt_gpu_client_creation_info;
auto allocator_adapter = std::make_unique<se::MultiDeviceAdapter>(
gpu_manager, std::move(allocator_id_stream_tuples));
const int64_t numa_node = 0;
std::unique_ptr<tsl::Allocator> pjrt_gpu_host_allocator(
process_state->GetGpuHostAllocator({}, numa_node));
if (populate_pjrt_gpu_client_creation_info &&
!should_create_new_pjrt_client) {
auto pjrt_gpu_client_creation_info =
std::make_unique<PjRtGpuClientCreationInfo>();
pjrt_gpu_client_creation_info->allocator = std::move(allocator_adapter);
pjrt_gpu_client_creation_info->host_memory_allocator =
std::move(pjrt_gpu_host_allocator);
pjrt_gpu_client_creation_info->local_device_states =
std::move(local_device_states);
pjrt_gpu_client_creation_info->local_client = xla_client;
pjrt_gpu_client_creation_info->allowed_devices =
std::move(allowed_devices);
return SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(
std::move(pjrt_gpu_client_creation_info));
}
if (should_create_new_pjrt_client) {
int node_id = gpu_options.experimental().node_id();
std::vector<std::unique_ptr<xla::PjRtStreamExecutorDevice>> pjrt_devices =
xla::BuildLocalDevices(std::move(local_device_states),
node_id);
auto& pjrt_rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
pjrt_rollout_config.AllowForDeviceInXlaLaunch(DEVICE_GPU);
pjrt_rollout_config.AllowForDeviceInXlaCompileOnDemand(DEVICE_GPU);
pjrt_rollout_config.AllowForDeviceInXlaCompileAndRun(DEVICE_GPU);
auto gpu_run_options =
std::make_unique<xla::gpu::GpuExecutableRunOptions>();
#if TENSORFLOW_USE_ROCM
auto platform_name = xla::RocmName();
#elif TENSORFLOW_USE_SYCL
  auto platform_name = xla::SyclName();
#else
auto platform_name = xla::CudaName();
#endif
std::unique_ptr<xla::PjRtClient> pjrt_client =
std::make_unique<xla::StreamExecutorGpuClient>(
platform_name, xla_client, std::move(pjrt_devices),
numa_node,
std::move(allocator_adapter),
std::move(pjrt_gpu_host_allocator),
true,
std::move(gpu_run_options),
nullptr, nullptr);
return SetPjRtClientInTFGlobalResourceManager(DeviceType(DEVICE_GPU),
std::move(pjrt_client));
}
LOG(INFO)
<< "Unexpectedly returning OK STATUS in "
"BaseGPUDeviceFactory::CreateDevices without creating a PJRT GPU "
"client when one does not already exist. If there is any problem "
"with GPU computations, file a bug. (But if this occurs in a "
"test environment that doesn't actually perform GPU "
"computations, this might not be a problem.)";
return OkStatus();
} else {
return obtained_pjrt_client.status();
}
#else
TF_RETURN_IF_ERROR(CreateGPUDevice(options, name_prefix, tf_device_id,
it->second,
gpu_allocator, devices));
}
return OkStatus();
#endif
}
static string GetShortDeviceDescription(
tsl::PlatformDeviceId platform_device_id,
const se::DeviceDescription& desc) {
#if GOOGLE_CUDA
return strings::StrCat(
"device: ", platform_device_id.value(), ", name: ", desc.name(),
", pci bus id: ", desc.pci_bus_id(),
", compute capability: ", desc.cuda_compute_capability().ToString());
#elif TENSORFLOW_USE_ROCM
return strings::StrCat("device: ", platform_device_id.value(),
", name: ", desc.name(),
", pci bus id: ", desc.pci_bus_id());
#endif
}
#ifdef TF_GPU_USE_PJRT
Status BaseGPUDeviceFactory::CreateGPUDevice(
const SessionOptions& options, const string& name_prefix,
tsl::TfDeviceId tf_device_id, const DeviceLocality& dev_locality,
xla::LocalDeviceState* xla_local_device_state, Allocator* gpu_allocator,
std::vector<std::unique_ptr<Device>>* devices) {
#else
Status BaseGPUDeviceFactory::CreateGPUDevice(
const SessionOptions& options, const string& name_prefix,
tsl::TfDeviceId tf_device_id, const DeviceLocality& dev_locality,
Allocator* gpu_allocator, std::vector<std::unique_ptr<Device>>* devices) {
#endif
CHECK_GE(tf_device_id.value(), 0);
const string device_name =
strings::StrCat(name_prefix, "/device:GPU:", tf_device_id.value());
tsl::CheckValidTfDeviceId(
DEVICE_GPU, se::GPUMachineManager()->VisibleDeviceCount(), tf_device_id);
tsl::PlatformDeviceId platform_device_id;
TF_RETURN_IF_ERROR(
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id));
int numa_node = dev_locality.numa_node();
se::Platform* gpu_manager = se::GPUMachineManager();
auto desc_status =
gpu_manager->DescriptionForDevice(platform_device_id.value());
if (!desc_status.ok()) {
return desc_status.status();
}
auto desc = std::move(desc_status).value();
std::optional<AllocatorStats> stats = gpu_allocator->GetStats();
int64_t bytes_limit = (stats && stats->bytes_limit) ? *stats->bytes_limit : 0;
std::unique_ptr<BaseGPUDevice> gpu_device = CreateGPUDevice(
options, device_name, static_cast<Bytes>(bytes_limit), dev_locality,
tf_device_id, GetShortDeviceDescription(platform_device_id, *desc),
gpu_allocator, ProcessState::singleton()->GetCPUAllocator(numa_node));
LOG(INFO) << "Created device " << device_name << " with "
<< (bytes_limit >> 20) << " MB memory: " << " -> "
<< GetShortDeviceDescription(platform_device_id, *desc);
#ifdef TF_GPU_USE_PJRT
TF_RETURN_IF_ERROR(gpu_device->Init(options, xla_local_device_state));
#else
TF_RETURN_IF_ERROR(gpu_device->Init(options));
#endif
gpu_allocator->SetStreamAndPreallocateMemory(
gpu_device->compute_stream()->platform_specific_handle().stream);
devices->push_back(std::move(gpu_device));
return OkStatus();
}
namespace {
std::unique_ptr<
std::map<std::pair<tsl::PlatformDeviceId, tsl::PlatformDeviceId>, bool>>
GetPeerAccessMap(se::Platform* platform,
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order) {
std::unique_ptr<
std::map<std::pair<tsl::PlatformDeviceId, tsl::PlatformDeviceId>, bool>>
map(new std::map<std::pair<tsl::PlatformDeviceId, tsl::PlatformDeviceId>,
bool>);
for (tsl::PlatformDeviceId platform_gpu_i : visible_gpu_order) {
for (tsl::PlatformDeviceId platform_gpu_j : visible_gpu_order) {
se::StreamExecutor* from =
platform->ExecutorForDevice(platform_gpu_i.value()).value();
se::StreamExecutor* to =
platform->ExecutorForDevice(platform_gpu_j.value()).value();
(*map)[{platform_gpu_i, platform_gpu_j}] =
from->CanEnablePeerAccessTo(to);
}
}
return map;
}
}
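// Builds a single "StreamExecutor" interconnect map whose directed links
// record which visible GPU pairs can enable peer access.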
Status BaseGPUDeviceFactory::GetInterconnectMaps(
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order,
se::Platform* gpu_manager, std::vector<InterconnectMap>* maps) {
auto access_map = GetPeerAccessMap(gpu_manager, visible_gpu_order);
maps->resize(1);
InterconnectMap& imap = maps->at(0);
imap.name = "StreamExecutor";
imap.strength = InterconnectMap::kStreamExecutorStrength;
for (tsl::PlatformDeviceId gpu_id_i : visible_gpu_order) {
for (tsl::PlatformDeviceId gpu_id_j : visible_gpu_order) {
if (gpu_id_i == gpu_id_j) continue;
if ((*access_map)[{gpu_id_i, gpu_id_j}]) {
imap.directed_links.insert({gpu_id_i, gpu_id_j});
}
}
}
return OkStatus();
}
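// Fills in a DeviceLocality for every TF GPU: NUMA node (defaulting to 0
// when unknown), bus id, interconnect links, and SAME_DEVICE links between
// virtual devices backed by the same physical GPU.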
Status BaseGPUDeviceFactory::GetDeviceLocalities(
int num_tf_gpus, const std::vector<InterconnectMap>& interconnects,
LocalityMap* localities) {
std::vector<tsl::TfDeviceId> all_tf_device_ids;
all_tf_device_ids.reserve(num_tf_gpus);
for (int i = 0; i < num_tf_gpus; ++i) {
all_tf_device_ids.push_back(tsl::TfDeviceId(i));
}
for (tsl::TfDeviceId tf_device_id : all_tf_device_ids) {
tsl::PlatformDeviceId platform_device_id;
TF_RETURN_IF_ERROR(
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id));
se::Platform* gpu_manager = se::GPUMachineManager();
auto desc_status =
gpu_manager->DescriptionForDevice(platform_device_id.value());
if (!desc_status.ok()) {
return desc_status.status();
}
auto desc = std::move(desc_status).value();
int numa_node = desc->numa_node();
if (numa_node < 0) {
LOG(INFO) << "Could not identify NUMA node of platform GPU id "
<< platform_device_id
<< ", defaulting to 0. Your kernel may not have been built "
<< "with NUMA support.";
numa_node = 0;
}
DeviceLocality dev_locality;
dev_locality.set_numa_node(numa_node);
dev_locality.set_bus_id(numa_node + 1);
LocalLinks* links = dev_locality.mutable_links();
for (const InterconnectMap& imap : interconnects) {
for (tsl::TfDeviceId tf_gpu_dst : all_tf_device_ids) {
tsl::PlatformDeviceId platform_gpu_dst;
TF_RETURN_IF_ERROR(
GpuIdManager::TfToPlatformDeviceId(tf_gpu_dst, &platform_gpu_dst));
if (imap.directed_links.find({platform_device_id, platform_gpu_dst}) !=
imap.directed_links.end()) {
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(tf_gpu_dst.value());
ilink->set_type(imap.name);
ilink->set_strength(imap.strength);
}
}
}
for (tsl::TfDeviceId tf_gpu_dst : all_tf_device_ids) {
if (tf_device_id == tf_gpu_dst) continue;
tsl::PlatformDeviceId platform_gpu_dst;
TF_RETURN_IF_ERROR(
GpuIdManager::TfToPlatformDeviceId(tf_gpu_dst, &platform_gpu_dst));
if (platform_device_id == platform_gpu_dst) {
InterconnectLink* ilink = links->add_link();
ilink->set_device_id(tf_gpu_dst.value());
ilink->set_type("SAME_DEVICE");
ilink->set_strength(InterconnectMap::kSameDeviceStrength);
}
}
(*localities)[tf_device_id] = dev_locality;
VLOG(1) << "GPUDevice PlatformDeviceId " << platform_device_id
<< " TfDeviceId " << tf_device_id << " on bus "
<< dev_locality.bus_id() << " numa: " << numa_node
<< " pci: " << desc->pci_bus_id()
<< " DeviceLocality: " << dev_locality.DebugString();
}
return OkStatus();
}
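// Default minimum multiprocessor count a GPU must have to be used. Starts at
// 8, but is lowered to the largest visible GPU's core count when every
// visible GPU is smaller, so at least one device still qualifies.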
static int GetDefaultMinGPUMultiprocessorCount(
se::Platform* gpu_manager,
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order) {
static const int kDefaultMinGPUMultiprocessorCount = 8;
int max_count = -1;
for (int i = 0; i < visible_gpu_order.size(); ++i) {
int visible_gpu_id = visible_gpu_order[i].value();
auto description_status = gpu_manager->DescriptionForDevice(visible_gpu_id);
if (!description_status.ok()) {
continue;
}
auto description = std::move(description_status).value();
max_count = std::max(max_count, description->core_count());
}
if (max_count < 0 || kDefaultMinGPUMultiprocessorCount < max_count) {
return kDefaultMinGPUMultiprocessorCount;
} else {
return max_count;
}
}
static int GetMinGPUMultiprocessorCount(
se::Platform* gpu_manager,
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order) {
const char* tf_min_gpu_core_count = getenv("TF_MIN_GPU_MULTIPROCESSOR_COUNT");
if (tf_min_gpu_core_count == nullptr ||
strcmp(tf_min_gpu_core_count, "") == 0) {
return GetDefaultMinGPUMultiprocessorCount(gpu_manager, visible_gpu_order);
}
int min_gpu_core_count = -1;
if (strings::safe_strto32(tf_min_gpu_core_count, &min_gpu_core_count)) {
if (min_gpu_core_count >= 0) {
return min_gpu_core_count;
}
}
int count =
GetDefaultMinGPUMultiprocessorCount(gpu_manager, visible_gpu_order);
LOG(ERROR) << "Invalid minimum GPU multiprocessor count: ["
<< tf_min_gpu_core_count << "]. "
<< "Using the default value: " << count;
return count;
}
namespace {
#if GOOGLE_CUDA
se::CudaComputeCapability ComputeCapabilityFromString(
const std::string& version_name) {
int major_part, minor_part;
size_t dot_pos = version_name.find('.');
CHECK(dot_pos != string::npos)
<< "Illegal version name: [" << version_name << "]";
string major_str = version_name.substr(0, dot_pos);
CHECK(strings::safe_strto32(major_str, &major_part))
<< "Illegal version name: [" << version_name << "]";
string minor_str = version_name.substr(dot_pos + 1);
CHECK(strings::safe_strto32(minor_str, &minor_part))
<< "Illegal version name: [" << version_name << "]";
return se::CudaComputeCapability{major_part, minor_part};
}
std::vector<se::CudaComputeCapability> GetSupportedCudaComputeCapabilities() {
std::vector<se::CudaComputeCapability> cuda_caps = {
ComputeCapabilityFromString("3.5"), ComputeCapabilityFromString("5.2")};
#ifdef TF_EXTRA_CUDA_CAPABILITIES
#define TF_XSTRING(...) #__VA_ARGS__
#define TF_STRING(s) TF_XSTRING(s)
string extra_cuda_caps = TF_STRING(TF_EXTRA_CUDA_CAPABILITIES);
#undef TF_STRING
#undef TF_XSTRING
auto extra_capabilities = str_util::Split(extra_cuda_caps, ',');
for (const auto& capability : extra_capabilities) {
cuda_caps.push_back(ComputeCapabilityFromString(capability));
}
#endif
return cuda_caps;
}
#endif
}
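// Attempts to enable peer access between every ordered pair of visible GPUs.
// Fails only if the driver reported possible pairs but none could actually
// be enabled.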
Status BaseGPUDeviceFactory::EnablePeerAccess(
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order) {
se::Platform* gpu_manager = se::GPUMachineManager();
int possible_peer_count = 0;
int enabled_peer_count = 0;
for (int i = 0; i < visible_gpu_order.size(); ++i) {
const tsl::PlatformDeviceId platform_gpu_i = visible_gpu_order[i];
for (int j = 0; j < visible_gpu_order.size(); ++j) {
const tsl::PlatformDeviceId platform_gpu_j = visible_gpu_order[j];
se::StreamExecutor* from =
gpu_manager->ExecutorForDevice(platform_gpu_i.value()).value();
se::StreamExecutor* to =
gpu_manager->ExecutorForDevice(platform_gpu_j.value()).value();
if (from->CanEnablePeerAccessTo(to)) {
++possible_peer_count;
auto status = from->EnablePeerAccessTo(to);
if (!status.ok()) {
LOG(WARNING)
<< "Unable to enable peer access between device ordinals "
<< platform_gpu_i << " and " << platform_gpu_j
<< ", status: " << status;
} else {
++enabled_peer_count;
}
}
}
}
if (possible_peer_count > 0 && enabled_peer_count == 0) {
return errors::Internal(possible_peer_count,
" potential peer access pairs were reported by the "
"driver, but no peering could be enabled.");
}
return OkStatus();
}
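// Filters the visible GPUs down to the usable ones: logs device properties,
// verifies the required GPU libraries can be loaded, and drops devices whose
// compute capability or multiprocessor count is below the supported minimum.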
Status BaseGPUDeviceFactory::GetValidDeviceIds(
const std::vector<tsl::PlatformDeviceId>& visible_gpu_order,
std::vector<tsl::PlatformDeviceId>* ids) {
se::Platform* gpu_manager = se::GPUMachineManager();
for (int i = 0; i < visible_gpu_order.size(); ++i) {
int visible_gpu_id = visible_gpu_order[i].value();
auto description_status = gpu_manager->DescriptionForDevice(visible_gpu_id);
if (!description_status.ok()) {
return description_status.status();
}
auto description = std::move(description_status).value();
#if GOOGLE_CUDA
VLOG(1) << "Found device " << i << " with properties: " << "\npciBusID: "
<< description->pci_bus_id() << " name: " << description->name()
<< " computeCapability: "
<< description->cuda_compute_capability().ToString()
<< "\ncoreClock: " << description->clock_rate_ghz() << "GHz"
<< " coreCount: " << description->core_count()
<< " deviceMemorySize: "
<< strings::HumanReadableNumBytes(description->device_memory_size())
<< " deviceMemoryBandwidth: "
<< strings::HumanReadableNumBytes(description->memory_bandwidth())
<< "/s";
#elif TENSORFLOW_USE_ROCM
std::string gcn_arch_name =
description->rocm_compute_capability().gcn_arch_name();
VLOG(1) << "Found device " << i << " with properties: " << "\npciBusID: "
<< description->pci_bus_id() << " name: " << description->name()
<< " ROCm AMDGPU Arch: " << gcn_arch_name
<< "\ncoreClock: " << description->clock_rate_ghz() << "GHz"
<< " coreCount: " << description->core_count()
<< " deviceMemorySize: "
<< strings::HumanReadableNumBytes(description->device_memory_size())
<< " deviceMemoryBandwidth: "
<< strings::HumanReadableNumBytes(description->memory_bandwidth())
<< "/s";
#endif
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto handle_or = tsl::internal::DsoLoader::MaybeTryDlopenGPULibraries();
if (!handle_or.ok()) {
LOG(WARNING) << "Cannot dlopen some GPU libraries. Please make sure the "
"missing libraries mentioned above are installed properly "
"if you would like to use GPU. Follow the guide at "
"https:
"download and setup the required libraries for your "
"platform.\nSkipping registering "
"GPU devices...";
return OkStatus();
}
#endif
#if GOOGLE_CUDA
auto cuda_supported_capabilities = GetSupportedCudaComputeCapabilities();
if (cuda_supported_capabilities.empty()) {
return errors::FailedPrecondition(
"No supported cuda capabilities in binary.");
}
se::CudaComputeCapability min_supported_capability = *std::min_element(
cuda_supported_capabilities.begin(), cuda_supported_capabilities.end());
#endif
int min_gpu_core_count =
GetMinGPUMultiprocessorCount(gpu_manager, visible_gpu_order);
for (int i = 0; i < visible_gpu_order.size(); ++i) {
const tsl::PlatformDeviceId visible_gpu_id = visible_gpu_order[i];
auto description_status =
gpu_manager->DescriptionForDevice(visible_gpu_id.value());
if (!description_status.ok()) {
LOG(INFO) << "Ignoring visible gpu device " << visible_gpu_id
<< " whose executor is in invalid state: "
<< description_status.status().ToString();
continue;
}
auto desc = std::move(description_status).value();
#if GOOGLE_CUDA
if (desc->cuda_compute_capability() < min_supported_capability) {
LOG(INFO) << "Ignoring visible gpu device " << "("
<< GetShortDeviceDescription(visible_gpu_id, *desc) << ") "
<< "with Cuda compute capability "
<< desc->cuda_compute_capability().ToString()
<< ". The minimum required Cuda capability is "
<< min_supported_capability.ToString() << ".";
continue;
}
#elif TENSORFLOW_USE_ROCM
auto rocm_compute_capability = desc->rocm_compute_capability();
if (!rocm_compute_capability.is_supported_gfx_version()) {
LOG(INFO) << "Ignoring visible gpu device " << "("
<< GetShortDeviceDescription(visible_gpu_id, *desc) << ") "
<< "with AMDGPU version : "
<< rocm_compute_capability.gfx_version()
<< ". The supported AMDGPU versions are "
<< rocm_compute_capability.supported_gfx_versions_str() << ".";
continue;
}
#endif
if (desc->core_count() < min_gpu_core_count) {
LOG(INFO) << "Ignoring visible gpu device " << "("
<< GetShortDeviceDescription(visible_gpu_id, *desc) << ") "
<< "with core count: " << desc->core_count()
<< ". The minimum required count is " << min_gpu_core_count
<< ". You can adjust this requirement with the env var "
"TF_MIN_GPU_MULTIPROCESSOR_COUNT.";
continue;
}
#if defined(GOOGLE_CUDA) && !defined(PLATFORM_GOOGLE)
auto compute_capabilities = {TF_CUDA_COMPUTE_CAPABILITIES};
auto device_capability = desc->cuda_compute_capability();
if (std::count_if(std::cbegin(compute_capabilities),
std::cend(compute_capabilities), [&](int cc) {
return cc / 10 == device_capability.major &&
cc % 10 <= device_capability.minor;
}) == 0) {
LOG(WARNING)
<< "TensorFlow was not built with CUDA kernel binaries "
"compatible with compute capability "
<< device_capability.ToString() << ". CUDA kernels will be "
<< "jit-compiled from PTX, which could take 30 minutes or longer.";
}
#endif
ids->push_back(visible_gpu_id);
}
if (!ids->empty()) {
std::vector<int> raw_ids(ids->size());
std::transform(ids->begin(), ids->end(), raw_ids.begin(),
[](tsl::PlatformDeviceId id) -> int { return id.value(); });
VLOG(1) << "Adding visible gpu devices: " << absl::StrJoin(raw_ids, ", ");
}
return OkStatus();
}
uint64 BaseGPUDevice::SafeAllocFrontier(uint64 old_value) {
if (timestamped_allocator_) {
return kernel_tracker_->LastTerminatedCount(old_value);
} else {
return 0;
}
}
int BaseGPUDevice::PendingKernels() {
if (kernel_tracker_) {
return kernel_tracker_->NumPending();
}
return 0;
}
void BaseGPUDevice::TestOnlyReset() {
StreamGroupFactory::Global().TestOnlyReset();
}
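// Possibly records a new pending-kernel timing event for this op. Returns 0
// when the op is folded into the current interval (below the byte and op
// thresholds); otherwise returns the queued count to pass to
// RecordTerminated once the kernel completes.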
uint64 GPUKernelTracker::MaybeQueue(OpKernelContext* ctx) {
mutex_lock l(mu_);
++ops_since_last_;
int64_t mem_used =
ctx->persistent_memory_allocated() + ctx->temp_memory_allocated();
VLOG(2) << "kernel: " << ctx->op_kernel().name() << " mem_used: " << mem_used;
mem_since_last_ += mem_used;
int weight = 1;
if ((mem_since_last_ < params_.max_bytes) &&
(ops_since_last_ < params_.max_interval)) {
return 0;
} else {
weight = std::min(
params_.max_pending,
std::max(1, mem_since_last_ / std::max(16386, params_.max_bytes)));
mem_since_last_ = 0;
ops_since_last_ = 0;
}
uint64 queued_count = timing_counter_->next();
RecordQueued(queued_count, weight);
return queued_count;
}
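// Appends an entry to the pending-kernel ring buffer, growing or compacting
// the buffer when the write cursor catches up with the completion cursor.
// Caller must hold mu_.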
void GPUKernelTracker::RecordQueued(uint64 queued_count, int weight) {
VLOG(2) << "RecordQueued queued_count=" << queued_count
<< " first_available_=" << first_available_
<< " last_completed_=" << last_completed_
<< " num_pending_=" << num_pending_;
pending_kernels_[first_available_].queued_count = queued_count;
pending_kernels_[first_available_].weight = weight;
pending_kernels_[first_available_].terminated = false;
++first_available_;
num_pending_ += weight;
if (first_available_ >= pending_kernels_.size()) {
if (last_completed_ >= 0) {
first_available_ = 0;
} else {
pending_kernels_.resize(2 * pending_kernels_.size());
}
}
if (first_available_ == last_completed_) {
std::vector<PendingKernel> new_buffer(pending_kernels_.size() * 2);
for (int i = 0; i < pending_kernels_.size(); ++i) {
int j = (i + last_completed_) % pending_kernels_.size();
new_buffer[i] = pending_kernels_[j];
}
last_completed_ = 0;
first_available_ = pending_kernels_.size();
pending_kernels_.swap(new_buffer);
VLOG(1) << "last_completed_=" << last_completed_
<< " first_available_=" << first_available_
<< " num_pending_=" << num_pending_;
}
DCHECK_NE(first_available_, last_completed_) << "exhausted pending_kernels";
}
void GPUKernelTracker::MaybeQueueProgressEvent() {
mutex_lock l(mu_);
if (num_pending_ == 0) {
uint64 new_count = timing_counter_->next();
RecordQueued(new_count, 1);
em_->ThenExecute(stream_,
[this, new_count]() { RecordTerminated(new_count); });
}
}
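// Marks the entry with queued_count as terminated, advances last_completed_
// over any contiguous run of terminated entries, and pushes the allocator's
// safe frontier forward.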
void GPUKernelTracker::RecordTerminated(uint64 queued_count) {
mutex_lock l(mu_);
VLOG(2) << this << " RecordTerminated queued_count=" << queued_count
<< " first_available_=" << first_available_
<< " last_completed_=" << last_completed_
<< " num_pending_=" << num_pending_ << " LC="
<< ((last_completed_ >= 0)
? pending_kernels_[last_completed_].queued_count
: -1);
DCHECK_NE(first_available_, last_completed_);
DCHECK_GT(num_pending_, 0);
int index = (last_completed_ + 1) % pending_kernels_.size();
int weight = 1;
while (true) {
if (index == first_available_) {
LOG(FATAL) << "Failed to find " << queued_count
<< " in queue, last_completed_=" << last_completed_
<< " index=" << index
<< " first_available_=" << first_available_
<< " pending_kernels_.size()=" << pending_kernels_.size();
}
if (pending_kernels_[index].queued_count == queued_count) {
pending_kernels_[index].terminated = true;
weight = pending_kernels_[index].weight;
break;
}
index = (index + 1) % pending_kernels_.size();
}
while (true) {
int next_index = (last_completed_ + 1) % pending_kernels_.size();
if (next_index == first_available_) break;
if (pending_kernels_[next_index].terminated) {
last_completed_ = next_index;
} else {
break;
}
}
if (last_completed_ >= 0) {
int64_t v = pending_kernels_[last_completed_].queued_count;
last_terminated_count_ = v;
if (allocator_) {
allocator_->SetSafeFrontier(v);
}
}
num_pending_ -= weight;
pending_decreased_.notify_all();
}
}
#endif | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if (defined(PLATFORM_GOOGLE) && defined(TF_PLATFORM_LINUX_X86_64))
#define TF_GPU_USE_PJRT
#endif
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#ifdef TF_GPU_USE_PJRT
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#endif
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace tensorflow {
namespace {
using ::testing::SizeIs;
const char* kDeviceNamePrefix = "/job:localhost/replica:0/task:0";
int64_t GetTotalGPUMemory(tsl::PlatformDeviceId gpu_id) {
se::StreamExecutor* se =
se::GPUMachineManager()->ExecutorForDevice(gpu_id.value()).value();
int64_t total_memory, available_memory;
CHECK(se->DeviceMemoryUsage(&available_memory, &total_memory));
return total_memory;
}
se::CudaComputeCapability GetComputeCapability() {
return se::GPUMachineManager()
->ExecutorForDevice(0)
.value()
->GetDeviceDescription()
.cuda_compute_capability();
}
void ExpectErrorMessageSubstr(const Status& s, StringPiece substr) {
EXPECT_TRUE(absl::StrContains(s.ToString(), substr))
<< s << ", expected substring " << substr;
}
}
class GPUDeviceTest : public ::testing::Test {
public:
void TearDown() override {
BaseGPUDevice::TestOnlyReset();
GPUProcessState::singleton()->TestOnlyReset();
}
protected:
static SessionOptions MakeSessionOptions(
const string& visible_device_list = "",
double per_process_gpu_memory_fraction = 0, int gpu_device_count = 1,
const std::vector<std::vector<float>>& memory_limit_mb = {},
const std::vector<std::vector<int32>>& priority = {},
const std::vector<std::vector<int32>>& device_ordinal = {},
const int32 num_virtual_devices = 0,
const bool use_cuda_malloc_async = false) {
SessionOptions options;
ConfigProto* config = &options.config;
(*config->mutable_device_count())["GPU"] = gpu_device_count;
GPUOptions* gpu_options = config->mutable_gpu_options();
gpu_options->set_visible_device_list(visible_device_list);
gpu_options->set_per_process_gpu_memory_fraction(
per_process_gpu_memory_fraction);
gpu_options->mutable_experimental()->set_use_cuda_malloc_async(
use_cuda_malloc_async);
if (!memory_limit_mb.empty()) {
for (int i = 0; i < memory_limit_mb.size(); ++i) {
auto virtual_devices =
gpu_options->mutable_experimental()->add_virtual_devices();
for (float mb : memory_limit_mb[i]) {
virtual_devices->add_memory_limit_mb(mb);
}
if (i < device_ordinal.size()) {
for (int o : device_ordinal[i]) {
virtual_devices->add_device_ordinal(o);
}
}
if (i < priority.size()) {
for (int p : priority[i]) {
virtual_devices->add_priority(p);
}
}
}
} else if (num_virtual_devices > 0) {
gpu_options->mutable_experimental()->set_num_virtual_devices_per_gpu(
num_virtual_devices);
}
return options;
}
void InitCPUTensor(Tensor* cpu_tensor, int num_elements, float value) {
auto tensor = cpu_tensor->tensor<float, 1>();
for (int i = 0; i < num_elements; ++i) {
tensor(i) = value;
}
}
void CopyCPUToGPU(Tensor* cpu_tensor, Tensor* gpu_tensor, Device* device,
DeviceContext* device_context) {
TF_ASSERT_OK(device_context->CopyCPUTensorToDeviceSync(cpu_tensor, device,
gpu_tensor));
}
void CopyGPUToCPU(Tensor* gpu_tensor, Tensor* cpu_tensor, Device* device,
DeviceContext* device_context) {
TF_ASSERT_OK(device_context->CopyDeviceTensorToCPUSync(
gpu_tensor, "", device, cpu_tensor));
}
};
TEST_F(GPUDeviceTest, DISABLED_ON_GPU_ROCM(CudaMallocAsync)) {
#ifndef GOOGLE_CUDA
return;
#elif CUDA_VERSION < 11020
LOG(INFO) << "CUDA toolkit too old, skipping this test: " << CUDA_VERSION;
return;
#else
int driverVersion;
cuDriverGetVersion(&driverVersion);
if (driverVersion < 11020) {
LOG(INFO) << "Driver version too old, skipping this test: "
<< driverVersion;
return;
}
#endif
  SessionOptions opts = MakeSessionOptions("0", 0, 1, {}, {}, {}, 0,
                                           /*use_cuda_malloc_async=*/true);
std::vector<std::unique_ptr<Device>> devices;
Status status;
int number_instantiated =
se::GpuCudaMallocAsyncAllocator::GetInstantiatedCountTestOnly();
{
status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_THAT(devices, SizeIs(1));
Device* device = devices[0].get();
auto* device_info = device->tensorflow_accelerator_device_info();
EXPECT_NE(device_info, nullptr);
AllocatorAttributes allocator_attributes = AllocatorAttributes();
allocator_attributes.set_gpu_compatible(true);
Allocator* allocator = devices[0]->GetAllocator(allocator_attributes);
void* ptr = allocator->AllocateRaw(Allocator::kAllocatorAlignment, 1024);
EXPECT_NE(ptr, nullptr);
allocator->DeallocateRaw(ptr);
}
EXPECT_EQ(number_instantiated + 1,
se::GpuCudaMallocAsyncAllocator::GetInstantiatedCountTestOnly());
EXPECT_EQ(status.code(), error::OK);
}
TEST_F(GPUDeviceTest, DISABLED_ON_GPU_ROCM(CudaMallocAsyncPreallocate)) {
  SessionOptions opts = MakeSessionOptions("0", 0, 1, {}, {}, {}, 0,
                                           /*use_cuda_malloc_async=*/true);
setenv("TF_CUDA_MALLOC_ASYNC_SUPPORTED_PREALLOC", "2048", 1);
std::vector<std::unique_ptr<Device>> devices;
Status status;
int number_instantiated =
se::GpuCudaMallocAsyncAllocator::GetInstantiatedCountTestOnly();
{
status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_THAT(devices, SizeIs(1));
Device* device = devices[0].get();
auto* device_info = device->tensorflow_accelerator_device_info();
CHECK(device_info);
AllocatorAttributes allocator_attributes = AllocatorAttributes();
allocator_attributes.set_gpu_compatible(true);
Allocator* allocator = devices[0]->GetAllocator(allocator_attributes);
void* ptr = allocator->AllocateRaw(Allocator::kAllocatorAlignment, 1024);
EXPECT_NE(ptr, nullptr);
allocator->DeallocateRaw(ptr);
}
unsetenv("TF_CUDA_MALLOC_ASYNC_SUPPORTED_PREALLOC");
EXPECT_EQ(number_instantiated + 1,
se::GpuCudaMallocAsyncAllocator::GetInstantiatedCountTestOnly());
EXPECT_EQ(status.code(), error::OK);
}
TEST_F(GPUDeviceTest, FailedToParseVisibleDeviceList) {
SessionOptions opts = MakeSessionOptions("0,abc");
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(status, "Could not parse entry");
}
TEST_F(GPUDeviceTest, InvalidGpuId) {
SessionOptions opts = MakeSessionOptions("100");
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(status,
"'visible_device_list' listed an invalid Device id");
}
TEST_F(GPUDeviceTest, DuplicateEntryInVisibleDeviceList) {
SessionOptions opts = MakeSessionOptions("0,0");
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(status,
"visible_device_list contained a duplicate entry");
}
TEST_F(GPUDeviceTest, VirtualDeviceConfigConflictsWithMemoryFractionSettings) {
SessionOptions opts = MakeSessionOptions("0", 0.1, 1, {{}});
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(
status, "It's invalid to set per_process_gpu_memory_fraction");
}
TEST_F(GPUDeviceTest, GpuDeviceCountTooSmall) {
SessionOptions opts = MakeSessionOptions("0", 0, 0, {{}});
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::UNKNOWN);
ExpectErrorMessageSubstr(status,
"Not enough GPUs to create virtual devices.");
}
TEST_F(GPUDeviceTest, NotEnoughGpuInVisibleDeviceList) {
SessionOptions opts = MakeSessionOptions("0", 0, 8, {{}, {}});
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::UNKNOWN);
ExpectErrorMessageSubstr(status,
"Not enough GPUs to create virtual devices.");
}
TEST_F(GPUDeviceTest, VirtualDeviceConfigConflictsWithVisibleDeviceList) {
if (se::GPUMachineManager()->VisibleDeviceCount() < 2) return;
SessionOptions opts = MakeSessionOptions("0,1", 0, 8, {{}});
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(
status,
"The number of GPUs in visible_device_list doesn't "
"match the number of elements in the virtual_devices "
"list.");
}
#ifdef TF_GPU_USE_PJRT
TEST_F(GPUDeviceTest, GpuDeviceWithPjrt) {
SessionOptions opts = MakeSessionOptions("0");
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(1));
EXPECT_GE(devices[0]->attributes().memory_limit(), 0);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
auto pjrt_client = GetPjRtClient(DeviceType(DEVICE_GPU));
EXPECT_OK(pjrt_client.status());
}
#endif
TEST_F(GPUDeviceTest, EmptyVirtualDeviceConfig) {
SessionOptions opts = MakeSessionOptions("0");
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(1));
EXPECT_GE(devices[0]->attributes().memory_limit(), 0);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
}
TEST_F(GPUDeviceTest, SingleVirtualDeviceWithNoMemoryLimit) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(1));
EXPECT_GE(devices[0]->attributes().memory_limit(), 0);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
}
TEST_F(GPUDeviceTest, SingleVirtualDeviceWithMemoryLimitAndNoPriority) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(1));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 123 << 20);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
}
TEST_F(GPUDeviceTest, SingleVirtualDeviceWithInvalidPriority) {
{
#if TENSORFLOW_USE_ROCM
SessionOptions opts =
MakeSessionOptions("0", 0, 1, {{123, 456}}, {{-2, 1}});
#else
SessionOptions opts =
MakeSessionOptions("0", 0, 1, {{123, 456}}, {{-9999, 0}});
#endif
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
#if TENSORFLOW_USE_ROCM
ExpectErrorMessageSubstr(
status,
"Priority -2 is outside the range of supported priorities [-1,1] for"
" virtual device 0 on GPU# 0");
#else
ExpectErrorMessageSubstr(
status, "Priority -9999 is outside the range of supported priorities");
#endif
}
{
#if TENSORFLOW_USE_ROCM
SessionOptions opts =
MakeSessionOptions("0", 0, 1, {{123, 456}}, {{-1, 2}});
#else
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123, 456}}, {{0, 1}});
#endif
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
#if TENSORFLOW_USE_ROCM
ExpectErrorMessageSubstr(
status,
"Priority 2 is outside the range of supported priorities [-1,1] for"
" virtual device 0 on GPU# 0");
#else
ExpectErrorMessageSubstr(
status, "Priority 1 is outside the range of supported priorities");
#endif
}
}
TEST_F(GPUDeviceTest, SingleVirtualDeviceWithMemoryLimitAndPriority) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123}}, {{0}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(1));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 123 << 20);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
}
TEST_F(GPUDeviceTest, MultipleVirtualDevices) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123, 456}}, {{0, -1}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(2));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 123 << 20);
EXPECT_EQ(devices[1]->attributes().memory_limit(), 456 << 20);
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[0].get())->priority(), 0);
EXPECT_EQ(-1, static_cast<BaseGPUDevice*>(devices[1].get())->priority());
ASSERT_EQ(devices[0]->attributes().locality().links().link_size(), 1);
ASSERT_EQ(devices[1]->attributes().locality().links().link_size(), 1);
EXPECT_EQ(devices[0]->attributes().locality().links().link(0).device_id(), 1);
EXPECT_EQ(devices[0]->attributes().locality().links().link(0).type(),
"SAME_DEVICE");
EXPECT_EQ(BaseGPUDeviceFactory::InterconnectMap::kSameDeviceStrength,
devices[0]->attributes().locality().links().link(0).strength());
EXPECT_EQ(devices[1]->attributes().locality().links().link(0).device_id(), 0);
EXPECT_EQ(devices[1]->attributes().locality().links().link(0).type(),
"SAME_DEVICE");
EXPECT_EQ(BaseGPUDeviceFactory::InterconnectMap::kSameDeviceStrength,
devices[1]->attributes().locality().links().link(0).strength());
}
TEST_F(GPUDeviceTest, MultipleVirtualDevicesWithPriority) {
{
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123, 456}}, {{0}});
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
ExpectErrorMessageSubstr(
status,
"Number of virtual device priorities specified doesn't "
"match with number of memory_limit_mb specified for GPU# 0"
" memory_limit_mb size: 2 and priority size: 1");
}
{
SessionOptions opts =
MakeSessionOptions("0", 0, 1, {{123, 456}}, {{-1, 0}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(2));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 123 << 20);
EXPECT_EQ(devices[1]->attributes().memory_limit(), 456 << 20);
EXPECT_EQ(-1, static_cast<BaseGPUDevice*>(devices[0].get())->priority());
EXPECT_EQ(static_cast<BaseGPUDevice*>(devices[1].get())->priority(), 0);
}
}
TEST_F(GPUDeviceTest, MultipleVirtualDevicesWithDeviceOrdinal) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{1, 2}}, {}, {{2, 1}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(2));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 2 << 20);
EXPECT_EQ(devices[1]->attributes().memory_limit(), 1 << 20);
}
TEST_F(GPUDeviceTest,
MultipleVirtualDevicesWithDeviceOrdinalOnMultipleDevices) {
if (se::GPUMachineManager()->VisibleDeviceCount() < 2) return;
SessionOptions opts =
MakeSessionOptions("0,1", 0, 2, {{1, 2}, {3, 4}}, {}, {{1, 2}, {1, 2}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(4));
EXPECT_EQ(devices[0]->attributes().memory_limit(), 1 << 20);
EXPECT_EQ(devices[1]->attributes().memory_limit(), 3 << 20);
EXPECT_EQ(devices[2]->attributes().memory_limit(), 2 << 20);
EXPECT_EQ(devices[3]->attributes().memory_limit(), 4 << 20);
}
TEST_F(GPUDeviceTest, MultipleVirtualDevicesWithSpecifiedNumber) {
  SessionOptions opts =
      MakeSessionOptions("0", 0, 1, {}, {}, {}, /*num_virtual_devices=*/2);
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
EXPECT_THAT(devices, SizeIs(2));
EXPECT_EQ(devices[0]->attributes().memory_limit(),
devices[1]->attributes().memory_limit());
ASSERT_EQ(devices[0]->attributes().locality().links().link_size(), 1);
ASSERT_EQ(devices[1]->attributes().locality().links().link_size(), 1);
EXPECT_EQ(devices[0]->attributes().locality().links().link(0).device_id(), 1);
EXPECT_EQ(devices[0]->attributes().locality().links().link(0).type(),
"SAME_DEVICE");
EXPECT_EQ(BaseGPUDeviceFactory::InterconnectMap::kSameDeviceStrength,
devices[0]->attributes().locality().links().link(0).strength());
EXPECT_EQ(devices[1]->attributes().locality().links().link(0).device_id(), 0);
EXPECT_EQ(devices[1]->attributes().locality().links().link(0).type(),
"SAME_DEVICE");
EXPECT_EQ(BaseGPUDeviceFactory::InterconnectMap::kSameDeviceStrength,
devices[1]->attributes().locality().links().link(0).strength());
}
TEST_F(GPUDeviceTest, UnifiedMemoryUnavailableOnPrePascalGpus) {
if (GetComputeCapability().IsAtLeast(se::CudaComputeCapability::PASCAL_)) {
return;
}
SessionOptions opts = MakeSessionOptions("0", 1.2);
opts.config.mutable_gpu_options()
->mutable_experimental()
->set_use_unified_memory(true);
std::vector<std::unique_ptr<Device>> devices;
Status status = DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices);
EXPECT_EQ(status.code(), error::INTERNAL);
ExpectErrorMessageSubstr(status, "does not support oversubscription.");
}
TEST_F(GPUDeviceTest, UnifiedMemoryAllocation) {
static constexpr double kGpuMemoryFraction = 1.2;
static constexpr tsl::PlatformDeviceId kPlatformDeviceId(0);
if (!GetComputeCapability().IsAtLeast(se::CudaComputeCapability::PASCAL_)) {
LOG(INFO)
<< "Unified memory allocation is not supported with pre-Pascal GPUs.";
return;
}
SessionOptions opts = MakeSessionOptions("0", kGpuMemoryFraction);
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
ASSERT_THAT(devices, SizeIs(1));
int64_t memory_limit = devices[0]->attributes().memory_limit();
ASSERT_EQ(memory_limit,
static_cast<int64_t>(GetTotalGPUMemory(kPlatformDeviceId) *
kGpuMemoryFraction));
AllocatorAttributes allocator_attributes = AllocatorAttributes();
allocator_attributes.set_gpu_compatible(true);
Allocator* allocator = devices[0]->GetAllocator(allocator_attributes);
void* ptr = allocator->AllocateRaw(Allocator::kAllocatorAlignment,
(memory_limit >> 20) << 20);
EXPECT_NE(ptr, nullptr);
allocator->DeallocateRaw(ptr);
}
TEST_F(GPUDeviceTest, CopyTensorInSameDevice) {
SessionOptions opts = MakeSessionOptions("0");
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
Device* device = devices[0].get();
auto* device_info = device->tensorflow_accelerator_device_info();
CHECK(device_info);
DeviceContext* device_context = device_info->default_context;
Allocator* allocator = device->GetAllocator(AllocatorAttributes());
constexpr int kNumElements = 4;
Tensor input_tensor(allocator, DT_FLOAT, TensorShape({kNumElements}));
Tensor output_tensor(allocator, DT_FLOAT, TensorShape({kNumElements}));
Tensor cpu_tensor(cpu_allocator(), DT_FLOAT, TensorShape({kNumElements}));
InitCPUTensor(&cpu_tensor, kNumElements, 0);
CopyCPUToGPU(&cpu_tensor, &output_tensor, device, device_context);
InitCPUTensor(&cpu_tensor, kNumElements, 1);
CopyCPUToGPU(&cpu_tensor, &input_tensor, device, device_context);
Notification note;
device->CopyTensorInSameDevice(&input_tensor, &output_tensor, device_context,
                                 [&note](const Status& s) {
TF_ASSERT_OK(s);
note.Notify();
});
note.WaitForNotification();
Tensor output_cpu_tensor(cpu_allocator(), DT_FLOAT,
TensorShape({kNumElements}));
CopyGPUToCPU(&output_tensor, &output_cpu_tensor, device, device_context);
auto input = cpu_tensor.tensor<float, 1>();
auto output = output_cpu_tensor.tensor<float, 1>();
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(input(i), output(i)) << " for index " << i;
}
}
TEST_F(GPUDeviceTest, DeviceDetails) {
DeviceFactory* factory = DeviceFactory::GetFactory("GPU");
std::vector<string> devices;
TF_ASSERT_OK(factory->ListPhysicalDevices(&devices));
EXPECT_GE(devices.size(), 1);
for (int i = 0; i < devices.size(); i++) {
std::unordered_map<string, string> details;
TF_ASSERT_OK(factory->GetDeviceDetails(i, &details));
EXPECT_NE(details["device_name"], "");
#if TENSORFLOW_USE_ROCM
EXPECT_EQ(details.count("compute_capability"), 0);
#else
EXPECT_NE(details["compute_capability"], "");
#endif
}
}
TEST_F(GPUDeviceTest, StreamToIdMultipleVirtualDevices) {
SessionOptions opts = MakeSessionOptions("0", 0, 1, {{123, 456}}, {{0, -1}});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory("GPU")->CreateDevices(
opts, kDeviceNamePrefix, &devices));
for (int i = 0; i < devices.size(); i++) {
EXPECT_EQ(tsl::TfDeviceId(i),
*BaseGPUDevice::FindTfDeviceId(
devices[i]->tensorflow_accelerator_device_info()->stream));
}
EXPECT_FALSE(BaseGPUDevice::FindTfDeviceId(nullptr).has_value());
}
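// White-box fixture that drives GPUKernelTracker directly, taking the
// tracker's lock so RecordQueued can be called as the device would.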
class GPUKernelTrackerTest : public ::testing::Test {
protected:
void Init(const GPUKernelTracker::Params& params) {
timing_counter_.reset(new SharedCounter);
kernel_tracker_.reset(new GPUKernelTracker(params, Env::Default(), nullptr,
timing_counter_.get(), nullptr,
nullptr));
}
void RecordQueued(uint64 v) {
mutex_lock l(kernel_tracker_->mu_);
kernel_tracker_->RecordQueued(v, 1);
}
std::unique_ptr<GPUKernelTracker> kernel_tracker_;
std::unique_ptr<SharedCounter> timing_counter_;
};
TEST_F(GPUKernelTrackerTest, CappingOnly) {
  Init({0 /*max_interval*/, 0 /*max_bytes*/, 32 /*max_pending*/});
EXPECT_EQ(kernel_tracker_->NumPending(), 0);
EXPECT_EQ(kernel_tracker_->LastTerminatedCount(0), 1);
std::deque<int64_t> queued_counts;
for (int i = 0; i < 32; ++i) {
uint64 queued_count = timing_counter_->next();
queued_counts.push_back(queued_count);
RecordQueued(queued_count);
}
EXPECT_EQ(kernel_tracker_->NumPending(), 32);
EXPECT_EQ(kernel_tracker_->LastTerminatedCount(0), 1);
while (!queued_counts.empty()) {
int64_t x = queued_counts.front();
queued_counts.pop_front();
kernel_tracker_->RecordTerminated(x);
EXPECT_THAT(queued_counts, SizeIs(kernel_tracker_->NumPending()));
EXPECT_EQ(x, kernel_tracker_->LastTerminatedCount(0));
}
EXPECT_EQ(timing_counter_->get(), kernel_tracker_->LastTerminatedCount(0));
int64_t lower_bound = timing_counter_->get();
for (int i = 0; i < 1111; ++i) {
uint64 queued_count = timing_counter_->next();
queued_counts.push_back(queued_count);
RecordQueued(queued_count);
int64_t upper_bound = timing_counter_->get();
if (0 == (i % 16)) {
size_t index = (random::New64() % queued_counts.size());
kernel_tracker_->RecordTerminated(queued_counts[index]);
queued_counts.erase(queued_counts.begin() + index);
EXPECT_LE(lower_bound, kernel_tracker_->LastTerminatedCount(0));
EXPECT_GE(upper_bound, kernel_tracker_->LastTerminatedCount(0));
}
}
while (!queued_counts.empty()) {
int64_t x = queued_counts.front();
queued_counts.pop_front();
kernel_tracker_->RecordTerminated(x);
EXPECT_THAT(queued_counts, SizeIs(kernel_tracker_->NumPending()));
EXPECT_LE(x, kernel_tracker_->LastTerminatedCount(0));
}
EXPECT_EQ(timing_counter_->get(), kernel_tracker_->LastTerminatedCount(0));
}
}
#endif
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
56c494af-7b20-485f-bb65-2e34dfb9d524 | cpp | tensorflow/tensorflow | c_plugin_coordination_service_agent | tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.cc | tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent_test.cc | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
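// Converts a C-API buffer result into StatusOr<std::string>, releasing the
// buffer on success.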
absl::StatusOr<std::string> ProcessGetKeyValueResult(TF_Buffer* result_buf,
TF_Status* status) {
if (TF_GetCode(status) != TF_OK) {
return StatusFromTF_Status(status);
} else {
std::string result{static_cast<const char*>(result_buf->data),
result_buf->length};
TF_DeleteBuffer(result_buf);
return result;
}
}
}
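// Each method below forwards to the plugin C API and converts the returned
// TF_Status back into a tensorflow::Status.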
Status CPluginCoordinationServiceAgent::InsertKeyValue(std::string_view key,
std::string_view value) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_CoordinationServiceInsertKeyValue(key.data(), key.size(), value.data(),
value.size(), agent_, status);
return StatusFromTF_Status(status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf =
TF_CoordinationServiceGetKeyValue(key.data(), key.size(), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
std::string_view key, absl::Duration timeout) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf = TF_CoordinationServiceGetKeyValueWithTimeout(
key.data(), key.size(), absl::ToInt64Seconds(timeout), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::TryGetKeyValue(
std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_Buffer* result_buf = TF_CoordinationServiceTryGetKeyValue(
key.data(), key.size(), agent_, status);
return ProcessGetKeyValueResult(result_buf, status);
}
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
TF_StatusPtr c_status_ptr(TF_NewStatus());
TF_Status* status = c_status_ptr.get();
TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
return StatusFromTF_Status(status);
}
} | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace {
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
using tsl::CallOptions;
using tsl::DeleteKeyValueRequest;
using tsl::DeleteKeyValueResponse;
using tsl::GetKeyValueRequest;
using tsl::GetKeyValueResponse;
using tsl::InsertKeyValueRequest;
using tsl::InsertKeyValueResponse;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::Pointee;
using ::testing::SetArgPointee;
using ::testing::WithArgs;
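// gMock matcher that compares protobuf messages by DebugString equality.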
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.DebugString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.DebugString() == expected_;
}
void DescribeTo(std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
MATCHER(KvEq, "simple KeyValueEntry matcher") {
const KeyValueEntry& kv0 = std::get<0>(arg);
const KeyValueEntry& kv1 = std::get<1>(arg);
return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
MOCK_METHOD(void, GetKeyValueAsync,
(CallOptions * call_opts, const GetKeyValueRequest*,
GetKeyValueResponse*, StatusCallback),
(override));
MOCK_METHOD(void, TryGetKeyValueAsync,
(const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, InsertKeyValueAsync,
(const InsertKeyValueRequest*, InsertKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, DeleteKeyValueAsync,
(const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
StatusCallback),
(override));
void GetKeyValueDirAsync(const tsl::GetKeyValueDirRequest* request,
tsl::GetKeyValueDirResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("GetKeyValueDirAsync"));
}
void ResetTaskAsync(const tsl::ResetTaskRequest* request,
tsl::ResetTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ResetTaskAsync"));
}
void ReportErrorToServiceAsync(
const tsl::ReportErrorToServiceRequest* request,
tsl::ReportErrorToServiceResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToServiceAsync"));
}
void BarrierAsync(const tsl::BarrierRequest* request,
tsl::BarrierResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("BarrierAsync"));
}
void GetTaskStateAsync(const tsl::GetTaskStateRequest* request,
tsl::GetTaskStateResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("GetTaskStateAsync"));
}
void WaitForAllTasksAsync(const tsl::WaitForAllTasksRequest* request,
tsl::WaitForAllTasksResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("WaitForAllTasksAsync"));
}
void CancelBarrierAsync(const tsl::CancelBarrierRequest* request,
tsl::CancelBarrierResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("CancelBarrierAsync"));
}
void RegisterTaskAsync(tsl::CallOptions*,
const tsl::RegisterTaskRequest* request,
tsl::RegisterTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("RegisterTaskAsync"));
}
void ShutdownTaskAsync(tsl::CallOptions*,
const tsl::ShutdownTaskRequest* request,
tsl::ShutdownTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ShutdownTaskAsync"));
}
void HeartbeatAsync(tsl::CallOptions*, const tsl::HeartbeatRequest* request,
tsl::HeartbeatResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("HeartbeatAsync"));
}
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToTaskAsync"));
}
void PollForErrorAsync(CallOptions* call_opts,
const PollForErrorRequest* request,
PollForErrorResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("PollForErrorAsync"));
}
};
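// Fixture wiring a real CoordinationServiceAgent to the mock client above;
// the C plugin agent under test delegates to impl_.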
class CPluginCoordinationServiceAgentTest : public ::testing::Test {
public:
void InitializeAgent(CoordinationServiceConfig config = {}) {
config.set_service_leader("test_leader");
TF_ASSERT_OK(impl_->Initialize(
tsl::Env::Default(), "test_job",
        /*task_id=*/0, config, std::move(client_),
        /*error_fn=*/[](Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
}));
}
TestCoordinationClient* GetClient() {
CHECK(client_ != nullptr)
<< "GetClient() was called after InitializeAgent()";
return client_.get();
}
protected:
std::unique_ptr<CoordinationServiceAgent> impl_ =
tsl::CreateCoordinationServiceAgent();
std::unique_ptr<CPluginCoordinationServiceAgent> agent_ =
std::make_unique<CPluginCoordinationServiceAgent>(impl_.get());
std::unique_ptr<TestCoordinationClient> client_ =
std::make_unique<TestCoordinationClient>();
};
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
const std::string test_key = "test_key";
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<3>([&](StatusCallback done) {
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_EQ(result.status().code(), error::DEADLINE_EXCEEDED);
owned_done(absl::CancelledError("error"));
}
TEST_F(CPluginCoordinationServiceAgentTest,
GetKeyValue_ZeroTimeout_ReturnError) {
const std::string test_key = "test_key";
auto result = agent_->GetKeyValue(test_key, absl::ZeroDuration());
EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
TEST_F(CPluginCoordinationServiceAgentTest,
GetKeyValue_NegativeTimeout_ReturnError) {
const std::string test_key = "test_key";
auto result = agent_->GetKeyValue(test_key, absl::Seconds(-1));
EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
TEST_F(CPluginCoordinationServiceAgentTest, InsertKeyValue_Success) {
const std::string test_key = "test_key";
const std::string test_value = "test_value";
InsertKeyValueRequest expected_input;
auto kv = expected_input.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
EXPECT_CALL(*GetClient(),
InsertKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->InsertKeyValue(test_key, test_value));
}
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
const std::string test_key = "test_x_key";
DeleteKeyValueRequest expected_input;
expected_input.set_key(test_key);
expected_input.set_is_directory(true);
EXPECT_CALL(*GetClient(),
DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}
TEST_F(CPluginCoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
TryGetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->TryGetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5f7b9eb-13c0-4e83-beba-fbe9edad6cf3 | cpp | tensorflow/tensorflow | device_event_mgr | tensorflow/core/common_runtime/device/device_event_mgr.cc | tensorflow/core/common_runtime/device/device_event_mgr_test.cc | #include "tensorflow/core/common_runtime/device/device_event_mgr.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace {
static const int kNumThreads = 2;
}
namespace device_event_mgr {
class ThreadLabel {
public:
static const char* GetValue() { return value_; }
static void SetValue(const char* v) { value_ = v; }
private:
static thread_local const char* value_;
};
thread_local const char* ThreadLabel::value_ = "";
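// If invoked from an EventMgr callback thread (identified by the thread-local
// label), runs f when provided, otherwise logs a warning with a stack trace.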
void WarnIfInCallback(std::function<void()> f) {
const char* label = ThreadLabel::GetValue();
if (label && !strcmp(label, "device_event_mgr")) {
if (f) {
f();
} else {
LOG(WARNING) << "Executing inside EventMgr callback thread: "
<< CurrentStackTrace();
}
}
}
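// Tags every thread in the pool with the "device_event_mgr" label, blocking
// until all threads have run the initializer so none is missed.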
void InitThreadpoolLabels(thread::ThreadPool* threadpool) {
static const char* label = "device_event_mgr";
mutex mu;
int init_count = 0;
condition_variable all_initialized;
int exit_count = 0;
condition_variable ready_to_exit;
const int num_threads = threadpool->NumThreads();
for (int i = 0; i < num_threads; ++i) {
threadpool->Schedule([num_threads, &mu, &init_count, &all_initialized,
&exit_count, &ready_to_exit]() {
device_event_mgr::ThreadLabel::SetValue(label);
mutex_lock l(mu);
++init_count;
if (init_count == num_threads) {
all_initialized.notify_all();
}
while (init_count < num_threads) {
all_initialized.wait(l);
}
if (++exit_count == num_threads) {
ready_to_exit.notify_all();
}
});
}
{
mutex_lock l(mu);
while (exit_count < num_threads) {
ready_to_exit.wait(l);
}
}
}
}
EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
: exec_(se),
polling_active_delay_usecs_(gpu_options.polling_active_delay_usecs()
? gpu_options.polling_active_delay_usecs()
: 10),
threadpool_(Env::Default(), "Device_Event_Manager", kNumThreads) {
device_event_mgr::InitThreadpoolLabels(&threadpool_);
StartPollingLoop();
}
EventMgr::~EventMgr() {
StopPollingLoop();
for (auto& [stream, stream_callbacks] : callbacks_) {
for (auto& [event, callback] : stream_callbacks) {
threadpool_.Schedule(std::move(callback));
}
}
}
void EventMgr::StartPollingLoop() {
CHECK(polling_stopped_ == nullptr);
{
mutex_lock l(mu_);
stop_polling_ = false;
}
polling_stopped_ = std::make_unique<Notification>();
threadpool_.Schedule([this]() { PollLoop(); });
}
void EventMgr::StopPollingLoop() {
if (polling_stopped_) {
{
mutex_lock l(mu_);
stop_polling_ = true;
events_pending_.notify_all();
}
polling_stopped_->WaitForNotification();
polling_stopped_.reset(nullptr);
}
}
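// Body of the polling thread: wait while there is no work, poll outstanding
// events under the lock, then run completed callbacks outside it.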
void EventMgr::PollLoop() {
ToFreeVector to_free;
while (true) {
bool events_still_pending;
{
mutex_lock l(mu_);
if (stop_polling_) {
break;
}
if (callbacks_.empty()) {
events_pending_.wait(l);
}
PollEvents(nullptr, &to_free);
events_still_pending = !callbacks_.empty();
}
FreeMemory(to_free);
to_free.clear();
if (events_still_pending) {
Env::Default()->SleepForMicroseconds(polling_active_delay_usecs_);
}
}
polling_stopped_->Notify();
}
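// Requires mu_. Records an event on the stream (reusing a pooled se::Event
// when available) and queues func to run once the event completes.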
void EventMgr::EnqueueCallback(se::Stream* stream, std::function<void()> func) {
VLOG(2) << "EnqueueCallback with one or more callbacks pending on "
<< callbacks_.size() << " streams and " << free_events_.size()
<< " unused event objects.";
if (free_events_.empty()) {
free_events_.emplace_back(exec_->CreateEvent().value());
}
std::unique_ptr<se::Event> e = std::move(free_events_.back());
free_events_.pop_back();
stream->RecordEvent(e.get()).IgnoreError();
bool was_empty = callbacks_.empty();
callbacks_[stream].push_back({std::move(e), std::move(func)});
if (was_empty) {
events_pending_.notify_all();
}
}
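// Requires mu_. Moves callbacks whose events have completed into *to_free;
// polls a single stream when one is given, otherwise every tracked stream.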
void EventMgr::PollEvents(se::Stream* stream,
absl::InlinedVector<InUse, 4UL>* to_free) {
VLOG(2) << "PollEvents with one or more callbacks pending on "
<< callbacks_.size() << " streams and " << free_events_.size()
<< " unused event objects.";
auto poll_events_for_stream_it =
[&](auto& stream_it) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto& stream_callbacks = stream_it->second;
auto it = stream_callbacks.begin();
while (it != stream_callbacks.end()) {
auto& [event, callback] = *it;
se::Event::Status s = event->PollForStatus();
bool keep_looping = true;
switch (s) {
case se::Event::Status::kUnknown:
case se::Event::Status::kError:
LOG(FATAL) << "Unexpected Event status: " << static_cast<int>(s);
break;
case se::Event::Status::kPending:
keep_looping = false;
break;
case se::Event::Status::kComplete:
free_events_.push_back(std::move(event));
to_free->push_back({nullptr, std::move(callback)});
++it;
break;
}
if (!keep_looping) {
break;
}
}
stream_callbacks.erase(stream_callbacks.begin(), it);
if (stream_callbacks.empty()) {
callbacks_.erase(stream_it++);
} else {
stream_it++;
}
};
if (stream != nullptr) {
auto stream_it = callbacks_.find(stream);
if (stream_it != callbacks_.end()) {
poll_events_for_stream_it(stream_it);
}
} else {
for (auto stream_it = callbacks_.begin(); stream_it != callbacks_.end();) {
poll_events_for_stream_it(stream_it);
}
}
}
EventMgrFactory* EventMgrFactory::Singleton() {
static EventMgrFactory* instance = new EventMgrFactory;
return instance;
}
EventMgr* EventMgrFactory::GetEventMgr(se::StreamExecutor* se,
const GPUOptions& gpu_options) {
mutex_lock l(mu_);
auto itr = event_mgr_map_.find(se);
if (itr == event_mgr_map_.end()) {
auto event_mgr = new EventMgr(se, gpu_options);
event_mgr_map_[se] = event_mgr;
return event_mgr;
} else {
return itr->second;
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/common_runtime/device/device_event_mgr.h"
#include <atomic>
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tsl/framework/device_id.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class TEST_EventMgr : public EventMgr {
public:
TEST_EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
: EventMgr(se, gpu_options) {}
};
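// White-box helper exposing EventMgr queue sizes and manual polling control
// to the tests below.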
class TEST_EventMgrHelper {
public:
explicit TEST_EventMgrHelper(EventMgr* em) : em_(em) {
StopPollingLoop();
}
size_t queue_size() {
mutex_lock l(em_->mu_);
size_t n = 0;
for (const auto& [stream, events_and_callbacks] : em_->callbacks_) {
n += events_and_callbacks.size();
}
return n;
}
size_t free_size() {
mutex_lock l(em_->mu_);
return em_->free_events_.size();
}
void PollEvents() {
while (queue_size() > 0) {
EventMgr::ToFreeVector to_free;
{
mutex_lock l(em_->mu_);
em_->PollEvents(nullptr, &to_free);
}
em_->FreeMemory(to_free);
}
}
void StopPollingLoop() { return em_->StopPollingLoop(); }
void StartPollingLoop() { return em_->StartPollingLoop(); }
private:
EventMgr* em_;
};
static std::atomic_int_fast64_t live_tensor_bytes(0);
class TestTensorBuffer : public TensorBuffer {
public:
explicit TestTensorBuffer(size_t bytes)
: TensorBuffer(nullptr), bytes_(bytes) {
live_tensor_bytes += bytes_;
}
~TestTensorBuffer() override { live_tensor_bytes -= bytes_; }
size_t size() const override { return bytes_; }
TensorBuffer* root_buffer() override { return nullptr; }
void FillAllocationDescription(AllocationDescription* arg) const override {}
private:
size_t bytes_;
};
namespace {
TEST(EventMgr, Empty) {
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TEST_EventMgr em(stream_exec, GPUOptions());
TEST_EventMgrHelper th(&em);
EXPECT_EQ(0, th.queue_size());
EXPECT_EQ(0, th.free_size());
}
TEST(EventMgr, WarnIfInCallback) {
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TEST_EventMgr em(stream_exec, GPUOptions());
TEST_EventMgrHelper th(&em);
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
bool hit = false;
th.StartPollingLoop();
device_event_mgr::WarnIfInCallback([&hit] { hit = true; });
EXPECT_FALSE(hit);
Notification note;
  em.ThenExecute(stream.get(), [&hit, &note]() {
    device_event_mgr::WarnIfInCallback([&hit, &note] {
hit = true;
note.Notify();
});
});
note.WaitForNotification();
EXPECT_TRUE(hit);
}
}
class GPUDeviceTestHelper {
public:
GPUDeviceTestHelper(size_t memory_limit, int pending_cap) {
SessionOptions sops;
device_ =
DeviceFactory::NewDevice(DEVICE_GPU, sops, "/job:a/replica:0/task:0");
gpu_.reset(reinterpret_cast<BaseGPUDevice*>(device_.release()));
    gpu_allocator_ = GPUProcessState::singleton()->GetGPUAllocator(
        GPUOptions(), tsl::TfDeviceId(0), memory_limit, /*peer_gpu_ids=*/{});
    host_allocator_ = GPUProcessState::singleton()->GetGpuHostAllocator(
        /*options=*/{}, /*numa_node=*/0);
}
BaseGPUDevice* gpu() { return gpu_.get(); }
Allocator* gpu_allocator() { return gpu_allocator_; }
Allocator* host_allocator() { return host_allocator_; }
se::Stream* compute_stream() { return gpu_->stream_->compute; }
se::Stream* h2d_stream() { return gpu_->stream_->host_to_device; }
se::Stream* d2h_stream() { return gpu_->stream_->device_to_host; }
se::Stream* d2d_stream() { return gpu_->stream_->device_to_device[0]; }
EventMgr* event_mgr() { return gpu_->em_; }
int pending_cap() { return gpu_->pending_cap_; }
private:
std::unique_ptr<Device> device_;
std::unique_ptr<BaseGPUDevice> gpu_;
Allocator* gpu_allocator_;
Allocator* host_allocator_;
};
namespace {
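// Builds chains of GPU Add ops bracketed by host<->device copies so the
// benchmarks below can measure EventMgr callback latency.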
class EMBenchmarkHelper {
GPUDeviceTestHelper* gpu_helper_;
std::vector<std::unique_ptr<OpKernel>> add_kernels_;
std::vector<OpKernelContext::Params*> add_params_;
std::vector<std::unique_ptr<OpKernelContext>> add_contexts_;
NodeDef add_node_def_;
NodeDef id_node_def_;
gtl::InlinedVector<TensorValue, 4> add_inputs_;
std::vector<AllocatorAttributes> allocator_attrs_;
gtl::InlinedVector<Tensor, 4> gpu_inputs_;
gtl::InlinedVector<Tensor, 4> gpu_outputs_;
gtl::InlinedVector<Tensor, 4> host_inputs_;
gtl::InlinedVector<Tensor, 4> host_outputs_;
public:
static constexpr int kTDim = 1024;
int num_ops() const { return add_kernels_.size(); }
size_t tensor_size() const {
return add_inputs_.empty() ? 0 : add_inputs_[0]->NumElements();
}
Tensor& host_outputs(int i) { return host_outputs_[i]; }
Tensor& host_inputs(int i) { return host_inputs_[i]; }
EMBenchmarkHelper(GPUDeviceTestHelper* h) : gpu_helper_(h) {}
void ReInit(int num_ops, int tensor_size) {
gpu_inputs_.clear();
while (gpu_inputs_.size() < 2) {
gpu_inputs_.push_back(Tensor(gpu_helper_->gpu_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
}
gpu_outputs_.clear();
while (gpu_outputs_.empty()) {
gpu_outputs_.push_back(Tensor(gpu_helper_->gpu_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
}
host_inputs_.clear();
while (host_inputs_.size() < 2) {
int instance_index = host_inputs_.size();
host_inputs_.push_back(Tensor(gpu_helper_->host_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
for (int i = 0; i < tensor_size; ++i) {
host_inputs_.back().flat<float>()(i) =
i * (1.0 + (0.5 * instance_index));
}
}
host_outputs_.clear();
while (host_outputs_.empty()) {
host_outputs_.push_back(Tensor(gpu_helper_->host_allocator(), DT_FLOAT,
{tensor_size}, AllocationAttributes()));
for (int i = 0; i < tensor_size; ++i) {
host_outputs_.back().flat<float>()(i) = -1;
}
}
add_kernels_.clear();
add_params_.clear();
while (add_kernels_.size() < num_ops) {
MakeAddOp();
}
}
std::unique_ptr<OpKernel> GetOpKernel(const NodeDef& node_def,
Status* status) {
return CreateOpKernel("GPU", gpu_helper_->gpu(),
gpu_helper_->gpu_allocator(), node_def,
TF_GRAPH_DEF_VERSION, status);
}
void MakeAddOp() {
if (add_kernels_.empty()) {
TF_ASSERT_OK(NodeDefBuilder("add_op", "Add")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Device("/job:a/replica:0/task:0/GPU:0")
.Finalize(&add_node_def_));
}
Status status;
add_kernels_.emplace_back(GetOpKernel(add_node_def_, &status));
TF_ASSERT_OK(status);
add_params_.push_back(new OpKernelContext::Params);
PrepOpKernel(add_params_.back(), add_kernels_.back().get());
}
void SetOutputAttrs(OpKernelContext::Params* params,
std::vector<AllocatorAttributes>* attrs) {
attrs->clear();
for (int index = 0; index < params->op_kernel->num_outputs(); index++) {
AllocatorAttributes attr;
const bool on_host =
(params->op_kernel->output_memory_types()[index] == HOST_MEMORY);
attr.set_on_host(on_host);
attrs->push_back(attr);
}
params->output_attr_array = attrs->data();
params->forward_from_array = {};
}
void PrepOpKernel(OpKernelContext::Params* params, OpKernel* kernel) {
params->step_id = 1;
params->device = gpu_helper_->gpu();
params->log_memory = false;
params->rendezvous = nullptr;
params->collective_executor = nullptr;
params->session_state = nullptr;
params->session_handle = "session_handle";
params->tensor_store = nullptr;
params->cancellation_manager = nullptr;
params->call_frame = nullptr;
params->function_library = nullptr;
params->runner = nullptr;
params->graph_collector = nullptr;
params->step_container = nullptr;
params->slice_reader_cache = nullptr;
params->resource_manager = gpu_helper_->gpu()->resource_manager();
params->stats_collector = nullptr;
params->inc_num_deferred_ops_function = nullptr;
params->dec_num_deferred_ops_function = nullptr;
params->op_device_context = nullptr;
params->track_allocations = false;
params->op_kernel = kernel;
params->frame_iter = FrameAndIter(0, 0);
params->is_input_dead = false;
if (add_inputs_.empty()) {
add_inputs_.resize(2);
add_inputs_[0] = TensorValue(&gpu_inputs_[0]);
add_inputs_[1] = TensorValue(&gpu_inputs_[1]);
}
params->inputs = add_inputs_;
SetOutputAttrs(params, &allocator_attrs_);
}
struct TimeSet {
int iter = 0;
int64_t start = 0;
int64_t copy_done = 0;
int64_t compute_done = 0;
int64_t final_copy = 0;
int64_t all_done = 0;
};
void DisplayTimes(std::vector<TimeSet>* times) {
LOG(INFO) << "Summarize set of " << times->size() << " iters";
for (auto& ts : *times) {
ts.final_copy = ts.all_done - ts.compute_done;
ts.compute_done = ts.compute_done - ts.copy_done;
ts.copy_done = ts.copy_done - ts.start;
ts.all_done = ts.all_done - ts.start;
}
struct TSSort {
bool operator()(const TimeSet& a, const TimeSet& b) {
return a.all_done < b.all_done;
}
};
std::sort(times->begin(), times->end(), TSSort());
int64_t last_time = 0;
for (int i = 0; i < times->size(); ++i) {
if (i == (times->size() - 1) ||
(times->at(i).all_done >= (1.05 * last_time))) {
LOG(INFO) << "rank " << i << " iter: " << times->at(i).iter
<< " copy: " << times->at(i).copy_done
<< " compute: " << times->at(i).compute_done
<< " copy back: " << times->at(i).final_copy
<< " sum: " << times->at(i).all_done;
last_time = times->at(i).all_done;
}
}
}
void DoAddChain(int adds_per_copy, int rounds, bool event_after_add,
std::function<void()> callback, std::vector<TimeSet>* times) {
Tensor alias0(gpu_inputs_[0]);
Tensor alias1(gpu_inputs_[1]);
for (int r = 0; r < rounds; ++r) {
if (times) {
times->at(r).iter = r;
times->at(r).start = Env::Default()->NowMicros();
}
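      // Copy both inputs to the device on the h2d stream, then make the
      // compute stream wait for the transfers.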
TF_ASSERT_OK(
gpu_helper_->h2d_stream()->WaitFor(gpu_helper_->compute_stream()));
const int64_t src_bytes = host_inputs_[0].TotalBytes();
se::DeviceMemoryBase gpu_dst_ptr0(DMAHelper::base(&gpu_inputs_[0]),
src_bytes);
TF_ASSERT_OK(gpu_helper_->h2d_stream()->Memcpy(
&gpu_dst_ptr0, DMAHelper::base(&host_inputs_[0]), src_bytes));
se::DeviceMemoryBase gpu_dst_ptr1(DMAHelper::base(&gpu_inputs_[1]),
src_bytes);
TF_ASSERT_OK(gpu_helper_->h2d_stream()->Memcpy(
&gpu_dst_ptr1, DMAHelper::base(&host_inputs_[1]), src_bytes));
TF_ASSERT_OK(
gpu_helper_->compute_stream()->WaitFor(gpu_helper_->h2d_stream()));
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->compute_stream(), [times, r]() {
times->at(r).copy_done = Env::Default()->NowMicros();
});
}
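      // Run the chain of Add kernels on the compute stream.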
std::unique_ptr<OpKernelContext> ctx;
for (int apc = 0; apc < adds_per_copy; ++apc) {
ctx.reset(new OpKernelContext(add_params_[apc], 1));
gpu_helper_->gpu()->Compute(add_kernels_[apc].get(), ctx.get());
TF_ASSERT_OK(ctx->status());
if (event_after_add) {
gpu_helper_->event_mgr()->ThenExecute(gpu_helper_->compute_stream(),
callback);
}
}
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->compute_stream(), [times, r]() {
times->at(r).compute_done = Env::Default()->NowMicros();
});
}
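      // Copy the result back to the host on the d2h stream and schedule the
      // caller's callback when the copy completes.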
TF_ASSERT_OK(
gpu_helper_->d2h_stream()->WaitFor(gpu_helper_->compute_stream()));
const int64_t return_bytes = ctx->mutable_output(0)->TotalBytes();
se::DeviceMemoryBase gpu_src_ptr(DMAHelper::base(ctx->mutable_output(0)),
return_bytes);
TF_ASSERT_OK(gpu_helper_->d2h_stream()->Memcpy(
DMAHelper::base(&host_outputs_[0]), gpu_src_ptr, return_bytes));
gpu_helper_->event_mgr()->ThenExecute(gpu_helper_->d2h_stream(),
callback);
if (times) {
gpu_helper_->event_mgr()->ThenExecute(
gpu_helper_->d2h_stream(), [times, r]() {
times->at(r).all_done = Env::Default()->NowMicros();
});
}
}
}
};
static void BM_no_ops(::testing::benchmark::State& state) {
const int threads = state.range(0);
const int iters = state.max_iterations;
auto stream_exec = se::GPUMachineManager()->ExecutorForDevice(0).value();
TF_ASSERT_OK_AND_ASSIGN(auto stream, stream_exec->CreateStream());
TEST_EventMgr em(stream_exec, GPUOptions());
auto benchmark_exec = [&]() {
std::atomic<int> counter;
counter.store(0, std::memory_order_seq_cst);
se::Stream* stream_ptr = stream.get();
auto runner = [&em, &counter, stream_ptr, iters]() {
auto callback = [&counter]() { counter.fetch_add(1); };
for (int i = 0; i < iters; ++i) {
em.ThenExecute(stream_ptr, callback);
}
};
for (int t = 0; t < threads; ++t) {
Env::Default()->SchedClosure(runner);
}
int expected = iters * threads;
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
};
#ifdef PLATFORM_GOOGLE
while (state.KeepRunningBatch(state.max_iterations)) {
benchmark_exec();
}
#else
state.ResumeTiming();
benchmark_exec();
state.PauseTiming();
#endif
}
BENCHMARK(BM_no_ops)->UseRealTime()->Arg(4)->Arg(8)->Arg(32);
GPUDeviceTestHelper* gpu_helper = nullptr;
EMBenchmarkHelper* bm_helper = nullptr;
mutex helper_mu;
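// NOTE(editor): BM_chain_ops is the shared driver for the BM_chain_*
// benchmarks below. It (re)builds the cached GPUDeviceTestHelper /
// EMBenchmarkHelper pair when tensor_size, adds_per_round, or pending_cap
// change, runs one warm-up round, and then times DoAddChain, optionally
// recording per-phase timestamps when VLOG(1) is enabled.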
#ifdef PLATFORM_GOOGLE
static void BM_chain_ops(::testing::benchmark::State& state, int tensor_size,
int adds_per_round, bool event_after_add,
int pending_cap) {
#else
static void BM_chain_ops(::testing::benchmark::State& state, int tensor_size,
int adds_per_round, bool event_after_add,
int pending_cap, int threads) {
#endif
const int iters = state.max_iterations;
{
mutex_lock l(helper_mu);
if (gpu_helper && gpu_helper->pending_cap() != pending_cap) {
delete bm_helper;
bm_helper = nullptr;
delete gpu_helper;
gpu_helper = nullptr;
}
if (!gpu_helper) {
gpu_helper = new GPUDeviceTestHelper(1 << 24, pending_cap);
bm_helper = new EMBenchmarkHelper(gpu_helper);
}
if (bm_helper->num_ops() != adds_per_round ||
bm_helper->tensor_size() != tensor_size) {
bm_helper->ReInit(adds_per_round, tensor_size);
}
}
std::vector<EMBenchmarkHelper::TimeSet> times;
std::vector<EMBenchmarkHelper::TimeSet>* time_ptr = nullptr;
if (VLOG_IS_ON(1)) {
times.resize(iters);
    time_ptr = &times;
}
std::atomic<int> counter;
counter.store(0, std::memory_order_seq_cst);
auto callback = [&counter]() { counter.fetch_add(1); };
int expected = 1 + (event_after_add ? adds_per_round : 0);
bm_helper->DoAddChain(adds_per_round, 1, event_after_add, callback, nullptr);
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
counter = 0;
#ifdef PLATFORM_GOOGLE
while (state.KeepRunningBatch(state.max_iterations)) {
expected = iters * (1 + (event_after_add ? adds_per_round : 0));
bm_helper->DoAddChain(adds_per_round, iters, event_after_add, callback,
time_ptr);
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
}
#else
state.ResumeTiming();
expected = threads * iters * (1 + (event_after_add ? adds_per_round : 0));
for (int i = 0; i < threads; ++i) {
Env::Default()->SchedClosure(
[callback, iters, adds_per_round, event_after_add, time_ptr]() {
bm_helper->DoAddChain(adds_per_round, iters, event_after_add,
callback, time_ptr);
});
}
while (counter < expected) {
Env::Default()->SleepForMicroseconds(1);
}
state.PauseTiming();
#endif
VLOG(1) << "counter = " << counter << " post_execute Output: "
<< bm_helper->host_outputs(0).SummarizeValue(64);
if (time_ptr) bm_helper->DisplayTimes(time_ptr);
}
#ifdef PLATFORM_GOOGLE
static void BM_chain_1024_1_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 1, false, 0);
}
static void BM_chain_1024_1_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 1, true, 0);
}
static void BM_chain_1024_10_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 10, false, 0);
}
static void BM_chain_1024_10_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 10, true, 0);
}
static void BM_chain_1024_100_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 100, false, 0);
}
static void BM_chain_1024_100_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1024, 100, true, 0);
}
static void BM_chain_1M_1_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 1, false, 0);
}
static void BM_chain_1M_1_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 1, true, 0);
}
static void BM_chain_1M_10_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 10, false, 0);
}
static void BM_chain_1M_10_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 10, true, 0);
}
static void BM_chain_1M_100_false(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 100, false, 0);
}
static void BM_chain_1M_100_true(::testing::benchmark::State& state) {
BM_chain_ops(state, 1 << 20, 100, true, 0);
}
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(1);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(2);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Threads(8);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Threads(8);
#else
static void BM_chain_1024_1_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 1, false, 0, threads);
}
static void BM_chain_1024_1_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 1, true, 0, threads);
}
static void BM_chain_1024_10_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 10, false, 0, threads);
}
static void BM_chain_1024_10_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 10, true, 0, threads);
}
static void BM_chain_1024_100_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 100, false, 0, threads);
}
static void BM_chain_1024_100_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1024, 100, true, 0, threads);
}
static void BM_chain_1M_1_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 1, false, 0, threads);
}
static void BM_chain_1M_1_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 1, true, 0, threads);
}
static void BM_chain_1M_10_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 10, false, 0, threads);
}
static void BM_chain_1M_10_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 10, true, 0, threads);
}
static void BM_chain_1M_100_false(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 100, false, 0, threads);
}
static void BM_chain_1M_100_true(::testing::benchmark::State& state) {
const int threads = state.range(0);
BM_chain_ops(state, 1 << 20, 100, true, 0, threads);
}
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_1_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_1_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_10_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_10_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1024_100_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1024_100_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_1_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_1_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_10_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_10_true)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(1);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(2);
BENCHMARK(BM_chain_1M_100_false)->UseRealTime()->Arg(8);
BENCHMARK(BM_chain_1M_100_true)->UseRealTime()->Arg(8);
#endif
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_event_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_event_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
abb610d2-6795-47fc-9ff4-a077f44ff498 | cpp | tensorflow/tensorflow | kernel_and_device | tensorflow/core/common_runtime/eager/kernel_and_device.cc | tensorflow/core/common_runtime/eager/kernel_and_device_test.cc | #include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/denormal.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/setround.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#endif
namespace tensorflow {
Status EagerKernelArgs::GetLocalArg(const FunctionArgIndex& index,
Tensor* val) const {
if (index.sub_index >= 0) {
return errors::InvalidArgument("Got unexpected sub_index ", index.sub_index,
" for argument ", index.index);
}
Tensor* arg = tensor_args_.at(index.index).tensor;
if (arg) {
*val = *arg;
return absl::OkStatus();
} else {
return errors::NotFound("Argument ", index.index, " has no local tensor.");
}
}
std::vector<Tensor> EagerKernelArgs::GetLocalTensors() const {
std::vector<Tensor> local_inputs;
local_inputs.reserve(tensor_args_.size());
for (const TensorValue& tensor_value : tensor_args_) {
local_inputs.push_back(*tensor_value.tensor);
}
return local_inputs;
}
std::function<void(std::function<void()>)>* KernelAndDevice::get_runner()
const {
if (runner_) {
return runner_;
} else {
static auto* default_runner =
new std::function<void(std::function<void()>)>(
[](const std::function<void()>& f) { f(); });
return default_runner;
}
}
KernelAndDeviceFunc::~KernelAndDeviceFunc() {
if (handle_ != kInvalidHandle) {
Status status = pflr_->ReleaseHandle(handle_);
if (!status.ok()) {
LOG(INFO) << "Ignoring error status when releasing multi-device function "
"handle "
<< status;
}
}
}
Status KernelAndDeviceOp::Init(
const bool log_device_placement, const NodeDef& ndef,
    GraphCollector* graph_collector,
const absl::optional<EagerFunctionParams>& eager_func_params) {
if (eager_func_params.has_value()) {
return absl::InternalError(
"KernelAndDeviceOp does not support EagerFunctionParams.");
}
OpKernel* k = nullptr;
if (flr_ == nullptr) {
return errors::Internal(
"A valid FunctionLibraryRuntime must be provided when running ops "
"based on OpKernel.");
}
std::shared_ptr<const NodeProperties> props;
TF_RETURN_IF_ERROR(NodeProperties::CreateFromNodeDef(
ndef, flr_->GetFunctionLibraryDefinition(), &props));
TF_RETURN_IF_ERROR(flr_->CreateKernel(props, &k));
kernel_.reset(k);
const auto* op_reg_data = OpRegistry::Global()->LookUp(ndef.op());
if (op_reg_data != nullptr) {
is_distributed_communication_op_ =
op_reg_data->op_def.is_distributed_communication();
}
input_alloc_attrs_.resize(kernel_->num_inputs());
input_devices_.resize(kernel_->num_inputs(), device_);
for (size_t i = 0; i < input_alloc_attrs_.size(); ++i) {
bool host = kernel_->input_memory_types()[i] == tensorflow::HOST_MEMORY;
input_alloc_attrs_[i].set_on_host(host);
if (host && input_devices_[i]->device_type() != DEVICE_CPU) {
input_devices_[i] = host_cpu_device_;
}
}
output_alloc_attrs_.resize(kernel_->num_outputs());
for (size_t i = 0; i < output_alloc_attrs_.size(); ++i) {
output_alloc_attrs_[i].set_on_host(kernel_->output_memory_types()[i] ==
tensorflow::HOST_MEMORY);
}
return absl::OkStatus();
}
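// NOTE(editor): InstantiateFunc resolves the FunctionDef (honoring a
// per-call function library override in eager_func_params when present),
// derives input/output dtypes, and fills
// FunctionLibraryRuntime::InstantiateOptions -- target and input/output
// devices, the grappler optimization hook, executor type, and XLA /
// soft-placement knobs -- before instantiating a multi-device function
// handle through the ProcessFunctionLibraryRuntime.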
Status KernelAndDeviceFunc::InstantiateFunc(
const bool log_device_placement, const NodeDef& ndef,
GraphCollector* graph_collector,
const absl::optional<EagerFunctionParams>& eager_func_params) {
const OpDef* op_def = nullptr;
const FunctionLibraryDefinition* func_lib_def;
FunctionLibraryRuntime::InstantiateOptions options;
if (eager_func_params.has_value() &&
eager_func_params.value().func_lib_def_override != nullptr) {
func_lib_def = eager_func_params.value().func_lib_def_override;
options.lib_def = func_lib_def;
} else {
if (flr_ == nullptr) {
func_lib_def = pflr_->GetFLR(host_cpu_device_->name())
->GetFunctionLibraryDefinition();
} else {
func_lib_def = flr_->GetFunctionLibraryDefinition();
}
}
const FunctionDef* function_def = func_lib_def->Find(ndef.op());
if (function_def != nullptr) {
op_def = &(function_def->signature());
} else {
TF_RETURN_IF_ERROR(OpDefForOp(ndef.op(), &op_def));
}
TF_RETURN_IF_ERROR(
InOutTypesForNode(ndef, *op_def, &input_dtypes_, &output_dtypes_));
options.target = device_ == nullptr ? "" : device_->name();
options.is_multi_device_function = true;
for (const Device* device : input_devices_) {
options.input_devices.push_back(device->name());
}
options.composite_devices = composite_devices_;
options.input_resource_dtypes_and_shapes = input_resource_dtypes_and_shapes_;
if (outputs_on_op_device_) {
if (function_def == nullptr) {
return errors::InvalidArgument("Failed to find function ", ndef.op());
}
for (int i = 0; i < function_def->signature().output_arg_size(); ++i) {
options.output_devices.push_back(options.target);
}
}
const auto& it = ndef.attr().find("executor_type");
if (it != ndef.attr().end()) {
options.executor_type = it->second.s();
}
const auto& is_component_fn_it = ndef.attr().find("is_component_function");
if (is_component_fn_it != ndef.attr().end()) {
options.is_component_function = is_component_fn_it->second.b();
}
#if !defined(IS_MOBILE_PLATFORM)
const auto& config_it = ndef.attr().find("config_proto");
if (config_it != ndef.attr().end()) {
if (!options.config_proto.ParseFromString(config_it->second.s())) {
return errors::InvalidArgument(
"Failed to parse config_proto attribute as tensorflow::ConfigProto "
"proto.");
}
grappler::GrapplerItem::OptimizationOptions optimization_options =
grappler::CreateOptOptionsForEager();
options.optimize_graph_fn = std::bind(
grappler::OptimizeGraph, std::placeholders::_1, std::placeholders::_2,
std::placeholders::_3, std::placeholders::_4, std::placeholders::_5,
options.config_proto, function_def->signature().name(),
optimization_options, std::placeholders::_6);
}
#endif
options.graph_collector = graph_collector;
options.allow_small_function_optimizations =
allow_small_function_optimizations_;
options.allow_control_flow_sync_execution =
allow_control_flow_sync_execution_;
options.shape_inference_on_tfe_dialect_import =
shape_inference_on_tfe_dialect_import_;
options.config_proto.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
options.config_proto.set_log_device_placement(log_device_placement);
options.int_args_and_retvals_on_device = int_args_and_retvals_on_device_;
if (xla_compile_device_type_.has_value()) {
options.xla_compile_device_type = xla_compile_device_type_.value();
}
options.allow_soft_placement = allow_soft_placement_;
TF_RETURN_IF_ERROR(
pflr_->Instantiate(ndef.op(), AttrSlice(ndef), options, &handle_));
return pflr_->IsCrossProcess(handle_, &is_cross_process_);
}
Status KernelAndDeviceFunc::Init(
const bool log_device_placement, const NodeDef& ndef,
GraphCollector* graph_collector,
const absl::optional<EagerFunctionParams>& eager_func_params) {
TF_RETURN_IF_ERROR(InstantiateFunc(log_device_placement, ndef,
graph_collector, eager_func_params));
return pflr_->GetOutputDevices(handle_, &output_devices_);
}
namespace {
struct OpExecutionState : public core::RefCounted {
CancellationManager cancellation_manager;
};
}
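// NOTE(editor): For deferred kernels with no caller-supplied cancellation
// manager, Run owns a ref-counted OpExecutionState so the embedded
// CancellationManager outlives the synchronous Compute call: each deferred
// op takes a ref via inc_num_deferred_ops_function and releases it via
// dec_num_deferred_ops_function when it completes.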
Status KernelAndDeviceOp::Run(
ScopedStepContainer* step_container, const EagerKernelArgs& inputs,
std::vector<EagerKernelRet>* outputs,
CancellationManager* cancellation_manager,
const absl::optional<EagerFunctionParams>& eager_func_params,
const absl::optional<ManagedStackTrace>& stack_trace,
tsl::CoordinationServiceAgent* coordination_service_agent) {
OpKernelContext::Params params;
params.device = device_;
params.frame_iter = FrameAndIter(0, 0);
params.inputs = *inputs.GetTensorValues();
params.op_kernel = kernel_.get();
params.resource_manager = device_->resource_manager();
params.input_alloc_attrs = input_alloc_attrs_;
params.output_attr_array = output_alloc_attrs_.data();
params.function_library = flr_;
params.slice_reader_cache = &slice_reader_cache_;
params.rendezvous = rendezvous_;
params.stack_trace = stack_trace;
OpExecutionState* op_execution_state = nullptr;
CancellationManager default_cancellation_manager;
if (cancellation_manager) {
params.cancellation_manager = cancellation_manager;
} else if (kernel_->is_deferred()) {
op_execution_state = new OpExecutionState;
params.cancellation_manager = &op_execution_state->cancellation_manager;
params.inc_num_deferred_ops_function = [op_execution_state]() {
op_execution_state->Ref();
};
params.dec_num_deferred_ops_function = [op_execution_state]() {
op_execution_state->Unref();
};
} else {
params.cancellation_manager = &default_cancellation_manager;
}
params.log_memory = log_memory_;
params.runner = get_runner();
params.step_container = step_container;
params.collective_executor =
collective_executor_ ? collective_executor_->get() : nullptr;
params.coordination_service_agent = coordination_service_agent;
OpKernelContext context(¶ms);
{
port::ScopedFlushDenormal flush;
port::ScopedSetRound round(FE_TONEAREST);
profiler::AnnotatedTraceMe activity(
[&] { return kernel_->TraceString(context, false); },
tsl::profiler::TraceMeLevel::kInfo);
device_->Compute(kernel_.get(), &context);
}
if (op_execution_state != nullptr) {
op_execution_state->Unref();
}
Status s = context.status();
if (TF_PREDICT_FALSE(!s.ok())) {
if (absl::IsUnavailable(s) && !is_distributed_communication_op_) {
s = errors::ReplaceErrorFromNonCommunicationOps(s, kernel_->name());
}
return s;
}
if (outputs != nullptr) {
outputs->clear();
for (int i = 0; i < context.num_outputs(); ++i) {
const auto* output_tensor = context.mutable_output(i);
if (output_tensor != nullptr) {
outputs->push_back(Tensor(*output_tensor));
} else {
outputs->push_back(Tensor());
}
}
}
return absl::OkStatus();
}
std::shared_ptr<FunctionLibraryRuntime::Options>
KernelAndDeviceFunc::PrepareForRun(
ScopedStepContainer* step_container, std::vector<EagerKernelRet>* outputs,
CancellationManager* cancellation_manager,
const absl::optional<EagerFunctionParams>& eager_func_params,
const absl::optional<ManagedStackTrace>& stack_trace,
tsl::CoordinationServiceAgent* coordination_service_agent,
tsl::core::RefCountPtr<Rendezvous>* rendezvous) {
std::shared_ptr<FunctionLibraryRuntime::Options> opts = nullptr;
if (eager_func_params.has_value()) {
const EagerFunctionParams& params = eager_func_params.value();
if (params.step_id.has_value()) {
opts = std::make_shared<FunctionLibraryRuntime::Options>(
params.step_id.value());
} else {
opts = std::make_shared<FunctionLibraryRuntime::Options>();
}
if (params.op_id != kInvalidOpId) {
opts->op_id = params.op_id;
}
} else {
opts = std::make_shared<FunctionLibraryRuntime::Options>();
if (get_op_id_ && is_cross_process_) {
opts->op_id = get_op_id_();
}
}
TF_CHECK_OK(rendezvous_factory_(opts->step_id, nullptr, rendezvous));
opts->rendezvous = rendezvous->get();
opts->create_rendezvous = false;
std::shared_ptr<CancellationManager> local_cm;
if (cancellation_manager) {
opts->cancellation_manager = cancellation_manager;
} else {
opts->cancellation_manager = new CancellationManager;
}
opts->allow_dead_tensors = true;
opts->step_container = step_container;
opts->collective_executor =
collective_executor_ ? collective_executor_->get() : nullptr;
opts->stack_trace = stack_trace;
opts->stats_collector = nullptr;
opts->runner = get_runner();
opts->coordination_service_agent = coordination_service_agent;
outputs->clear();
return opts;
}
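// NOTE(editor): The synchronous Run only handles purely local inputs; when
// the args contain remote or packed handles, or per-call function params
// are supplied, it delegates to RunAsync and blocks on a Notification
// instead of calling pflr_->RunSync directly.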
Status KernelAndDeviceFunc::Run(
ScopedStepContainer* step_container, const EagerKernelArgs& inputs,
std::vector<EagerKernelRet>* outputs,
CancellationManager* cancellation_manager,
const absl::optional<EagerFunctionParams>& eager_func_params,
const absl::optional<ManagedStackTrace>& stack_trace,
tsl::CoordinationServiceAgent* coordination_service_agent) {
tsl::profiler::TraceMe activity("KernelAndDeviceFunc::Run",
tsl::profiler::TraceMeLevel::kInfo);
if (inputs.HasRemoteOrPackedInputs() || eager_func_params.has_value()) {
Notification n;
Status status;
RunAsync(step_container, inputs, outputs, cancellation_manager,
eager_func_params, coordination_service_agent,
[&status, &n](Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
tsl::core::RefCountPtr<Rendezvous> created_rendezvous;
std::shared_ptr<FunctionLibraryRuntime::Options> opts = PrepareForRun(
step_container, outputs, cancellation_manager, eager_func_params,
stack_trace, coordination_service_agent, &created_rendezvous);
std::vector<Tensor> rets;
Status s;
{
port::ScopedFlushDenormal flush;
port::ScopedSetRound round(FE_TONEAREST);
s.Update(pflr_->RunSync(*opts, handle_, inputs.GetLocalTensors(), &rets));
}
if (cancellation_manager == nullptr) {
delete opts->cancellation_manager;
}
outputs->reserve(rets.size());
for (auto& v : rets) {
outputs->push_back(std::move(v));
}
return s;
}
void KernelAndDeviceFunc::RunAsync(
ScopedStepContainer* step_container, const EagerKernelArgs& inputs,
std::vector<EagerKernelRet>* outputs,
CancellationManager* cancellation_manager,
const absl::optional<EagerFunctionParams>& eager_func_params,
tsl::CoordinationServiceAgent* coordination_service_agent,
std::function<void(const Status&)> done) {
tsl::profiler::TraceMe activity(
[] {
return tsl::profiler::TraceMeEncode("KernelAndDeviceFunc::RunAsync",
{{"_r", 1}});
},
tsl::profiler::TraceMeLevel::kInfo);
tsl::core::RefCountPtr<Rendezvous> created_rendezvous;
std::shared_ptr<FunctionLibraryRuntime::Options> opts = PrepareForRun(
step_container, outputs, cancellation_manager, eager_func_params,
std::nullopt, coordination_service_agent, &created_rendezvous);
pflr_->Run(
*opts, handle_, inputs, outputs,
[opts, cancellation_manager, done = std::move(done),
created_rendezvous = created_rendezvous.release()](const Status& s) {
if (cancellation_manager == nullptr) {
delete opts->cancellation_manager;
}
created_rendezvous->Unref();
done(s);
});
}
tensorflow::Device* KernelAndDeviceOp::OutputDevice(int idx) const {
if (kernel_->output_memory_types()[idx] == HOST_MEMORY) {
return nullptr;
}
return device_;
}
tensorflow::Device* KernelAndDeviceFunc::OutputDevice(int idx) const {
if (output_dtypes_[idx] == DT_RESOURCE) {
return nullptr;
}
return output_devices_[idx];
}
tensorflow::Device* KernelAndDeviceOp::OutputResourceDevice(int idx) const {
if (kernel_->output_type(idx) == DT_RESOURCE) {
return device_;
}
return nullptr;
}
tensorflow::Device* KernelAndDeviceFunc::OutputResourceDevice(int idx) const {
if (output_dtypes_[idx] == DT_RESOURCE) {
return output_devices_[idx];
}
return nullptr;
}
Device* KernelAndDeviceOp::InputDevice(int i) const {
return input_devices_[i];
}
Device* KernelAndDeviceFunc::InputDevice(int i) const {
if ((input_dtypes_[i] == DT_RESOURCE) &&
(composite_devices_.find(input_devices_[i]->name()) ==
composite_devices_.end())) {
return host_cpu_device_;
} else {
return input_devices_[i];
}
}
} | #include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
class TestEnv {
public:
TestEnv() : flib_def_(OpRegistry::Global(), FunctionDefLibrary()) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
cpu_device_ = devices.back().get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
OptimizerOptions opts;
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, &flib_def_, opts,
nullptr);
flr_ = pflr_->GetFLR("/job:a/replica:0/task:0/device:CPU:0");
CHECK(flr_ != nullptr);
}
FunctionLibraryRuntime* function_library_runtime() const { return flr_; }
ProcessFunctionLibraryRuntime* pflr() const { return pflr_.get(); }
Device* cpu_device() { return cpu_device_; }
private:
FunctionLibraryDefinition flib_def_;
std::unique_ptr<DeviceMgr> device_mgr_;
FunctionLibraryRuntime* flr_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
Device* cpu_device_;
};
void BM_CreateGraph(::testing::benchmark::State& state) {
for (auto s : state) {
Scope root = Scope::NewRootScope();
auto C = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}});
auto M = ops::MatMul(root, C, C);
TF_CHECK_OK(root.status());
}
}
BENCHMARK(BM_CreateGraph);
void BM_RunGraph(::testing::benchmark::State& state) {
Scope root = Scope::NewRootScope();
auto C = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}});
auto M = ops::MatMul(root, C, C);
SessionOptions opts;
opts.config.set_inter_op_parallelism_threads(1);
opts.config.set_intra_op_parallelism_threads(1);
ClientSession sess(root, opts);
std::vector<Tensor> outputs;
for (auto s : state) {
outputs.clear();
TF_CHECK_OK(sess.Run({M}, &outputs));
}
}
BENCHMARK(BM_RunGraph);
void BM_CreateAndDestroySession(::testing::benchmark::State& state) {
Scope root = Scope::NewRootScope();
auto C = ops::Const(root, {{1.0, 2.0}, {3.0, 4.0}});
auto M = ops::MatMul(root, C, C);
for (auto s : state) {
ClientSession sess(root);
}
}
BENCHMARK(BM_CreateAndDestroySession);
void BM_KernelAndDeviceInit(::testing::benchmark::State& state) {
NodeDef ndef(AttrBuilder("MatMul")
.Set("T", DT_FLOAT)
.Set("transpose_a", false)
.Set("transpose_b", false)
.NumInputs(2)
.BuildNodeDef());
TestEnv env;
KernelAndDeviceOp k(nullptr, false, env.function_library_runtime(), nullptr,
nullptr, env.cpu_device());
for (auto s : state) {
TF_CHECK_OK(k.Init({}, ndef, nullptr, std::nullopt));
}
}
BENCHMARK(BM_KernelAndDeviceInit);
void BM_KernelAndDeviceRun(::testing::benchmark::State& state) {
Tensor t(Input({{1.0f, 2.0f}, {3.0f, 4.0f}}).tensor());
absl::InlinedVector<TensorValue, 4UL> inputs;
inputs.push_back(TensorValue(&t));
inputs.push_back(TensorValue(&t));
std::vector<EagerKernelRet> outputs;
NodeDef ndef(AttrBuilder("MatMul")
.Set("T", DT_FLOAT)
.Set("transpose_a", false)
.Set("transpose_b", false)
.NumInputs(inputs.size())
.BuildNodeDef());
TestEnv env;
KernelAndDeviceOp k(nullptr, false, env.function_library_runtime(), nullptr,
nullptr, env.cpu_device());
TF_CHECK_OK(k.Init({}, ndef, nullptr, std::nullopt));
const EagerKernelArgs args(std::move(inputs));
for (auto s : state) {
TF_CHECK_OK(k.Run(nullptr, args, &outputs, nullptr, std::nullopt,
std::nullopt, nullptr));
}
}
BENCHMARK(BM_KernelAndDeviceRun);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/kernel_and_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd82b62c-d379-4d92-a458-68403e5ddeac | cpp | tensorflow/tensorflow | eager_executor | tensorflow/core/common_runtime/eager/eager_executor.cc | tensorflow/core/common_runtime/eager/eager_executor_test.cc | #include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include <forward_list>
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
bool IsAsyncWaitForRemoteFunctionEnabled() {
bool enabled = true;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_ASYNC_WAIT_FOR_REMOTE_FUNCTION",
true, &enabled));
return enabled;
}
}
EagerExecutor::EagerExecutor(bool async, bool enable_streaming_enqueue,
int in_flight_nodes_limit)
: next_node_id_(0),
ok_(true),
thread_(async ? tensorflow::Env::Default()->StartThread(
tensorflow::ThreadOptions(), "eager_async_executor",
std::bind(&EagerExecutor::Run, this))
: nullptr),
last_eager_client_(nullptr),
enable_async_wait_for_remote_function_(
IsAsyncWaitForRemoteFunctionEnabled()),
enable_streaming_enqueue_(enable_streaming_enqueue),
in_flight_nodes_limit_(in_flight_nodes_limit) {
if (async && in_flight_nodes_limit_ > 0) {
VLOG(4) << "EagerExecutor InFlightNodes limit is set to "
<< in_flight_nodes_limit_;
}
}
EagerExecutor::~EagerExecutor() {
tensorflow::mutex_lock l(node_queue_mutex_);
state_ = ExecutorState::kShutDown;
nodes_pending_.notify_all();
for (const auto& cleanups_for_key : cleanups_) {
for (const std::function<void()>& cleanup : cleanups_for_key.second) {
cleanup();
}
}
}
Status EagerExecutor::ShutDown() {
{
bool has_thread;
Status status;
{
tensorflow::mutex_lock l(node_queue_mutex_);
if (state_ != ExecutorState::kShutDown) {
state_ = ExecutorState::kShuttingDown;
}
WaitForAllPendingNodesLocked(&l).IgnoreError();
state_ = ExecutorState::kShutDown;
has_thread = thread_ != nullptr;
status = status_;
if (has_thread) {
nodes_pending_.notify_all();
}
}
if (!has_thread) {
return status;
}
}
thread_exited_notification_.WaitForNotification();
return status();
}
const char* EagerExecutor::StateStringLocked() {
switch (state_) {
case ExecutorState::kActive:
return "Active";
case ExecutorState::kShuttingDown:
return "ShuttingDown";
case ExecutorState::kShutDown:
return "ShutDown";
}
}
Status EagerExecutor::SyncExecute(EagerNode* node) {
if (Async()) {
return errors::Internal("SyncExecute does not support async execution.");
}
if (node->AsAsync() != nullptr) {
return errors::Internal("Executor does not support executing async nodes");
}
uint64 id = next_node_id_++;
Status s = node->Prepare();
if (!s.ok()) {
return s;
}
s = node->Run();
tensorflow::mutex_lock l(node_queue_mutex_);
NotifyWaiters(id);
return s;
}
Status EagerExecutor::AddOrExecute(std::unique_ptr<EagerNode> node) {
Status status;
core::RefCountPtr<NodeItem> item(new NodeItem);
item->id = next_node_id_++;
item->node = std::move(node);
item->state = NodeState::kPENDING;
status = item->node->Prepare();
if (!status.ok()) {
item->node->Abort(status);
return status;
}
if (!Async()) {
return RunItem(std::move(item), false);
} else {
tensorflow::mutex_lock l(node_queue_mutex_);
DVLOG(3) << "Add node [id " << item->id << "]" << item->node->DebugString()
<< " with status: " << status_;
if (state_ != ExecutorState::kActive) {
status = errors::FailedPrecondition(
"EagerExecutor accepts new EagerNodes to run only in Active state. "
"Current state is '",
StateStringLocked(), "'");
} else {
status = status_;
if (status.ok()) {
node_queue_.push(std::move(item));
if (node_queue_.size() == 1) {
nodes_pending_.notify_all();
}
if (in_flight_nodes_limit_ == 0) {
return absl::OkStatus();
}
while (true) {
int64_t in_flight_nodes_count =
node_queue_.size() + unfinished_nodes_.size();
if (in_flight_nodes_count < in_flight_nodes_limit_) {
break;
}
VLOG(4) << "Hitting in-flight node limit node_queue_.size() = "
<< node_queue_.size()
<< " unfinished_nodes_.size() = " << unfinished_nodes_.size()
<< ".";
nodes_done_.wait(l);
}
return absl::OkStatus();
}
}
}
item->node->Abort(status);
return status;
}
tensorflow::Status EagerExecutor::WaitForAllPendingNodes() {
tensorflow::mutex_lock l(node_queue_mutex_);
return WaitForAllPendingNodesLocked(&l);
}
tensorflow::Status EagerExecutor::WaitForAllPendingNodesLocked(
mutex_lock* lock) {
tensorflow::condition_variable cond;
if (!status_.ok()) return status_;
if (node_queue_.empty() && unfinished_nodes_.empty()) return absl::OkStatus();
DCHECK(Async() || node_queue_.empty());
auto last_id = next_node_id_ - 1;
DVLOG(3) << "Wait for Node: [id " << last_id << "] ";
node_done_notifications_.insert(std::make_pair(last_id, &cond));
cond.wait(*lock);
return status_;
}
void EagerExecutor::ClearError() {
if (ok()) return;
tensorflow::mutex_lock l(node_queue_mutex_);
DCHECK(node_done_notifications_.empty());
DCHECK(node_queue_.empty());
status_ = absl::OkStatus();
ok_ = true;
last_eager_client_ = nullptr;
nodes_pending_.notify_all();
}
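// NOTE(editor): NodeDone retires a finished node: it pops queue-front items,
// erases async items from unfinished_nodes_, and -- on the first fatal
// failure -- latches status_, drains every pending node into
// items_to_destroy, and aborts them outside the lock. It also wakes waiters
// blocked in WaitForAllPendingNodes and in AddOrExecute's in-flight-limit
// wait.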
void EagerExecutor::NodeDone(const core::RefCountPtr<NodeItem>& item,
const Status& status, bool from_queue) {
DVLOG(3) << "Node Done: [id " << item->id << "] " << item->node->DebugString()
<< " with status: " << status;
DCHECK(item->state != NodeState::kDONE);
item->state = NodeState::kDONE;
bool async = item->node->AsAsync() != nullptr;
if (status.ok() && !from_queue && !async) {
return;
}
std::forward_list<core::RefCountPtr<NodeItem>> items_to_destroy;
{
mutex_lock l(node_queue_mutex_);
if (!status_.ok()) return;
bool need_notification = from_queue;
if (from_queue) {
DCHECK(!node_queue_.empty() && item.get() == node_queue_.front().get());
node_queue_.pop();
} else if (async) {
need_notification = item->id == unfinished_nodes_.begin()->first;
auto result = unfinished_nodes_.erase(item->id);
if (result == 0) return;
}
if (!status.ok() && item->node->Fatal()) {
need_notification = true;
status_ = status;
ok_ = false;
if (Async()) {
errors::AppendToMessage(&status_,
"Encountered when executing an operation using "
"EagerExecutor. This error cancels all future "
"operations and poisons their output tensors.");
}
while (!node_queue_.empty()) {
items_to_destroy.push_front(std::move(node_queue_.front()));
node_queue_.pop();
}
for (auto& it : unfinished_nodes_) {
items_to_destroy.push_front(std::move(it.second));
}
unfinished_nodes_.clear();
}
if (need_notification) {
NotifyWaiters(item->id);
}
nodes_done_.notify_all();
}
for (auto& item : items_to_destroy) {
item->node->Abort(status);
}
}
void EagerExecutor::NotifyWaiters(uint64 id) {
if (!node_done_notifications_.empty()) {
uint64 upperbound_id = 0;
if (!unfinished_nodes_.empty()) {
upperbound_id = unfinished_nodes_.begin()->first - 1;
} else if (!node_queue_.empty()) {
upperbound_id = node_queue_.front()->id - 1;
} else {
upperbound_id = next_node_id_ - 1;
}
if (upperbound_id < id) {
return;
}
DVLOG(3) << "Notify node done: [id " << id << " to " << upperbound_id
<< "] ";
const auto range =
status_.ok() ? std::make_pair(
node_done_notifications_.lower_bound(id),
node_done_notifications_.upper_bound(upperbound_id))
: std::make_pair(node_done_notifications_.begin(),
node_done_notifications_.end());
for (auto it = range.first; it != range.second; ++it) {
it->second->notify_all();
}
node_done_notifications_.erase(range.first, range.second);
}
}
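// NOTE(editor): Run is the async executor's background-thread loop: it
// sleeps on nodes_pending_ until work arrives (or shutdown is requested),
// takes a ref on the queue front without popping it, and hands the item to
// RunItem, which dequeues it once execution is underway.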
void EagerExecutor::Run() {
auto thread_exited_notifier =
gtl::MakeCleanup([this] { thread_exited_notification_.Notify(); });
while (true) {
core::RefCountPtr<NodeItem> curr_item;
{
tensorflow::mutex_lock l(node_queue_mutex_);
while (node_queue_.empty() || !status_.ok()) {
if (state_ == ExecutorState::kShutDown) return;
nodes_pending_.wait(l);
}
curr_item.reset(node_queue_.front().get());
curr_item->Ref();
}
Status status = RunItem(std::move(curr_item), true);
if (!status.ok()) {
VLOG(1) << "Failed to run item: " << status;
}
}
}
Status EagerExecutor::RunItem(core::RefCountPtr<NodeItem> item,
bool from_queue) {
DVLOG(3) << "Running Node: [id " << item->id << "] "
<< item->node->DebugString();
AsyncRemoteExecuteNode* async_remote_node =
item->node->AsAsyncRemoteExecuteNode();
if (enable_async_wait_for_remote_function_) {
if (async_remote_node != nullptr) {
if (last_eager_client_ != nullptr &&
async_remote_node->eager_client() != nullptr &&
last_eager_client_ != async_remote_node->eager_client()) {
DVLOG(3) << "Executing Sync Executor for node" << item->id;
tensorflow::Status status = async_remote_node->SyncExecutors();
if (!status.ok()) {
NodeDone(item, status, from_queue);
return status;
}
last_eager_client_ = nullptr;
}
if (async_remote_node->eager_client() != nullptr &&
async_remote_node->needs_remote_inputs() &&
async_remote_node->allow_multiple_pending_requests()) {
last_eager_client_ = async_remote_node->eager_client();
}
}
}
AsyncEagerNode* async_node = item->node->AsAsync();
if (async_node == nullptr) {
tensorflow::Status status = item->node->Run();
NodeDone(item, status, from_queue);
return status;
}
item->state = NodeState::kSCHEDULED;
auto async_ref = item.get();
async_ref->Ref();
TF_RETURN_IF_ERROR(MoveToUnfinished(std::move(item), from_queue));
async_node->RunAsync([this, async_ref](const Status& status) {
core::RefCountPtr<NodeItem> async_item(async_ref);
NodeDone(async_item, status, false);
});
return status();
}
Status EagerExecutor::MoveToUnfinished(core::RefCountPtr<NodeItem> item,
bool from_queue) {
tensorflow::mutex_lock l(node_queue_mutex_);
if (!status_.ok()) {
return status_;
}
if (from_queue) {
DCHECK(!node_queue_.empty() && item.get() == node_queue_.front().get());
node_queue_.pop();
}
DVLOG(3) << "Add Node: [id " << item->id << "] to unfinished map.";
unfinished_nodes_.emplace_hint(unfinished_nodes_.end(), item->id,
std::move(item));
return absl::OkStatus();
}
void EagerExecutor::AddCleanup(intptr_t key, std::function<void()> callback) {
cleanups_[key].push_back(callback);
}
void EagerExecutor::RemoveCleanups(intptr_t key) { cleanups_.erase(key); }
} | #include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include <memory>
#include <utility>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
class TestState {
public:
enum State { kSuccess, kNotRun, kFailure };
TestState() : state_(kNotRun) {}
TestState(const TestState&) = delete;
TestState& operator=(const TestState&) = delete;
State read_state() { return state_; }
void update_success_state() { state_ = kSuccess; }
void update_run_error_state() { state_ = kFailure; }
private:
State state_;
};
class TestEagerNode : public EagerNode {
public:
explicit TestEagerNode(TestState* state,
Status prepare_return_status = absl::OkStatus(),
Status run_return_status = absl::OkStatus())
: state_(state),
prepare_return_status_(prepare_return_status),
run_return_status_(run_return_status) {}
TestEagerNode(const TestEagerNode&) = delete;
TestEagerNode& operator=(const TestEagerNode&) = delete;
Status Prepare() override { return prepare_return_status_; }
Status Run() override {
if (run_return_status_.ok()) {
state_->update_success_state();
} else {
state_->update_run_error_state();
}
return run_return_status_;
};
void Abort(Status status) override {}
string DebugString() const override { return "testEagerNode"; }
private:
TestState* state_;
Status prepare_return_status_;
Status run_return_status_;
};
class TestAsyncEagerNode : public AsyncEagerNode {
public:
explicit TestAsyncEagerNode(TestState* state,
Status prepare_return_status = absl::OkStatus(),
Status run_return_status = absl::OkStatus())
: state_(state),
prepare_return_status_(prepare_return_status),
run_return_status_(run_return_status) {}
TestAsyncEagerNode(const TestAsyncEagerNode&) = delete;
TestAsyncEagerNode& operator=(const TestAsyncEagerNode&) = delete;
Status Prepare() override { return prepare_return_status_; }
void RunAsync(StatusCallback done) override {
if (run_return_status_.ok()) {
state_->update_success_state();
} else {
state_->update_run_error_state();
}
done(run_return_status_);
};
void Abort(Status status) override {}
string DebugString() const override { return "testAsyncEagerNode"; }
private:
TestState* state_;
Status prepare_return_status_;
Status run_return_status_;
};
TEST(EagerExecutorTest, TestSyncExecutorWithEagerNode) {
auto sync_executor = std::make_unique<EagerExecutor>(
false, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(sync_executor->AddOrExecute(std::move(node)));
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestSyncExecuteMethodFailureCases) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto sync_node = std::make_unique<TestEagerNode>(state.get());
EXPECT_THAT(async_executor->SyncExecute(sync_node.get()),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
ASSERT_EQ(state->read_state(), TestState::kNotRun);
auto sync_executor = std::make_unique<EagerExecutor>(
false, true);
state = std::make_unique<TestState>();
auto async_node = std::make_unique<TestAsyncEagerNode>(state.get());
EXPECT_THAT(sync_executor->SyncExecute(async_node.get()),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestSyncExecuteMethodSuccessCase) {
auto sync_executor = std::make_unique<EagerExecutor>(
false, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(sync_executor->SyncExecute(node.get()));
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestSyncExecutorFailPrepare) {
auto sync_executor = std::make_unique<EagerExecutor>(
false, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(),
errors::InvalidArgument("test"));
auto status = sync_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestSyncExecutorFailRun) {
auto sync_executor = std::make_unique<EagerExecutor>(
false, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(), absl::OkStatus(),
errors::Internal("test"));
auto status = sync_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithAsyncEagerNode) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithInFlightRequestLimit) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true,
1);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto node1 = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node1)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorWithEagerNode) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get());
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
TF_ASSERT_OK(async_executor->WaitForAllPendingNodes());
ASSERT_EQ(state->read_state(), TestState::State::kSuccess);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailPrepare) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(),
errors::InvalidArgument("test"));
auto status = async_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailRun) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestEagerNode>(state.get(), absl::OkStatus(),
errors::Internal("test"));
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto status = async_executor->WaitForAllPendingNodes();
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailPrepareWithAsyncNode) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(
state.get(), errors::InvalidArgument("test"));
auto status = async_executor->AddOrExecute(std::move(node));
ASSERT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
ASSERT_EQ(state->read_state(), TestState::State::kNotRun);
}
TEST(EagerExecutorTest, TestAsyncExecutorFailRunWithAsyncNode) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(
state.get(), absl::OkStatus(), errors::Internal("test"));
TF_ASSERT_OK(async_executor->AddOrExecute(std::move(node)));
auto status = async_executor->WaitForAllPendingNodes();
ASSERT_EQ(status.code(), tensorflow::error::INTERNAL);
ASSERT_EQ(state->read_state(), TestState::State::kFailure);
}
TEST(EagerExecutorTest, TestAsyncExecutorAddNodesAfterShutdown) {
auto async_executor = std::make_unique<EagerExecutor>(
true, true);
auto state = std::make_unique<TestState>();
auto node = std::make_unique<TestAsyncEagerNode>(state.get());
TF_ASSERT_OK(async_executor->ShutDown());
EXPECT_THAT(
async_executor->AddOrExecute(std::move(node)),
tensorflow::testing::StatusIs(tensorflow::error::FAILED_PRECONDITION));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ecbd68f-03dc-483c-bf7d-7c2e09656484 | cpp | tensorflow/tensorflow | eager_op_rewrite_registry | tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.cc | tensorflow/core/common_runtime/eager/eager_op_rewrite_registry_test.cc | #include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include <memory>
#include <utility>
namespace tensorflow {
EagerOpRewriteRegistry* EagerOpRewriteRegistry::Global() {
static EagerOpRewriteRegistry* global_rewrite_registry =
new EagerOpRewriteRegistry;
return global_rewrite_registry;
}
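// NOTE(editor): Register keeps each phase's pass list sorted by ordinal,
// inserting before the first entry with a larger ordinal and crashing (via
// TF_CHECK_OK on an AlreadyExists error) if the ordinal is already taken.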
void EagerOpRewriteRegistry::Register(Phase phase, int32_t ordinal,
std::unique_ptr<EagerOpRewrite> pass) {
auto it_rewrites = rewrites_[phase].cbegin();
for (; it_rewrites != rewrites_[phase].cend(); ++it_rewrites) {
if (it_rewrites->second == ordinal) {
TF_CHECK_OK(errors::AlreadyExists(
"Attempting to register Eager Rewriter ", pass->GetDebugInfo().name,
" for phase ", phase, " using ordinal ", ordinal,
" already occupied by Rewriter ",
it_rewrites->first->GetDebugInfo().name));
}
if (it_rewrites->second > ordinal) {
break;
}
}
rewrites_[phase].emplace(it_rewrites,
std::make_pair(std::move(pass), ordinal));
}
Status EagerOpRewriteRegistry::RunRewrite(
Phase phase, EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* out_op) {
EagerOperation* pre_op = orig_op;
for (auto it_rewrites = rewrites_[phase].cbegin();
it_rewrites != rewrites_[phase].cend(); ++it_rewrites) {
TF_RETURN_IF_ERROR(it_rewrites->first->Run(pre_op, out_op));
if (*out_op != nullptr) {
pre_op = out_op->get();
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include <memory>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestEagerOpRewrite : public EagerOpRewrite {
public:
TestEagerOpRewrite(string name, string file, string line)
: EagerOpRewrite(name, file, line),
executor_(false, true) {}
static int count_;
EagerExecutor executor_;
Status Run(EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op) override {
++count_;
tensorflow::EagerOperation* op =
new tensorflow::EagerOperation(&orig_op->EagerContext());
TF_RETURN_IF_ERROR(op->Reset("NoOp", nullptr, false, &executor_));
out_op->reset(op);
return absl::OkStatus();
}
};
int TestEagerOpRewrite::count_ = 0;
REGISTER_REWRITE(EagerOpRewriteRegistry::PRE_EXECUTION, 10000,
TestEagerOpRewrite);
REGISTER_REWRITE(EagerOpRewriteRegistry::PRE_EXECUTION, 10001,
TestEagerOpRewrite);
TEST(EagerOpRewriteRegistryTest, RegisterRewritePass) {
EXPECT_EQ(0, TestEagerOpRewrite::count_);
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
tensorflow::EagerContext* ctx = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
EagerOperation orig_op(ctx);
std::unique_ptr<tensorflow::EagerOperation> out_op;
EXPECT_EQ(absl::OkStatus(),
EagerOpRewriteRegistry::Global()->RunRewrite(
EagerOpRewriteRegistry::PRE_EXECUTION, &orig_op, &out_op));
EXPECT_EQ(2, TestEagerOpRewrite::count_);
EXPECT_EQ("NoOp", out_op->Name());
ctx->Unref();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_op_rewrite_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ace438f3-1171-4bf3-b5af-b4186736073d | cpp | tensorflow/tensorflow | mkl_eager_op_rewrite | tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite.cc | tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite_test.cc | #ifdef INTEL_MKL
#include <string>
#include <unordered_map>
#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
class MklEagerOpRewrite : public EagerOpRewrite {
public:
MklEagerOpRewrite(string name, string file, string line);
struct MklEagerOp {
string op_name;
std::function<bool(EagerOperation*)> RewriteRule;
std::function<Status(EagerOperation*, std::unique_ptr<EagerOperation>*)>
CreateMklOp;
};
private:
std::unordered_map<std::string, MklEagerOp> mkl_eager_ops_;
Status Run(EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op);
static Status SetupNewOp(EagerOperation* orig_op, const string mkl_op_name,
std::unique_ptr<EagerOperation>* new_mkl_op);
static Status CreateGenericMklOp(EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* mkl_op);
static bool RewriteConv2D(EagerOperation* op);
static bool RewriteSparseMatrixMatMul(EagerOperation* op);
static bool RewriteFusedBatchNormV3(EagerOperation* op);
Status RewriteToMklOp(EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* mkl_op);
bool ShouldRewriteOp(EagerOperation* op);
static bool AlwaysRewrite(EagerOperation* op) { return true; }
bool IsKernelRegistered(string op_name, DataType dt);
void InsertMKLEagerOps(MklEagerOp op);
};
REGISTER_REWRITE(EagerOpRewriteRegistry::POST_PLACEMENT, 10000,
MklEagerOpRewrite);
MklEagerOpRewrite::MklEagerOpRewrite(string name, string file, string line)
: EagerOpRewrite(name, file, line) {
InsertMKLEagerOps({"AvgPool", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPoolGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPool3D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPool3DGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"BatchMatMul", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"BatchMatMulV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"Conv2D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv2DBackpropFilter", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps({"Conv2DBackpropInput", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps({"Conv3D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv3DBackpropFilterV2", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv3DBackpropInputV2", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps(
{"DepthwiseConv2dNative", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"DepthwiseConv2dNativeBackpropFilter", RewriteConv2D,
CreateGenericMklOp});
InsertMKLEagerOps({"DepthwiseConv2dNativeBackpropInput", RewriteConv2D,
CreateGenericMklOp});
InsertMKLEagerOps({"Einsum", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNorm", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNormGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormGradV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormGradV3", RewriteFusedBatchNormV3, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNormV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormV3", RewriteFusedBatchNormV3, CreateGenericMklOp});
InsertMKLEagerOps({"MatMul", AlwaysRewrite, CreateGenericMklOp});
#ifdef ENABLE_ONEDNN_V3
InsertMKLEagerOps(
{"SparseMatrixMatMul", RewriteSparseMatrixMatMul, CreateGenericMklOp});
#endif
}
void MklEagerOpRewrite::InsertMKLEagerOps(MklEagerOp op) {
mkl_eager_ops_.insert(std::make_pair(op.op_name, op));
}
Status MklEagerOpRewrite::Run(
EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op) {
if (ShouldRewriteOp(orig_op)) {
TF_CHECK_OK(RewriteToMklOp(orig_op, out_op));
}
return OkStatus();
}
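// Creates a new EagerOperation named |mkl_op_name| that mirrors |orig_op|:
// same inputs, attributes, and device. When eager ops are not run as
// functions, the op is additionally tagged with the MKL name-change kernel
// label so the oneDNN kernel is selected.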
Status MklEagerOpRewrite::SetupNewOp(
EagerOperation* orig_op, const string mkl_op_name,
std::unique_ptr<EagerOperation>* new_mkl_op) {
bool is_remote = false;
new_mkl_op->reset(new tensorflow::EagerOperation(&orig_op->EagerContext()));
TF_RETURN_IF_ERROR(new_mkl_op->get()->Reset(mkl_op_name.c_str(), nullptr,
is_remote, nullptr));
int num_inputs = orig_op->Inputs().size();
for (int i = 0; i < num_inputs; ++i) {
TF_RETURN_IF_ERROR((*new_mkl_op)->AddInput(orig_op->Inputs()[i]));
}
const NodeDef& orig_ndef = orig_op->MutableAttrs()->BuildNodeDef();
AttrSlice attr_list(orig_ndef);
for (const auto& attr : attr_list) {
(*new_mkl_op)->MutableAttrs()->Set(attr.first, attr.second);
}
if (!orig_op->EagerContext().RunEagerOpAsFunction()) {
(*new_mkl_op)
->MutableAttrs()
->Set("_kernel", mkl_op_registry::kMklNameChangeOpLabel);
}
string device_name = orig_op->DeviceName();
return (*new_mkl_op)->SetDeviceName(device_name.c_str());
}
Status MklEagerOpRewrite::CreateGenericMklOp(
EagerOperation* orig_op, std::unique_ptr<EagerOperation>* mkl_op) {
const string mkl_op_name =
mkl_op_registry::GetMklNativeOpName(orig_op->Name());
TF_CHECK_OK(SetupNewOp(orig_op, mkl_op_name, mkl_op));
return OkStatus();
}
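// An op is rewritten only if oneDNN is enabled, the op carries a "T" dtype
// attribute, it is placed on CPU, an MKL kernel is registered for that dtype,
// and the op's own rewrite predicate accepts it.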
bool MklEagerOpRewrite::ShouldRewriteOp(EagerOperation* op) {
if (!IsMKLEnabled()) {
return false;
}
DataType data_type;
if (op->Attrs().Get("T", &data_type) != OkStatus()) {
return false;
}
if (op->GetDeviceParsedName().type != "CPU") {
return false;
}
bool kernel_found = IsKernelRegistered(op->Name(), data_type);
if (!kernel_found) {
return false;
}
auto it = mkl_eager_ops_.find(op->Name());
if (it != mkl_eager_ops_.end()) {
if (it->second.RewriteRule(op)) {
return true;
}
}
return false;
}
bool MklEagerOpRewrite::IsKernelRegistered(string op_name, DataType dt) {
auto element = mkl_eager_ops_.find(op_name);
if (element != mkl_eager_ops_.end()) {
return (mkl_op_registry::IsMklOp(
mkl_op_registry::GetMklNativeOpName(op_name), dt, true) ||
mkl_op_registry::IsMklOp(mkl_op_registry::GetMklOpName(op_name), dt,
true));
} else {
return false;
}
}
Status MklEagerOpRewrite::RewriteToMklOp(
EagerOperation* orig_op, std::unique_ptr<EagerOperation>* mkl_op) {
TF_RETURN_IF_ERROR(
mkl_eager_ops_[orig_op->Name()].CreateMklOp(orig_op, mkl_op));
return OkStatus();
}
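// oneDNN convolution kernels do not support EXPLICIT padding, so ops using it
// are left unrewritten.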
bool MklEagerOpRewrite::RewriteConv2D(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
string padding;
TF_CHECK_OK(GetNodeAttr(ndef, "padding", &padding));
return (padding != "EXPLICIT");
}
bool MklEagerOpRewrite::RewriteSparseMatrixMatMul(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
DataType T;
Tensor tensor;
bool adjoint_a, adjoint_b, transpose_a, transpose_b, transpose_out;
TF_CHECK_OK(GetNodeAttr(ndef, "T", &T));
if (T != DT_FLOAT) {
VLOG(1) << "_MklSparseMatrixMatMul only supports DT_FLOAT";
return false;
}
TF_CHECK_OK(GetNodeAttr(ndef, "adjoint_a", &adjoint_a));
TF_CHECK_OK(GetNodeAttr(ndef, "adjoint_b", &adjoint_b));
if (adjoint_a || adjoint_b) {
VLOG(1)
<< "_MklNativeSparseMatrixMatMul doesn't support adjointing matrices";
return false;
}
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_a", &transpose_a));
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_b", &transpose_b));
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_output", &transpose_out));
if (transpose_a || transpose_b || transpose_out) {
VLOG(1)
<< "_MklNativeSparseMatrixMatMul doesn't support transposing matrices";
return false;
}
return true;
}
bool MklEagerOpRewrite::RewriteFusedBatchNormV3(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
if (Check5DFormat(ndef)) {
VLOG(1) << "Eager Op Rewrite: FusedBatchNorm(Grad)V3 op currently does not "
<< "support 5D tensors.";
return false;
}
return true;
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_MKL)
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
class EagerOpRewriteTest : public ::testing::Test {
public:
EagerOpRewriteTest() : eager_ctx_(nullptr) {}
~EagerOpRewriteTest() {
if (eager_ctx_) {
eager_ctx_->Unref();
}
}
std::unique_ptr<tensorflow::EagerOperation> CreateOp(const string op_name) {
std::unique_ptr<DeviceMgr> device_mgr =
std::make_unique<StaticDeviceMgr>(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
bool async = false;
auto rendezvous =
tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
eager_ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
async, device_mgr.get(), false, std::move(rendezvous), nullptr, nullptr,
true);
EagerExecutor executor_(false);
std::unique_ptr<tensorflow::EagerOperation> op(
new tensorflow::EagerOperation(eager_ctx_));
EXPECT_EQ(OkStatus(),
op.get()->Reset(op_name.c_str(), nullptr, false, &executor_));
EXPECT_EQ(OkStatus(), op.get()->SetDeviceName(
"/job:localhost/replica:0/task:0/device:CPU:0"));
return op;
}
void CheckRewrite(EagerOperation* orig_op, string expected_op_name) {
std::unique_ptr<tensorflow::EagerOperation> out_op;
EXPECT_EQ(OkStatus(),
EagerOpRewriteRegistry::Global()->RunRewrite(
EagerOpRewriteRegistry::POST_PLACEMENT, orig_op, &out_op));
string actual_op_name = orig_op->Name();
if (out_op) {
actual_op_name = out_op->Name();
}
EXPECT_EQ(actual_op_name, expected_op_name);
}
protected:
tensorflow::EagerContext* eager_ctx_;
};
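// Op name lists shared by the tests below. REGISTER_TEST_ALL_TYPES expands
// the REGISTER_TEST body once per supported data type (e.g. float and
// bfloat16).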
#define CONV_FORWARD_OPS "Conv2D", "Conv3D", "DepthwiseConv2dNative"
#define CONV_BACKWARD_OPS \
"Conv2DBackpropInput", "Conv2DBackpropFilter", "Conv3DBackpropFilterV2", \
"Conv3DBackpropInputV2", "DepthwiseConv2dNativeBackpropFilter", \
"DepthwiseConv2dNativeBackpropInput"
#define CONV_OPS CONV_FORWARD_OPS, CONV_BACKWARD_OPS
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "VALID"); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(conv_ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOps_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_FORWARD_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "EXPLICIT"); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(conv_ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOpsExplicitPadding_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_BACKWARD_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "EXPLICIT"); \
CheckRewrite(orig_op.get(), conv_ops[i]); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOpsExplicitPadding_Negative);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> ops = {"AvgPool", \
"AvgPoolGrad", \
"AvgPool3D", \
"AvgPool3DGrad", \
"BatchMatMul", \
"Einsum", \
"FusedBatchNorm", \
"FusedBatchNormV2", \
"FusedBatchNormV3", \
"FusedBatchNormGrad", \
"FusedBatchNormGradV2", \
"FusedBatchNormGradV3", \
"MatMul"}; \
for (int i = 0; i < ops.size(); ++i) { \
auto orig_op = CreateOp(ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(MostOps_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> Fused_BN_ops = {"FusedBatchNormV3", \
"FusedBatchNormGradV3"}; \
for (int i = 0; i < Fused_BN_ops.size(); ++i) { \
auto orig_op = CreateOp(Fused_BN_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("data_format", "" DATA_FORMAT ""); \
CheckRewrite(orig_op.get(), Fused_BN_ops[i]); \
} \
}
#define DATA_FORMAT "NCDHW"
REGISTER_TEST_ALL_TYPES(FusedBatchNormV3_5D_Negative_1);
#undef DATA_FORMAT
#define DATA_FORMAT "NDHWC"
REGISTER_TEST_ALL_TYPES(FusedBatchNormV3_5D_Negative_2);
#undef DATA_FORMAT
#undef REGISTER_TEST
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
594ee246-0ac1-404d-aa23-48f0f2123e9b | cpp | tensorflow/tensorflow | context | tensorflow/core/tfrt/mlrt/interpreter/context.cc | tensorflow/core/tfrt/mlrt/interpreter/context_test.cc | #include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
namespace mlrt {
namespace context_internal {
UserContextBase::~UserContextBase() = default;
}
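// Registers a kernel implementation under the given name. Get() DCHECK-fails
// if the name was never registered.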
void KernelRegistry::Register(absl::string_view name,
KernelImplementation kernel) {
map_.emplace(name, kernel);
}
KernelImplementation KernelRegistry::Get(absl::string_view name) const {
DCHECK(map_.contains(name)) << "Missing kernel in registry: " << name;
return map_.at(name);
}
void KernelRegistry::Merge(const KernelRegistry& other) {
map_.insert(other.map_.begin(), other.map_.end());
}
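// Pre-resolves every kernel name referenced by the executable and indexes its
// functions by name, so lookups during interpretation stay cheap.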
LoadedExecutable::LoadedExecutable(bc::Executable executable,
const KernelRegistry& kernel_registry)
: executable_(executable) {
kernels_.reserve(executable_.kernel_names().size());
for (auto kernel_name : executable_.kernel_names()) {
kernels_.push_back(kernel_registry.Get(kernel_name));
}
functions_.reserve(executable_.functions().size());
for (auto function : executable_.functions()) {
functions_[function.name().Get()] = function;
}
}
} | #include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
namespace mlrt {
namespace {
struct A : KernelFrame {
static constexpr char kName[] = "A";
using KernelFrame::KernelFrame;
void Invoke() {}
};
struct B : KernelFrame {
static constexpr char kName[] = "B";
using KernelFrame::KernelFrame;
void Invoke() {}
};
struct C : KernelFrame {
static constexpr char kName[] = "C";
using KernelFrame::KernelFrame;
void Invoke() {}
};
TEST(ContextTest, MergeKernelRegistry) {
KernelRegistry reg_a;
reg_a.Register<A>();
reg_a.Register<B>();
KernelRegistry reg_b;
reg_b.Register<B>();
reg_b.Register<C>();
EXPECT_TRUE(reg_a.Get(A::kName));
EXPECT_TRUE(reg_a.Get(B::kName));
reg_a.Merge(reg_b);
EXPECT_TRUE(reg_a.Get(A::kName));
EXPECT_TRUE(reg_a.Get(B::kName));
EXPECT_TRUE(reg_a.Get(C::kName));
}
struct TestContext0 : UserContext<TestContext0> {
int v = 0;
};
struct TestContext1 : UserContext<TestContext1> {
int v = 1;
};
TEST(ContextTest, UserContext) {
EXPECT_EQ(TestContext0::id(), 0);
EXPECT_EQ(TestContext1::id(), 1);
ExecutionContext execution_context(nullptr);
auto test_1 = std::make_unique<TestContext1>();
auto* test_1_ptr = test_1.get();
execution_context.AddUserContext(std::move(test_1));
auto test_0 = std::make_unique<TestContext0>();
auto* test_0_ptr = test_0.get();
execution_context.AddUserContext(std::move(test_0));
EXPECT_EQ(&execution_context.GetUserContext<TestContext0>(), test_0_ptr);
EXPECT_EQ(&execution_context.GetUserContext<TestContext1>(), test_1_ptr);
EXPECT_EQ(execution_context.GetUserContext<TestContext0>().v, 0);
EXPECT_EQ(execution_context.GetUserContext<TestContext1>().v, 1);
ExecutionContext execution_context_copy(
nullptr, execution_context.CopyUserContexts(),
execution_context.user_error_loggers());
EXPECT_NE(&execution_context_copy.GetUserContext<TestContext0>(), test_0_ptr);
EXPECT_NE(&execution_context_copy.GetUserContext<TestContext1>(), test_1_ptr);
EXPECT_EQ(execution_context_copy.GetUserContext<TestContext0>().v, 0);
EXPECT_EQ(execution_context_copy.GetUserContext<TestContext1>().v, 1);
}
TEST(ContextTest, PartialUserContext) {
EXPECT_EQ(TestContext0::id(), 0);
EXPECT_EQ(TestContext1::id(), 1);
ExecutionContext execution_context(nullptr);
auto test_1 = std::make_unique<TestContext1>();
auto* test_1_ptr = test_1.get();
execution_context.AddUserContext(std::move(test_1));
EXPECT_EQ(&execution_context.GetUserContext<TestContext1>(), test_1_ptr);
EXPECT_EQ(execution_context.GetUserContext<TestContext1>().v, 1);
ExecutionContext execution_context_copy(
nullptr, execution_context.CopyUserContexts(),
execution_context.user_error_loggers());
EXPECT_NE(&execution_context_copy.GetUserContext<TestContext1>(), test_1_ptr);
EXPECT_EQ(execution_context_copy.GetUserContext<TestContext1>().v, 1);
}
TEST(ContextTest, UserErrorLoggerCanBeCopied) {
int num_error_reported = 0;
ExecutionContext execution_context(nullptr);
execution_context.AddUserErrorLogger(
[&num_error_reported](absl::Status status) {
num_error_reported++;
LOG(INFO) << "User error logger called";
});
execution_context.LogError(absl::InternalError("Test error"));
ExecutionContext execution_context_copy(
nullptr, execution_context.CopyUserContexts(),
execution_context.user_error_loggers());
execution_context_copy.LogError(absl::InternalError("Test error"));
ASSERT_EQ(num_error_reported, 2);
}
TEST(ContextTest, NoUserErrorLoggerRunOk) {
ExecutionContext execution_context(nullptr);
execution_context.LogError(absl::InternalError("Test error"));
ExecutionContext execution_context_copy(
nullptr, execution_context.CopyUserContexts(),
execution_context.user_error_loggers());
execution_context_copy.LogError(absl::InternalError("Test error"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/context.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/context_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce1df6e4-04ce-46d9-852f-1d19564385cf | cpp | tensorflow/tensorflow | tensor_handle | tensorflow/core/common_runtime/eager/tensor_handle.cc | tensorflow/core/common_runtime/eager/tensor_handle_test.cc | #include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include <algorithm>
#include <cstddef>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "absl/types/variant.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#if !defined(IS_MOBILE_PLATFORM)
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h"
#endif
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace {
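// Returns the incarnation of a remote device, or 0 for null/local devices.
// Used to detect stale resource handles after a remote worker restarts.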
int64_t GetRemoteDeviceIncarnation(Device* device) {
if (device == nullptr || device->IsLocal()) return 0;
return device->attributes().incarnation();
}
string SafeDeviceDebugString(Device* device) {
if (device == nullptr) {
return "[]";
} else {
return device->DebugString();
}
}
}
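// A packed handle holds a reference on each component handle for the lifetime
// of the packed data.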
TensorHandle::PackedTensorHandleData::PackedTensorHandleData(
std::vector<TensorHandle*>&& handles, const TensorShape& shape)
: handles_(std::move(handles)), shape_(shape) {
for (auto* handle : handles_) {
handle->Ref();
}
}
TensorHandle::PackedTensorHandleData::~PackedTensorHandleData() {
for (auto* handle : handles_) {
handle->Unref();
}
}
Status TensorHandle::PackedTensorHandleData::Shape(TensorShape* shape) const {
*shape = shape_;
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::NumDims(int* num_dims) const {
*num_dims = shape_.dims();
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::Dim(int dim_index,
int64_t* dim) const {
*dim = shape_.dim_size(dim_index);
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::NumElements(
int64_t* num_elements) const {
*num_elements = shape_.num_elements();
return absl::OkStatus();
}
Status TensorHandle::PackedTensorHandleData::Unprotect() {
for (auto* handle : handles_) {
TF_RETURN_IF_ERROR(
std::visit([](auto& data) { return data.Unprotect(); }, handle->data_));
}
return absl::OkStatus();
}
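// Note: a poisoned handle is considered ready. Readiness only means the
// result (or error) is known, not that the handle is valid.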
bool TensorHandle::PackedTensorHandleData::IsReady() const {
{
tf_shared_lock l(mu_);
if (!is_poisoned_.ok()) {
return true;
}
}
for (auto* handle : handles_) {
if (!handle->IsReady()) {
return false;
}
}
return true;
}
Status TensorHandle::PackedTensorHandleData::WaitReady(
const char* caller) const {
{
tf_shared_lock l(mu_);
if (!is_poisoned_.ok()) {
return is_poisoned_;
}
}
for (auto* handle : handles_) {
TF_RETURN_IF_ERROR(handle->WaitReady(caller));
}
return absl::OkStatus();
}
void TensorHandle::PackedTensorHandleData::Poison(Status status) {
mutex_lock l(mu_);
is_poisoned_ = status;
}
string TensorHandle::PackedTensorHandleData::DebugString() const {
string debug_str = "PackedTensorHandleData: ";
for (const auto* handle : handles_) {
debug_str.append(
absl::StrCat(std::visit([](auto& data) { return data.DebugString(); },
handle->data_),
"; "));
}
return debug_str;
}
int TensorHandle::PackedTensorHandleData::NumPackedHandles() const {
return handles_.size();
}
Status TensorHandle::PackedTensorHandleData::ExtractPackedHandle(
const int index, TensorHandle** handle) const {
if (index < 0 || index >= handles_.size()) {
return errors::InvalidArgument("Expect an index within [0, ",
handles_.size(), "), but got ", index);
}
*handle = handles_.at(index);
return absl::OkStatus();
}
void TensorHandle::SetResourceHandleDtypeAndShape(
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes) {
handle_dtypes_and_shapes_ = std::move(dtypes_and_shapes);
}
Status TensorHandle::GetResourceHandleDtypesAndShapes(
std::vector<DtypeAndPartialTensorShape>* result) {
if (dtype != DT_RESOURCE) {
return errors::InvalidArgument(
"TensorHandle::GetResourceDtypeAndShape should be called on tensor "
"handles with data type DT_RESOURCE. Actual tensor: ",
dtype);
}
if (Type() != LOCAL) {
*result = handle_dtypes_and_shapes_;
return absl::OkStatus();
}
tsl::profiler::TraceMe activity(
"TensorHandle::GetResourceHandleInfo WaitReady",
tsl::profiler::TraceMeLevel::kVerbose);
auto& data = std::get<LocalTensorHandleData>(data_);
TF_RETURN_IF_ERROR(data.WaitReady("TensorHandle::GetResourceHandleInfo"));
*result = handle_dtypes_and_shapes_;
return absl::OkStatus();
}
int TensorHandle::NumPackedHandles() const {
if (Type() != PACKED) {
return 0;
}
return std::get<PackedTensorHandleData>(data_).NumPackedHandles();
}
Status TensorHandle::ExtractPackedHandle(const int index,
TensorHandle** handle) const {
if (Type() != PACKED) {
    return errors::Internal("Invalid ExtractPackedHandleOnDevice call on a ",
TypeString(), " handle: ", this);
}
return std::get<PackedTensorHandleData>(data_).ExtractPackedHandle(index,
handle);
}
TensorHandle* TensorHandle::CreateLocalHandle(const tensorflow::Tensor& t) {
tensorflow::Tensor tensor = t;
return CreateLocalHandle(std::move(tensor),
nullptr,
nullptr,
nullptr);
}
TensorHandle* TensorHandle::CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device,
EagerContext* ctx) {
return CreateLocalHandle(std::move(t), d, op_device, nullptr, ctx);
}
TensorHandle* TensorHandle::CreateLocalHandle(tensorflow::Tensor&& t, Device* d,
Device* op_device,
Device* resource_device,
EagerContext* ctx) {
if (t.dtype() == DT_RESOURCE && t.NumElements() > 0) {
return new TensorHandle(std::move(t), d, op_device, ctx);
} else {
return new TensorHandle(std::move(t), d, op_device, resource_device, ctx);
}
}
TensorHandle::TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
Device* resource_device, EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(t.dtype()),
device_((!ctx || d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(resource_device),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<LocalTensorHandleData>, std::move(t)) {
DVLOG(3) << "Creating Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_)
<< " tensor: " << t.DeviceSafeDebugString();
}
TensorHandle::TensorHandle(tensorflow::Tensor&& t, Device* d, Device* op_device,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(DT_RESOURCE),
device_((!ctx || d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(
GetResourceDevice(t.flat<class ResourceHandle>()(0), ctx)),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
handle_dtypes_and_shapes_(
t.flat<class ResourceHandle>()(0).dtypes_and_shapes()),
data_(absl::in_place_type<LocalTensorHandleData>, std::move(t)) {
DVLOG(3) << "Creating Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_)
<< " tensor: " << t.DeviceSafeDebugString();
}
TensorHandle* TensorHandle::CreateEmptyLocalHandle(Device* d, Device* op_device,
Device* resource_device,
tensorflow::DataType dtype,
EagerContext* ctx) {
return new TensorHandle(d, op_device, resource_device, dtype, ctx);
}
TensorHandle::TensorHandle(Device* d, Device* op_device,
Device* resource_device, tensorflow::DataType dtype,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_((d == ctx->HostCPU()) ? nullptr : d),
op_device_(op_device),
resource_device_(resource_device),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<LocalTensorHandleData>) {
DVLOG(3) << "Creating empty Local TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
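// Packs per-device handles into a single handle placed on a composite device
// built from the components' op devices. For DT_RESOURCE handles, the dtype
// and shape metadata is taken from the first component.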
Status TensorHandle::CreatePackedHandle(std::vector<TensorHandle*>&& handles,
const tensorflow::DataType dtype,
const tensorflow::TensorShape& shape,
const string& device_name,
EagerContext* ctx,
TensorHandle** packed_handle) {
if (handles.empty()) {
return errors::InvalidArgument("Handles should not be empty.");
}
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (dtype == DT_RESOURCE) {
TF_RETURN_IF_ERROR(
handles.at(0)->GetResourceHandleDtypesAndShapes(&dtypes_and_shapes));
}
std::vector<string> devices;
devices.reserve(handles.size());
for (auto* handle : handles) {
devices.push_back(handle->op_device() ? handle->op_device()->name()
: ctx->HostCPU()->name());
}
CompositeDevice* composite_device = nullptr;
TF_RETURN_IF_ERROR(ctx->FindOrCreateCompositeDevice(devices, device_name,
&composite_device));
*packed_handle =
new TensorHandle(std::move(handles), composite_device, dtype, shape, ctx);
(*packed_handle)
->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
return absl::OkStatus();
}
Status TensorHandle::CreatePackedHandle(std::vector<TensorHandle*>&& handles,
EagerContext* ctx,
TensorHandle** packed_handle) {
if (handles.empty()) {
return errors::InvalidArgument("Handles should not be empty.");
}
tensorflow::DataType dtype = handles.at(0)->dtype;
tensorflow::TensorShape shape;
TF_RETURN_IF_ERROR(handles.at(0)->Shape(&shape));
return CreatePackedHandle(std::move(handles), dtype, shape,
"", ctx, packed_handle);
}
TensorHandle::TensorHandle(std::vector<TensorHandle*>&& handles, Device* device,
const tensorflow::DataType dtype,
const tensorflow::TensorShape& shape,
EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(device),
op_device_(device),
resource_device_(dtype == DT_RESOURCE ? device : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<PackedTensorHandleData>, std::move(handles),
shape) {
DVLOG(3) << "Creating a packed TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
#if !defined(IS_MOBILE_PLATFORM)
TensorHandle* TensorHandle::CreateUnshapedRemoteHandle(
int64_t op_id, int32_t output_num, const string& remote_task,
tensorflow::DataType dtype, Device* d, EagerContext* ctx,
const bool unknown_device) {
return new TensorHandle(op_id, output_num, remote_task, dtype, d, ctx,
unknown_device);
}
TensorHandle::TensorHandle(int64_t op_id, int32_t output_num,
const string& remote_task,
tensorflow::DataType dtype, Device* d,
EagerContext* ctx, const bool unknown_device)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(d),
op_device_(d),
resource_device_(dtype == DT_RESOURCE ? d : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
unknown_device_(unknown_device),
ctx_(ctx),
data_(absl::in_place_type<RemoteTensorHandleData>, op_id, output_num,
remote_task, ctx) {
DVLOG(3) << "Creating Unshaped Remote TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
TensorHandle* TensorHandle::CreateLazyRemoteHandle(
int64_t op_id, int32_t output_num, tensorflow::DataType dtype, Device* d,
const bool is_ready, EagerContext* ctx) {
return new TensorHandle(op_id, output_num, dtype, d, is_ready, ctx);
}
TensorHandle::TensorHandle(int64_t op_id, int32_t output_num,
tensorflow::DataType dtype, Device* d,
const bool is_ready, EagerContext* ctx)
: ImmediateExecutionTensorHandle(kEager),
dtype(dtype),
device_(d),
op_device_(d),
resource_device_(dtype == DT_RESOURCE ? d : nullptr),
resource_remote_device_incarnation_(
GetRemoteDeviceIncarnation(resource_device_)),
ctx_(ctx),
data_(absl::in_place_type<RemoteTensorHandleData>, op_id, output_num,
ctx->GetContextViewId(), is_ready) {
DVLOG(3) << "Creating Lazy Remote TensorHandle: " << this
<< " device: " << SafeDeviceDebugString(device_);
}
#endif
TensorHandle::~TensorHandle() { DVLOG(3) << "Deleting tensor handle " << this; }
void TensorHandle::Release() {
DVLOG(3) << "Releasing tensor handle " << this;
Unref();
}
tensorflow::DataType TensorHandle::DataType() const { return dtype; }
bool TensorHandle::IsReady() const {
return std::visit([](auto& data) { return data.IsReady(); }, data_);
}
Status TensorHandle::WaitReady(const char* caller) const {
return std::visit([caller](auto& data) { return data.WaitReady(caller); },
data_);
}
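// The variant index of data_ encodes the handle kind:
// 0 = LOCAL, 1 = PACKED, 2 = REMOTE.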
TensorHandle::HandleType TensorHandle::Type() const {
if (data_.index() == 0) {
return LOCAL;
} else if (data_.index() == 1) {
return PACKED;
} else {
return REMOTE;
}
}
string TensorHandle::TypeString() const {
if (data_.index() == 0) {
return "LOCAL";
} else if (data_.index() == 1) {
return "PACKED";
} else {
return "REMOTE";
}
}
Status TensorHandle::Tensor(const tensorflow::Tensor** t) const {
DVLOG(3) << "Tensor on TensorHandle: " << this;
if (Type() != LOCAL) {
return errors::Internal("Invalid Tensor call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.Tensor(t);
}
Status TensorHandle::TensorFromDevice(const Device* d,
const tensorflow::Tensor** t) const {
DVLOG(3) << "TensorFromDevice on TensorHandle: " << this << " device: " << d;
if (d == device_) {
if (Type() != LOCAL) {
return errors::Internal("Invalid Tensor call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.Tensor(t);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in Tensor call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.Tensor(t);
}
Status TensorHandle::TensorValue(const Device* d, tensorflow::TensorValue* t) {
DVLOG(3) << "TensorValue on TensorHandle: " << this << " device: " << d;
if (d == device_) {
if (Type() != LOCAL) {
return errors::Internal("Invalid TensorValue call on a ", TypeString(),
" handle: ", this);
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.TensorValue(t);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in TensorValue call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.TensorValue(t);
}
Status TensorHandle::WaitUnknownDevice() const {
if (unknown_device_) {
TF_RETURN_IF_ERROR(std::visit(
[](auto& data) {
return data.WaitReady("TensorHandle::UnknownDevice");
},
data_));
}
return absl::OkStatus();
}
Device* TensorHandle::DeviceOrHostCPU(const EagerContext& ctx) const {
return (device_ == nullptr) ? ctx.HostCPU() : device_;
}
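// If the handle is not ready yet, answer shape queries from the statically
// inferred shape where possible so callers do not block on async execution.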
Status TensorHandle::Shape(tensorflow::TensorShape* shape) {
if (!IsReady() && inference_shape_.IsFullyDefined()) {
bool fill = inference_shape_.AsTensorShape(shape);
DCHECK(fill);
return absl::OkStatus();
} else {
return std::visit([shape](auto& data) { return data.Shape(shape); }, data_);
}
}
Status TensorHandle::InferenceShape(
shape_inference::InferenceContext* const inference_context,
shape_inference::ShapeHandle* shape_handle) {
if (IsReady()) {
std::vector<shape_inference::DimensionHandle> dims_handle;
int num_dims;
TF_RETURN_IF_ERROR(NumDims(&num_dims));
for (int i = 0; i < num_dims; i++) {
int64_t dims;
TF_RETURN_IF_ERROR(Dim(i, &dims));
dims_handle.push_back(inference_context->MakeDim(dims));
}
*shape_handle = inference_context->MakeShape(dims_handle);
return absl::OkStatus();
} else {
if (inference_shape_.unknown_rank()) {
*shape_handle = inference_context->UnknownShape();
return absl::OkStatus();
}
std::vector<shape_inference::DimensionHandle> dims_handle(
inference_shape_.dims());
for (int i = 0; i < dims_handle.size(); i++) {
dims_handle[i] = inference_context->MakeDim(inference_shape_.dim_size(i));
}
*shape_handle = inference_context->MakeShape(dims_handle);
return absl::OkStatus();
}
}
void TensorHandle::SetInferenceShape(
shape_inference::InferenceContext* const inference_context,
const shape_inference::ShapeHandle& shape_handle) {
auto num_dims = inference_context->Rank(shape_handle);
std::vector<int64_t> dims;
if (num_dims == shape_inference::InferenceContext::kUnknownRank) {
inference_shape_ = PartialTensorShape();
return;
}
DCHECK_GE(num_dims, 0);
dims.resize(num_dims);
for (size_t i = 0; i < num_dims; ++i) {
dims[i] = inference_context->Value(inference_context->Dim(shape_handle, i));
}
auto s = PartialTensorShape::MakePartialShape(dims.data(), num_dims,
&inference_shape_);
TF_DCHECK_OK(s);
}
Status TensorHandle::CopyInferenceShape(TensorHandle* other) {
if (IsReady()) {
return absl::OkStatus();
}
if (other->IsReady()) {
TensorShape other_shape;
TF_RETURN_IF_ERROR(other->Shape(&other_shape));
inference_shape_ = other_shape;
} else {
inference_shape_ = other->inference_shape_;
}
return absl::OkStatus();
}
Status TensorHandle::Shape(tensorflow::PartialTensorShape* shape) const {
DCHECK(shape != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank()) {
*shape = inference_shape_;
return absl::OkStatus();
} else {
auto result = std::visit(
[](auto& data) {
TensorShape shape;
Status s = data.Shape(&shape);
return std::make_pair(shape, s);
},
data_);
TF_RETURN_IF_ERROR(result.second);
*shape = result.first;
}
return absl::OkStatus();
}
Status TensorHandle::NumDims(int* num_dims) const {
DCHECK(num_dims != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank()) {
*num_dims = inference_shape_.dims();
return absl::OkStatus();
} else {
return std::visit([num_dims](auto& data) { return data.NumDims(num_dims); },
data_);
}
}
Status TensorHandle::Dim(int dim_index, int64_t* dim) const {
DCHECK(dim != nullptr);
if (!IsReady() && !inference_shape_.unknown_rank() &&
inference_shape_.dim_size(dim_index) != -1) {
*dim = inference_shape_.dim_size(dim_index);
return absl::OkStatus();
} else {
return std::visit(
[dim_index, dim](auto& data) { return data.Dim(dim_index, dim); },
data_);
}
}
Status TensorHandle::NumElements(int64_t* num_elements) const {
DCHECK(num_elements != nullptr);
if (!IsReady() && inference_shape_.IsFullyDefined()) {
*num_elements = inference_shape_.num_elements();
return absl::OkStatus();
} else {
return std::visit(
[num_elements](auto& data) { return data.NumElements(num_elements); },
data_);
}
}
Status TensorHandle::Unprotect(const Device* d) {
DVLOG(3) << "Unprotect on TensorHandle: " << this << " device: " << d;
if (d == device_) {
return std::visit([](auto& data) { return data.Unprotect(); }, data_);
}
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal("Invalid device: ", d,
" in Unprotect call to handle: ", this);
}
auto& mirror = elem->second;
return mirror.Unprotect();
}
bool TensorHandle::HasLocalMirror(const Device* d) const {
DVLOG(3) << "HasLocalMirror on TensorHandle: " << this << " device: " << d;
tf_shared_lock l(mu_);
return local_mirrors_.find(d) != local_mirrors_.end();
}
Status TensorHandle::AddEmptyLocalMirror(const Device* d) {
DVLOG(3) << "AddEmptyLocalMirror on TensorHandle: " << this
<< " device: " << d;
if (d == device_) {
return errors::Internal("Cannot add mirror for primary device.");
}
mutex_lock l(mu_);
if (local_mirrors_.find(d) != local_mirrors_.end()) {
return errors::AlreadyExists("Attempted to duplicate a local mirror.");
}
local_mirrors_.emplace(std::piecewise_construct, std::forward_as_tuple(d),
std::forward_as_tuple());
return absl::OkStatus();
}
#if !defined(IS_MOBILE_PLATFORM)
Status TensorHandle::RemoteAddress(const Device* d, const bool wait_until_ready,
int64_t* op_id, int32* output_num) const {
DVLOG(3) << "RemoteAddress on TensorHandle: " << this << " device: " << d
<< " " << d->name();
const tensorflow::RemoteTensorHandleData* remote_data = nullptr;
if (d != device_) {
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
remote_data = &mirror->second;
} else {
return errors::FailedPrecondition(
"Could not find remote mirror for specified device");
}
}
if (remote_data != nullptr) {
auto status =
remote_data->OpIdAndOutputNum(wait_until_ready, op_id, output_num);
if (!status.ok()) {
return errors::Internal(
absl::StrCat("Remote address looked up from remote mirrors found to "
"be poisoned with status ",
status.ToString()));
} else {
return absl::OkStatus();
}
}
if (Type() != REMOTE) {
return errors::InvalidArgument("Primary device is not remote");
}
auto& data = std::get<RemoteTensorHandleData>(data_);
auto status = data.OpIdAndOutputNum(wait_until_ready, op_id, output_num);
if (!status.ok()) {
return errors::Internal(
"Remote address looked up from remote data found to be poisoned");
} else {
return absl::OkStatus();
}
}
bool TensorHandle::HasRemoteMirror(const Device* d,
uint64 context_view_id) const {
DVLOG(3) << "HasRemoteMirror on TensorHandle: " << this << " device: " << d
<< " " << d->name();
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
if (mirror->second.context_view_id() != context_view_id) {
return false;
}
return true;
}
return false;
}
bool TensorHandle::HasResourceShapeMirror(const Device* d,
uint64 context_view_id) const {
DVLOG(3) << "HasResourceShapeMirror on TensorHandle: " << this
<< " device: " << d << " " << d->name();
tf_shared_lock l(mu_);
auto mirror = resource_shape_mirrors_.find(d->name());
if (mirror != resource_shape_mirrors_.end()) {
if (mirror->second.context_view_id() != context_view_id) {
return false;
}
return true;
}
return false;
}
Status TensorHandle::AddUnshapedRemoteMirror(const Device* d, int64_t op_id,
int output_num,
const string& remote_task,
EagerContext* ctx) {
DVLOG(3) << "AddUnshapedRemoteMirror on TensorHandle: " << this
<< " device: " << d << " " << d->name() << " op_id: " << op_id
<< " output_num: " << output_num;
mutex_lock l(mu_);
auto remote_mirror = remote_mirrors_.find(d->name());
if (remote_mirror != remote_mirrors_.end()) {
if (remote_mirror->second.context_view_id() > ctx->GetContextId()) {
return errors::Internal(
"Attempted to duplicate a remote mirror with inconsistent "
"arguments.");
}
remote_mirrors_.erase(remote_mirror);
}
remote_mirrors_.emplace(
std::piecewise_construct, std::forward_as_tuple(d->name()),
std::forward_as_tuple(op_id, output_num, remote_task, ctx));
return absl::OkStatus();
}
Status TensorHandle::AddResourceShapeMirror(const Device* d, int64_t op_id,
int output_num, EagerContext* ctx) {
DVLOG(3) << "AddResourceShapeMirror on TensorHandle: " << this;
mutex_lock l(mu_);
auto mirror = resource_shape_mirrors_.find(d->name());
if (mirror != resource_shape_mirrors_.end()) {
if (mirror->second.context_view_id() == ctx->GetContextViewId()) {
int64_t existing_op_id;
int existing_output_num;
TF_RETURN_IF_ERROR(mirror->second.OpIdAndOutputNum(false, &existing_op_id,
&existing_output_num));
if (op_id == existing_op_id && output_num == existing_output_num) {
return absl::OkStatus();
}
return absl::InternalError(
"Attempted to duplicate a resource shape mirror.");
}
resource_shape_mirrors_.erase(mirror);
}
resource_shape_mirrors_.emplace(
std::piecewise_construct, std::forward_as_tuple(d->name()),
std::forward_as_tuple(op_id, output_num, ctx->GetContextViewId(),
true));
return absl::OkStatus();
}
Status TensorHandle::SetRemoteShape(const TensorShape& shape, const Device* d,
uint64 context_view_id) {
return SetRemoteShapeAndDevice(shape, d, context_view_id, "");
}
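// Applies the shape to the remote data or the mirror matching |d|. An update
// carrying a newer context view id than the mirror's is an internal error; an
// update carrying an older one is ignored with a warning. A non-empty
// |op_device| may only be set on handles created with an unknown device.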
Status TensorHandle::SetRemoteShapeAndDevice(const TensorShape& shape,
const Device* d,
uint64 context_view_id,
string op_device) {
DVLOG(3) << "SetRemoteShape on TensorHandle: " << this << " device: " << d
<< " " << d->name();
if (d != device_) {
tf_shared_lock l(mu_);
auto remote_mirror = remote_mirrors_.find(d->name());
if (remote_mirror == remote_mirrors_.end()) {
return absl::OkStatus();
}
auto& mirror = remote_mirror->second;
if (mirror.context_view_id() == context_view_id) {
auto status = mirror.SetShape(shape);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status.message()
<< ". This should never occur.";
}
return status;
} else if (mirror.context_view_id() < context_view_id) {
return errors::Internal(
absl::Substitute("Unexpected context_view_id ($0) which should not "
"be newer than the "
"one ($1) associated to the remote mirror.",
context_view_id, mirror.context_view_id()));
} else {
LOG(WARNING) << "SetRemoteShape is ignored for a remote mirror that is "
"associated with a newer context_view_id.";
}
return absl::OkStatus();
}
if (Type() != REMOTE) {
return errors::InvalidArgument(
"SetRemoteShape should only be called on remote handles.");
}
auto& data = std::get<RemoteTensorHandleData>(data_);
if (op_device.empty()) {
auto status = data.SetShape(shape);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status.message()
<< ". This should never occur.";
}
return status;
} else {
if (!unknown_device_) {
return errors::Internal("Cannot reset known devices.");
}
Device* device;
TF_RETURN_IF_ERROR(ctx_->FindDeviceFromName(op_device.c_str(), &device));
device_ = device;
op_device_ = device;
resource_device_ = dtype == DT_RESOURCE ? device : nullptr;
resource_remote_device_incarnation_ =
GetRemoteDeviceIncarnation(resource_device_);
string remote_task;
if (!DeviceNameUtils::GetTaskName(device->parsed_name(), &remote_task)) {
return errors::InvalidArgument(
"Unable to find remote task corresponding to device ",
device->name());
}
auto status = data.SetShapeAndRemoteTask(shape, remote_task);
if (!status.ok()) {
LOG(ERROR) << "SetShape returned " << status
<< ". This should never occur.";
}
return status;
}
}
void TensorHandle::PoisonRemote(Status status, const Device* d,
uint64 context_view_id) {
DVLOG(3) << "PoisonRemote on TensorHandle: " << this << " device: " << d
<< " " << d->name();
if (d == device_) {
DCHECK(Type() == REMOTE)
<< "Poison can only be on remote handles: " << this;
auto& data = std::get<RemoteTensorHandleData>(data_);
data.Poison(status);
} else {
tf_shared_lock l(mu_);
auto mirror = remote_mirrors_.find(d->name());
if (mirror != remote_mirrors_.end()) {
if (mirror->second.context_view_id() == context_view_id) {
mirror->second.Poison(status);
}
}
}
}
#endif
Status TensorHandle::AddLocalMirror(tensorflow::Tensor&& tensor,
const Device* d) {
if (d == device_) {
return errors::Internal(
"Local mirror assign conflicts with primary device.");
}
mutex_lock l(mu_);
auto elem =
local_mirrors_.emplace(std::piecewise_construct, std::forward_as_tuple(d),
std::forward_as_tuple(std::move(tensor)));
if (!elem.second) {
return errors::AlreadyExists("Attempted to add existing mirror.");
}
return absl::OkStatus();
}
Status TensorHandle::SetTensor(tensorflow::Tensor&& t, const Device* d) {
DVLOG(3) << "SetTensor on TensorHandle: " << this << " device: " << d;
if (d == device_) {
DCHECK(Type() == LOCAL) << "SetTensor is not called on local handles.";
if (t.dtype() == DT_RESOURCE && t.NumElements() > 0) {
auto& resource_handle = t.flat<class ResourceHandle>()(0);
handle_dtypes_and_shapes_ = resource_handle.dtypes_and_shapes();
}
auto& data = std::get<LocalTensorHandleData>(data_);
return data.SetTensor(std::move(t));
} else {
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
if (elem == local_mirrors_.end()) {
return errors::Internal(
"Attempted to set tensor for non-existent local mirror.");
}
auto& mirror = elem->second;
return mirror.SetTensor(std::move(t));
}
return absl::OkStatus();
}
void TensorHandle::Poison(Status status, const Device* d) {
DVLOG(3) << "Poison on TensorHandle: " << this << " device: " << d;
if (d == device_) {
DCHECK(Type() != REMOTE) << "Poison can only be on local handles: " << this;
std::visit([status](auto& data) { data.Poison(status); }, data_);
} else {
tf_shared_lock l(mu_);
auto elem = local_mirrors_.find(d);
DCHECK(elem != local_mirrors_.end())
<< "Attempted to poison non-existent local mirror, handle: " << this
<< " device: " << d;
auto& mirror = elem->second;
mirror.Poison(status);
}
}
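// Copies the tensor to |d| via DMA. Same-device and CPU-to-CPU copies are
// short-circuited, and DT_VARIANT tensors are staged through host memory.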
Status TensorHandle::CopyToDevice(const EagerContext& ctx,
tensorflow::Device* d,
tensorflow::Tensor* output) const {
tensorflow::Device* dstd = (d == nullptr) ? ctx.HostCPU() : d;
tensorflow::Device* srcd = DeviceOrHostCPU(ctx);
const bool dst_cpu = dstd->tensorflow_accelerator_device_info() == nullptr;
const bool src_cpu = srcd->tensorflow_accelerator_device_info() == nullptr;
bool is_same_device =
(srcd == dstd) || (srcd->name() == dstd->name()) || (dst_cpu && src_cpu);
const tensorflow::Tensor* src = nullptr;
TF_RETURN_IF_ERROR(Tensor(&src));
if (is_same_device) {
*output = *src;
return absl::OkStatus();
}
if (!dst_cpu && (src->dtype() != tensorflow::DT_VARIANT &&
!tensorflow::DataTypeCanUseMemcpy(src->dtype()))) {
return tensorflow::errors::InvalidArgument(
"Can't copy Tensor with type ",
tensorflow::DataTypeString(src->dtype()), " to device ", dstd->name(),
".");
}
tensorflow::AllocatorAttributes attr;
if (src->dtype() == tensorflow::DT_VARIANT) {
attr.set_on_host(true);
}
const auto* dstd_info = dstd->tensorflow_accelerator_device_info();
tensorflow::Tensor dst(dstd->GetAllocator(attr), src->dtype(), src->shape());
if (src->shape().num_elements() == 0) {
*output = dst;
return absl::OkStatus();
}
tensorflow::DeviceContext* src_device_context = nullptr;
if (!src_cpu) {
src_device_context =
srcd->tensorflow_accelerator_device_info()->default_context;
}
tensorflow::DeviceContext* dst_device_context = nullptr;
if (!dst_cpu) {
if (dstd_info->use_pjrt_tensor_buffer && DataType() != DT_INT4 &&
DataType() != DT_UINT4) {
dst_device_context = dstd_info->pjrt_context;
} else {
dst_device_context = dstd_info->default_context;
}
}
TF_RETURN_IF_ERROR(srcd->Sync());
tensorflow::Notification n;
tensorflow::Status status;
tensorflow::CopyTensor::ViaDMA("copy", src_device_context, dst_device_context,
srcd, dstd, tensorflow::AllocatorAttributes(),
tensorflow::AllocatorAttributes(), src, &dst,
0 ,
[&status, &n](const tensorflow::Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (status.ok()) {
*output = dst;
return absl::OkStatus();
}
return status;
}
Device* GetResourceDevice(const ResourceHandle& handle, EagerContext* ctx) {
if (ctx == nullptr) {
return nullptr;
}
Device* device = nullptr;
if (!ctx->FindDeviceFromName(handle.device().c_str(), &device).ok()) {
LOG(ERROR) << "Cannot find resource device: " << handle.device() << ".";
return nullptr;
}
return device;
}
const char* TensorHandle::DeviceName(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? "/job:localhost/replica:0/task:0/device:CPU:0"
: d->name().c_str();
}
const char* TensorHandle::BackingDeviceName(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = device();
return (d == nullptr) ? "/job:localhost/replica:0/task:0/device:CPU:0"
: d->name().c_str();
}
const char* TensorHandle::DeviceType(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? "CPU" : d->parsed_name().type.c_str();
}
int TensorHandle::DeviceId(Status* status) const {
status->Update(WaitUnknownDevice());
tensorflow::Device* d = op_device();
return (d == nullptr) ? 0 : d->parsed_name().id;
}
} | #include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
TEST(TensorHandle_ShapeTest, AsyncShape) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
for (int64_t a = 0; a < t.shape().dim_size(0); a++) {
for (int64_t b = 0; b < t.shape().dim_size(1); b++) {
t.matrix<uint16>()(a, b) = uint16(a * b);
}
}
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
TensorHandle* sync_th =
TensorHandle::CreateLocalHandle(std::move(t), nullptr, nullptr, ctx);
absl::Cleanup sync_th_cleanup = [&]() { sync_th->Unref(); };
TensorHandle* async_th = TensorHandle::CreateEmptyLocalHandle(
nullptr, nullptr, nullptr, DataType::DT_UINT16, ctx);
absl::Cleanup async_th_cleanup = [&]() { async_th->Unref(); };
EXPECT_TRUE(async_th->CopyInferenceShape(sync_th).ok());
TensorShape sync_shape;
TensorShape async_shape;
EXPECT_TRUE(sync_th->Shape(&sync_shape).ok());
EXPECT_TRUE(async_th->Shape(&async_shape).ok());
EXPECT_EQ(sync_shape, async_shape);
int num_dims = -1;
EXPECT_TRUE(async_th->NumDims(&num_dims).ok());
EXPECT_EQ(num_dims, 2);
int64_t num_elements = -1;
EXPECT_TRUE(async_th->NumElements(&num_elements).ok());
EXPECT_EQ(num_elements, 4);
}
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, bool is_local)
: Device(nullptr, attr), is_local_(is_local) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
bool IsLocal() const override { return is_local_; }
private:
const bool is_local_;
};
static std::unique_ptr<FakeDevice> CreateDevice(const char* type,
const char* name,
bool is_local = true) {
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
int64_t incarnation = random::New64();
while (incarnation == 0) {
incarnation = random::New64();
}
attr.set_incarnation(incarnation);
return std::make_unique<FakeDevice>(attr, is_local);
}
}
class PackedTensorHandleTest : public ::testing::Test {
public:
PackedTensorHandleTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(CreateDevice("CPU", host_name_));
for (const char* name : device_names_) {
devices.push_back(CreateDevice("GPU", name));
}
device_mgr_ = new StaticDeviceMgr(std::move(devices));
context_ = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr_,
false, nullptr,
nullptr, nullptr,
true);
}
~PackedTensorHandleTest() override {
delete device_mgr_;
context_->Unref();
}
EagerContext* context() { return context_; }
std::vector<Device*> ListGPUDevices() const {
auto all_devices = device_mgr_->ListDevices();
return std::vector<Device*>(all_devices.begin() + 1, all_devices.end());
}
bool IsReady(TensorHandle* handle) const { return handle->IsReady(); }
Status WaitReady(TensorHandle* handle) const {
return handle->WaitReady("Test");
}
private:
const std::vector<const char*> device_names_ = {
"/job:worker/replica:0/task:0/device:GPU:0",
"/job:worker/replica:0/task:0/device:GPU:1",
"/job:worker/replica:0/task:1/device:GPU:0",
"/job:worker/replica:0/task:1/device:GPU:1"};
const char* host_name_ = "/job:worker/replica:0/task:0/device:CPU:0";
StaticDeviceMgr* device_mgr_;
EagerContext* context_;
};
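// Packs two local and two remote resource handles and checks the composite
// device placement, resource metadata, and that readiness waits on all
// components.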
TEST_F(PackedTensorHandleTest, PackedHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
DtypeAndPartialTensorShape dtype_and_shape = {DT_FLOAT, {2, 2}};
std::vector<TensorHandle*> handles;
Tensor t0(dtype, shape);
Device* d0 = ListGPUDevices().at(0);
TensorHandle* h0 =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context());
absl::Cleanup h0_cleanup = [&]() { h0->Unref(); };
h0->SetResourceHandleDtypeAndShape({dtype_and_shape});
handles.push_back(h0);
Tensor t1(dtype, shape);
Device* d1 = ListGPUDevices().at(1);
TensorHandle* h1 =
TensorHandle::CreateLocalHandle(std::move(t1), d1, d1, d1, context());
absl::Cleanup h1_cleanup = [&]() { h1->Unref(); };
h1->SetResourceHandleDtypeAndShape({dtype_and_shape});
handles.push_back(h1);
const string remote_task = "/job:worker/replica:0/task:1";
Device* d2 = ListGPUDevices().at(2);
TensorHandle* h2 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d2, context());
absl::Cleanup h2_cleanup = [&]() { h2->Unref(); };
handles.push_back(h2);
Device* d3 = ListGPUDevices().at(3);
TensorHandle* h3 = TensorHandle::CreateUnshapedRemoteHandle(
1, 0, remote_task, dtype, d3, context());
absl::Cleanup h3_cleanup = [&]() { h3->Unref(); };
handles.push_back(h3);
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
EXPECT_EQ(packed_handle->NumPackedHandles(), 4);
EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED);
EXPECT_EQ(packed_handle->dtype, dtype);
TensorShape packed_shape;
TF_ASSERT_OK(packed_handle->Shape(&packed_shape));
EXPECT_EQ(packed_shape, shape);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
TF_ASSERT_OK(
packed_handle->GetResourceHandleDtypesAndShapes(&dtypes_and_shapes));
EXPECT_EQ(dtypes_and_shapes.size(), 1);
EXPECT_EQ(dtypes_and_shapes.at(0).dtype, DT_FLOAT);
EXPECT_EQ(dtypes_and_shapes.at(0).shape.IsIdenticalTo({2, 2}), true);
CompositeDevice* device =
reinterpret_cast<CompositeDevice*>(packed_handle->device());
EXPECT_EQ(device->name(), "/job:worker/replica:0/task:0/device:COMPOSITE:0");
EXPECT_EQ(device->underlying_devices()->size(), 4);
const std::vector<TensorHandle::HandleType> expected_handle_types = {
TensorHandle::LOCAL, TensorHandle::LOCAL, TensorHandle::REMOTE,
TensorHandle::REMOTE};
for (int i = 0; i < packed_handle->NumPackedHandles(); ++i) {
TensorHandle* h = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(i, &h));
EXPECT_EQ(h->device(), ListGPUDevices().at(i));
EXPECT_EQ(h->Type(), expected_handle_types.at(i));
EXPECT_EQ(h->FullType().type_id(), TFT_UNSET);
}
EXPECT_FALSE(IsReady(packed_handle));
TF_ASSERT_OK(h2->SetRemoteShape(shape, ListGPUDevices().at(2),
context()->GetContextViewId()));
EXPECT_FALSE(IsReady(packed_handle));
TF_ASSERT_OK(h3->SetRemoteShape(shape, ListGPUDevices().at(3),
context()->GetContextViewId()));
EXPECT_TRUE(IsReady(packed_handle));
}
TEST_F(PackedTensorHandleTest, PackedSingleHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t(dtype, shape);
Device* d = ListGPUDevices().at(0);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, context());
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
std::vector<TensorHandle*> handles = {h};
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED);
EXPECT_EQ(packed_handle->dtype, dtype);
TensorShape packed_shape;
TF_ASSERT_OK(packed_handle->Shape(&packed_shape));
EXPECT_EQ(packed_shape, shape);
CompositeDevice* device =
reinterpret_cast<CompositeDevice*>(packed_handle->device());
EXPECT_EQ(device->name(), "/job:worker/replica:0/task:0/device:COMPOSITE:0");
EXPECT_EQ(device->underlying_devices()->size(), 1);
EXPECT_EQ(packed_handle->NumPackedHandles(), 1);
TensorHandle* h0 = nullptr;
TF_ASSERT_OK(packed_handle->ExtractPackedHandle(0, &h0));
EXPECT_EQ(h0->device(), d);
EXPECT_TRUE(IsReady(packed_handle));
}
TEST_F(PackedTensorHandleTest, PoisonHandle) {
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t(dtype, shape);
Device* d = ListGPUDevices().at(0);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, context());
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
std::vector<TensorHandle*> handles = {h};
TensorHandle* packed_handle = nullptr;
TF_EXPECT_OK(TensorHandle::CreatePackedHandle(std::move(handles), context(),
&packed_handle));
absl::Cleanup packed_handle_cleanup = [&]() { packed_handle->Unref(); };
TF_EXPECT_OK(WaitReady(packed_handle));
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
packed_handle->Poison(fake_failure_status, packed_handle->device());
EXPECT_THAT(WaitReady(packed_handle),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
TEST(TensorHandle_ResourceDeviceTest, OnLocalDevice) {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr local_device_mgr(std::move(d0));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {2};
Tensor t(dtype, shape);
Device* d = local_device_mgr.ListDevices()[0];
TensorHandle* th =
TensorHandle::CreateLocalHandle(std::move(t), d, d, d, ctx);
absl::Cleanup th_cleanup = [&]() { th->Unref(); };
EXPECT_EQ(0, th->resource_remote_device_incarnation());
EXPECT_TRUE(local_device_mgr.ContainsDevice(
th->resource_device()->attributes().incarnation()));
std::unique_ptr<Device> d1(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr new_device_mgr(std::move(d1));
EXPECT_FALSE(new_device_mgr.ContainsDevice(
th->resource_device()->attributes().incarnation()));
}
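// A remote resource handle records the remote device's incarnation, which
// stays resolvable until that device is removed from the dynamic device
// manager.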
TEST(TensorHandle_ResourceDeviceTest, OnRemoteDevice) {
std::unique_ptr<Device> d_local(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
StaticDeviceMgr local_device_mgr(std::move(d_local));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:worker/task:0/device:CPU:0", false));
Device* d0_ptr = d0.get();
std::unique_ptr<Device> d1(
CreateDevice("CPU", "/job:worker/task:1/device:CPU:0", false));
Device* d1_ptr = d1.get();
DynamicDeviceMgr remote_device_mgr;
std::vector<std::unique_ptr<Device>> vector_d0;
vector_d0.push_back(std::move(d0));
TF_ASSERT_OK(remote_device_mgr.AddDevices(std::move(vector_d0)));
TensorHandle* th0 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, "", DT_RESOURCE, d0_ptr, ctx);
absl::Cleanup th0_cleanup = [&]() { th0->Unref(); };
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
std::vector<std::unique_ptr<Device>> vector_d1;
vector_d1.push_back(std::move(d1));
TF_ASSERT_OK(remote_device_mgr.AddDevices(std::move(vector_d1)));
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
TensorHandle* th1 = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, "", DT_RESOURCE, d1_ptr, ctx);
absl::Cleanup th1_cleanup = [&]() { th1->Unref(); };
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th1->resource_remote_device_incarnation()));
std::vector<Device*> remove_d1{d1_ptr};
TF_ASSERT_OK(remote_device_mgr.RemoveDevices(std::move(remove_d1)));
EXPECT_FALSE(remote_device_mgr.ContainsDevice(
th1->resource_remote_device_incarnation()));
EXPECT_TRUE(remote_device_mgr.ContainsDevice(
th0->resource_remote_device_incarnation()));
}
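// Fixture providing a three-task worker job for the remote tensor handle
// tests below.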
class RemoteTensorHandleTest : public ::testing::Test {
public:
RemoteTensorHandleTest() {
std::vector<std::unique_ptr<Device>> devices;
for (const char* name : device_names_) {
devices.push_back(CreateDevice("CPU", name));
}
device_mgr_ = new StaticDeviceMgr(std::move(devices));
context_ = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr_,
false, nullptr,
nullptr, nullptr,
true);
}
~RemoteTensorHandleTest() override {
delete device_mgr_;
context_->Unref();
}
EagerContext* context() { return context_; }
std::vector<Device*> ListDevices() const {
return device_mgr_->ListDevices();
}
private:
const std::vector<const char*> device_names_ = {
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"};
StaticDeviceMgr* device_mgr_;
EagerContext* context_;
};
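// A remote op may complete on a different device than requested;
// SetRemoteShapeAndDevice then updates the handle's device and backing device
// name accordingly.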
TEST_F(RemoteTensorHandleTest, UnknownRemoteDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d1, context,
true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
Status s;
EXPECT_EQ(h->BackingDeviceName(&s), d2->name());
TF_EXPECT_OK(s);
EXPECT_EQ(h->device(), d2);
}
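// PoisonRemote makes a later SetRemoteShapeAndDevice fail with the stored
// status.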
TEST_F(RemoteTensorHandleTest, PoisonRemote) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d1, context,
true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
h->PoisonRemote(fake_failure_status, d1, context->GetContextViewId());
Device* d2 = device_mgr.ListDevices().at(2);
EXPECT_THAT(h->SetRemoteShapeAndDevice(shape, d1, context->GetContextViewId(),
d2->name()),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
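// Poisoning an unshaped remote mirror fails shape-setting on that mirror.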
TEST_F(RemoteTensorHandleTest, PoisonRemoteMirror) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d1, context,
true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d2, op_id, output_num, remote_task, context));
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
h->PoisonRemote(fake_failure_status, d2, context->GetContextViewId());
EXPECT_THAT(h->SetRemoteShapeAndDevice(shape, d2, context->GetContextViewId(),
d2->name()),
StatusIs(fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
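// Setting the same shape repeatedly is a no-op, but changing the shape is an
// internal error.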
TEST_F(RemoteTensorHandleTest, SetRemoteTensorHandleShapeTwice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d1, context,
true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d2, op_id, output_num, remote_task, context));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d2, context->GetContextViewId(), d2->name()));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d1->name()));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d1->name()));
TensorShape another_shape({1});
EXPECT_THAT(h->SetRemoteShapeAndDevice(
another_shape, d1, context->GetContextViewId(), d1->name()),
StatusIs(tensorflow::error::INTERNAL,
HasSubstr("Trying to change shape to")));
}
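// The same set-shape-twice rules apply to remote mirrors.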
TEST_F(RemoteTensorHandleTest, SetRemoteMirrorShapeTwice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:1/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
const string remote_task = "/job:worker/replica:0/task:1";
Device* d1 = device_mgr.ListDevices().at(1);
TensorHandle* h = TensorHandle::CreateUnshapedRemoteHandle(
0, 0, remote_task, dtype, d1, context,
true);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
EXPECT_EQ(h->device(), d1);
Device* d2 = device_mgr.ListDevices().at(2);
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
int64_t op_id = 1;
int output_num = 2;
TF_ASSERT_OK(
h->AddUnshapedRemoteMirror(d1, op_id, output_num, remote_task, context));
TF_ASSERT_OK(h->SetRemoteShapeAndDevice(
shape, d1, context->GetContextViewId(), d2->name()));
TensorShape another_shape({1});
EXPECT_THAT(h->SetRemoteShapeAndDevice(
another_shape, d1, context->GetContextViewId(), d2->name()),
StatusIs(tensorflow::error::INTERNAL,
HasSubstr("Trying to change shape to")));
}
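// TensorFromDevice succeeds for the device the handle was created on.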
TEST(TensorHandle_LocalTest, TensorFromDeviceSameDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
const Tensor* tensor_from_device;
TF_EXPECT_OK(h->TensorFromDevice(d0, &tensor_from_device));
}
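// TensorFromDevice also succeeds for a device that holds a local mirror.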
TEST(TensorHandle_LocalTest, TensorFromDeviceDifferentDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
tensorflow::Tensor tensor;
TF_EXPECT_OK(h->CopyToDevice(*context, d1, &tensor));
TF_EXPECT_OK(h->AddLocalMirror(std::move(tensor), d1));
const Tensor* tensor_from_device;
TF_EXPECT_OK(h->TensorFromDevice(d1, &tensor_from_device));
}
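// TensorFromDevice fails for a device with neither the tensor nor a mirror.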
TEST(TensorHandle_LocalTest, TensorFromDeviceInvalidDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:worker/replica:0/task:2/device:CPU:0"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_FLOAT;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
const Tensor* tensor_from_device;
EXPECT_THAT(h->TensorFromDevice(d1, &tensor_from_device),
StatusIs(tensorflow::error::INTERNAL));
}
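// Re-adding a resource shape mirror with the same op_id/output_num is a
// no-op; a conflicting registration is rejected.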
TEST(TensorHandle_ResourceShapeMirror, CreateAndCheckMirror) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:1"));
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:2"));
StaticDeviceMgr device_mgr(std::move(devices));
EagerContext* context = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, &device_mgr,
false, nullptr,
nullptr, nullptr,
true);
absl::Cleanup context_cleanup = [&]() { context->Unref(); };
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {};
Tensor t0(dtype, shape);
Device* d0 = device_mgr.ListDevices().at(1);
TensorHandle* h =
TensorHandle::CreateLocalHandle(std::move(t0), d0, d0, d0, context);
absl::Cleanup h_cleanup = [&]() { h->Unref(); };
Device* d1 = device_mgr.ListDevices().at(2);
int64_t op_id = 1;
int output_num = 2;
EXPECT_FALSE(h->HasResourceShapeMirror(d1, context->GetContextViewId()));
TF_EXPECT_OK(h->AddResourceShapeMirror(d1, op_id, output_num, context));
EXPECT_TRUE(h->HasResourceShapeMirror(d1, context->GetContextViewId()));
TF_EXPECT_OK(h->AddResourceShapeMirror(d1, op_id, output_num, context));
EXPECT_THAT(h->AddResourceShapeMirror(d1, op_id + 1, output_num, context),
StatusIs(tensorflow::error::INTERNAL));
}
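// DeviceName, BackingDeviceName, DeviceType, and DeviceId should reflect the
// placement of local CPU and GPU handles.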
TEST(TensorHandle_DeviceNameTest, OnLocalDevice) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
CreateDevice("CPU", "/job:localhost/replica:0/task:0/device:CPU:0"));
devices.push_back(
CreateDevice("GPU", "/job:localhost/replica:0/task:0/device:GPU:0"));
StaticDeviceMgr local_device_mgr(std::move(devices));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&local_device_mgr, false, nullptr, nullptr, nullptr,
true);
absl::Cleanup ctx_cleanup = [&]() { ctx->Unref(); };
Device* dcpu = local_device_mgr.ListDevices()[0];
Device* dgpu = local_device_mgr.ListDevices()[1];
tensorflow::DataType dtype = DT_RESOURCE;
TensorShape shape = {2};
Tensor tcpu(dtype, shape);
Tensor tgpu(dtype, shape);
Status s;
TensorHandle* th_cpu =
TensorHandle::CreateLocalHandle(std::move(tcpu), dcpu, dcpu, dcpu, ctx);
const char* device_name = th_cpu->DeviceName(&s);
absl::Cleanup th_cpu_cleanup = [&]() { th_cpu->Unref(); };
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_name, "CPU")) << device_name;
const char* backing_device_name = th_cpu->BackingDeviceName(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(backing_device_name, "CPU"))
<< backing_device_name;
const char* device_type = th_cpu->DeviceType(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_type, "CPU")) << device_type;
int device_id = th_cpu->DeviceId(&s);
TF_EXPECT_OK(s);
ASSERT_EQ(0, device_id) << device_id;
TensorHandle* th_gpu =
TensorHandle::CreateLocalHandle(std::move(tgpu), dgpu, dgpu, dgpu, ctx);
absl::Cleanup th_gpu_cleanup = [&]() { th_gpu->Unref(); };
device_name = th_gpu->DeviceName(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_name, "GPU")) << device_name;
backing_device_name = th_gpu->BackingDeviceName(&s);
TF_EXPECT_OK(s);
std::cout << "backing_device_name for GPU: " << backing_device_name
<< std::endl;
ASSERT_TRUE(absl::StrContains(backing_device_name, "GPU"))
<< backing_device_name;
device_type = th_gpu->DeviceType(&s);
TF_EXPECT_OK(s);
ASSERT_TRUE(absl::StrContains(device_type, "GPU")) << device_type;
device_id = th_gpu->DeviceId(&s);
TF_EXPECT_OK(s);
ASSERT_EQ(0, device_id) << device_id;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c5092579-ad27-4d89-8e94-e52defb886c3 | cpp | tensorflow/tensorflow | custom_device | tensorflow/core/common_runtime/eager/custom_device.cc | tensorflow/c/eager/custom_device_test.cc | #include "tensorflow/core/common_runtime/eager/custom_device.h"
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/eager/custom_device_op_handler.h"
namespace tensorflow {
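// Builds a PartialTensorShape by querying the subclass's NumDims/Dim.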
Status CustomDeviceTensorHandle::Shape(PartialTensorShape* shape) const {
int num_dims;
TF_RETURN_IF_ERROR(NumDims(&num_dims));
std::vector<int64_t> dims(num_dims);
for (int i = 0; i < num_dims; ++i) {
TF_RETURN_IF_ERROR(Dim(i, &dims[i]));
}
return PartialTensorShape::MakePartialShape(dims.data(), num_dims, shape);
}
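// The element count is the product of all dimensions; an unknown (negative)
// dimension makes the count undefined and is reported as an error.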
Status CustomDeviceTensorHandle::NumElements(int64_t* num_elements) const {
*num_elements = 1;
int num_dims;
TF_RETURN_IF_ERROR(NumDims(&num_dims));
for (int i = 0; i < num_dims; ++i) {
int64_t dim;
TF_RETURN_IF_ERROR(Dim(i, &dim));
if (dim < 0) {
return errors::InvalidArgument(
absl::StrCat("Tried to compute the number of elements of a tensor "
"representing varying shapes. ",
DebugString()));
}
*num_elements *= dim;
}
return absl::OkStatus();
}
const char* CustomDeviceTensorHandle::DeviceType(Status* status) const {
const DeviceNameUtils::ParsedName* parsed = ParsedName(status);
if (!status->ok()) {
return "";
}
return parsed->type.c_str();
}
int CustomDeviceTensorHandle::DeviceId(Status* status) const {
const DeviceNameUtils::ParsedName* parsed = ParsedName(status);
if (!status->ok()) {
return 0;
}
return parsed->id;
}
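// Resolving a custom-device tensor copies it to the host CPU through the
// custom device op handler, then resolves the copied handle.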
AbstractTensorInterface* CustomDeviceTensorHandle::Resolve(Status* status) {
core::RefCountPtr<ImmediateExecutionTensorHandle> copied_off(
context_->GetCustomDeviceOpHandler().CopyTensorHandleToDevice(
context_, this,
DeviceNameUtils::ParsedNameToString(context_->HostCPUParsedName())
.c_str(),
status));
if (!status->ok()) {
return nullptr;
}
return copied_off->Resolve(status);
}
const DeviceNameUtils::ParsedName* CustomDeviceTensorHandle::ParsedName(
Status* status) const {
if (!parsed_name_.has_value()) {
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullOrLocalName(device_->name(), &parsed_name)) {
*status = errors::InvalidArgument(
absl::StrCat("Invalid custom device name ", device_->name()));
return nullptr;
}
parsed_name_.emplace(std::move(parsed_name));
}
return &*parsed_name_;
}
} | #include <memory>
#include "absl/strings/match.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/custom_device_testutil.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/test.h"
TEST(CUSTOM_DEVICE, RegisterSimpleDevice) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* context = TFE_NewContext(opts, status.get());
TFE_DeleteContextOptions(opts);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
bool arrived = false;
bool executed = false;
const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
RegisterLoggingDevice(context, name, true,
&arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* hcpu = TestMatrixTensorHandle(context);
ASSERT_FALSE(arrived);
TFE_TensorHandle* hdevice =
TFE_TensorHandleCopyToDevice(hcpu, context, name, status.get());
ASSERT_TRUE(arrived);
ASSERT_FALSE(executed);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul(
MatMulOp(context, hcpu, hdevice), TFE_DeleteOp);
TFE_OpSetDevice(matmul.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* retval;
int num_retvals = 1;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
TFE_DeleteTensorHandle(retval);
TFE_DeleteTensorHandle(hcpu);
TFE_DeleteTensorHandle(hdevice);
TFE_DeleteContext(context);
}
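// TFE_OpReset should allow re-targeting an op between a custom device and the
// CPU.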
TEST(CUSTOM_DEVICE, ResetOperation) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
TFE_ContextOptions* opts = TFE_NewContextOptions();
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts, status.get()), TFE_DeleteContext);
TFE_DeleteContextOptions(opts);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
bool arrived = false;
bool executed = false;
const char* custom_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:0";
RegisterLoggingDevice(context.get(), custom_device_name,
true, &arrived, &executed,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> reused_op(
TFE_NewOp(context.get(), "Identity", status.get()), TFE_DeleteOp);
TFE_OpReset(reused_op.get(), "Identity", custom_device_name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_EQ(tensorflow::string(TFE_OpGetDevice(reused_op.get(), status.get())),
tensorflow::string(custom_device_name));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpReset(reused_op.get(), "Identity",
"/job:localhost/replica:0/task:0/device:CPU:0", status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_EQ(tensorflow::string(TFE_OpGetDevice(reused_op.get(), status.get())),
tensorflow::string("/job:localhost/replica:0/task:0/device:CPU:0"));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
}
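// Creates, assigns, reads back, and destroys a resource variable placed on
// the custom device.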
TEST(CUSTOM_DEVICE, MakeVariable) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
bool arrived = false;
bool executed = false;
const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
RegisterLoggingDevice(context.get(), name, true,
&arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
TFE_OpSetAttrShape(op.get(), "shape", {}, 0, status.get());
TFE_OpSetAttrString(op.get(), "container", "", 0);
TFE_OpSetAttrString(op.get(), "shared_name", "", 0);
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* var_handle = nullptr;
int num_retvals = 1;
executed = false;
TFE_Execute(op.get(), &var_handle, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
auto handle_cleaner = tensorflow::gtl::MakeCleanup(
[var_handle]() { TFE_DeleteTensorHandle(var_handle); });
std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> one(
TestScalarTensorHandle(context.get(), 111.f), TFE_DeleteTensorHandle);
op.reset(TFE_NewOp(context.get(), "AssignVariableOp", status.get()));
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
TFE_OpAddInput(op.get(), var_handle, status.get());
TFE_OpAddInput(op.get(), one.get(), status.get());
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
executed = false;
num_retvals = 0;
TFE_Execute(op.get(), nullptr, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
op.reset(TFE_NewOp(context.get(), "ReadVariableOp", status.get()));
TFE_OpAddInput(op.get(), var_handle, status.get());
TFE_OpSetDevice(op.get(), name, status.get());
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
executed = false;
num_retvals = 1;
TFE_TensorHandle* var_value = nullptr;
TFE_Execute(op.get(), &var_value, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
auto value_cleaner = tensorflow::gtl::MakeCleanup(
[var_value]() { TFE_DeleteTensorHandle(var_value); });
ASSERT_EQ(tensorflow::string(name),
tensorflow::string(
TFE_TensorHandleBackingDeviceName(var_value, status.get())));
TFE_TensorHandle* var_value_unpacked =
UnpackTensorHandle(var_value, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> resolved_value(
TFE_TensorHandleResolve(var_value_unpacked, status.get()),
TF_DeleteTensor);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_EQ(111., *static_cast<float*>(TF_TensorData(resolved_value.get())));
op.reset(TFE_NewOp(context.get(), "DestroyResourceOp", status.get()));
TFE_OpAddInput(op.get(), var_handle, status.get());
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
num_retvals = 0;
TFE_Execute(op.get(), nullptr, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
}
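// Same variable lifecycle, but the device is registered with its boolean
// placement flag cleared (the bare `false` below); the read's reported device
// name is checked against the custom device.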
TEST(CUSTOM_DEVICE, AccessVariableOnCustomDevice) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
bool arrived = false;
bool executed = false;
const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
RegisterLoggingDevice(context.get(), name, false,
&arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
TFE_OpSetAttrShape(op.get(), "shape", {}, 0, status.get());
TFE_OpSetAttrString(op.get(), "container", "", 0);
TFE_OpSetAttrString(op.get(), "shared_name", "", 0);
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_TensorHandle* var_handle = nullptr;
int num_retvals = 1;
executed = false;
TFE_Execute(op.get(), &var_handle, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
auto handle_cleaner = tensorflow::gtl::MakeCleanup(
[var_handle]() { TFE_DeleteTensorHandle(var_handle); });
std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> one(
TestScalarTensorHandle(context.get(), 111.f), TFE_DeleteTensorHandle);
op.reset(TFE_NewOp(context.get(), "AssignVariableOp", status.get()));
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
TFE_OpAddInput(op.get(), var_handle, status.get());
TFE_OpAddInput(op.get(), one.get(), status.get());
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
executed = false;
num_retvals = 0;
TFE_Execute(op.get(), nullptr, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
op.reset(TFE_NewOp(context.get(), "ReadVariableOp", status.get()));
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpAddInput(op.get(), var_handle, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT);
executed = false;
num_retvals = 1;
TFE_TensorHandle* var_value = nullptr;
TFE_Execute(op.get(), &var_value, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
ASSERT_EQ(
tensorflow::string(name),
tensorflow::string(TFE_TensorHandleDeviceName(var_value, status.get())));
TFE_DeleteTensorHandle(var_value);
op.reset(TFE_NewOp(context.get(), "DestroyResourceOp", status.get()));
TFE_OpAddInput(op.get(), var_handle, status.get());
TFE_OpSetDevice(op.get(), name, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
num_retvals = 0;
TFE_Execute(op.get(), nullptr, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
}
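// Input-based placement: ops whose inputs live on one custom device run
// there, mixing two custom devices is an error, and conflicting explicit
// placements fail.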
TEST(CUSTOM_DEVICE, InputBasedPlacement) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
const char* custom0 = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
const char* custom1 = "/job:localhost/replica:0/task:0/device:CUSTOM:1";
bool arrived = false;
bool executed = false;
RegisterLoggingDevice(context.get(), custom0,
false, &arrived, &executed,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
RegisterLoggingDevice(context.get(), custom1,
true, &arrived, &executed,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcpu(
TestMatrixTensorHandle(context.get()), TFE_DeleteTensorHandle);
ASSERT_FALSE(arrived);
std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcustom0(
TFE_TensorHandleCopyToDevice(hcpu.get(), context.get(), custom0,
status.get()),
TFE_DeleteTensorHandle);
ASSERT_TRUE(arrived);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
arrived = false;
std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcustom1(
TFE_TensorHandleCopyToDevice(hcpu.get(), context.get(), custom1,
status.get()),
TFE_DeleteTensorHandle);
ASSERT_TRUE(arrived);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul(
MatMulOp(context.get(), hcpu.get(), hcpu.get()), TFE_DeleteOp);
TFE_TensorHandle* retval;
int num_retvals = 1;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_DeleteTensorHandle(retval);
matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcustom0.get()));
num_retvals = 1;
executed = false;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
ASSERT_TRUE(executed);
TFE_DeleteTensorHandle(retval);
matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcustom1.get()));
num_retvals = 1;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
ASSERT_NE(TF_OK, TF_GetCode(status.get()));
ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom0));
ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom1));
matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcpu.get()));
num_retvals = 1;
executed = false;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
EXPECT_TRUE(executed);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
TFE_DeleteTensorHandle(retval);
matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcpu.get()));
TFE_OpSetDevice(matmul.get(), "/job:localhost/replica:0/task:0/device:CPU:0",
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
num_retvals = 1;
executed = false;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
EXPECT_FALSE(executed);
ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK);
matmul.reset(MatMulOp(context.get(), hcustom1.get(), hcpu.get()));
num_retvals = 1;
executed = false;
TFE_Execute(matmul.get(), &retval, &num_retvals, status.get());
EXPECT_FALSE(executed);
ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK);
}
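// Registration must reject invalid device names, duplicates, and names of
// existing physical devices.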
TEST(CUSTOM_DEVICE, InvalidRegistrationError) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
bool arrived = false;
bool executed = false;
RegisterLoggingDevice(context.get(), "/device:CUSTOM:0",
true, &arrived, &executed,
status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT)
<< TF_Message(status.get());
const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
RegisterLoggingDevice(context.get(), name, true,
&arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get());
RegisterLoggingDevice(context.get(), name, true,
&arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_ALREADY_EXISTS)
<< TF_Message(status.get());
RegisterLoggingDevice(
context.get(), "/job:localhost/replica:0/task:0/device:CPU:0",
true, &arrived, &executed, status.get());
ASSERT_TRUE(TF_GetCode(status.get()) == TF_ALREADY_EXISTS)
<< TF_Message(status.get());
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/custom_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/custom_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fbcfd3c-c07b-407f-91b7-5614abd60af0 | cpp | tensorflow/tensorflow | execute | tensorflow/core/tfrt/mlrt/interpreter/execute.cc | tensorflow/core/common_runtime/eager/execute_test.cc | #include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/register_span.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tsl/profiler/lib/traceme.h"
namespace mlrt {
namespace {
struct CurrentExecutionInfo {
ExecutionContext* current_context = nullptr;
ExecutionContext* ready_context = nullptr;
};
CurrentExecutionInfo& GetCurrentExecutionInfo() {
static thread_local CurrentExecutionInfo current_execution_info;
return current_execution_info;
}
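// Resumes a ready context: inline when the current context is returning from
// its last function frame, otherwise by re-queuing it on the work queue.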
void Resume(ExecutionContext& ready_context) {
auto& current_execution_info = GetCurrentExecutionInfo();
auto* current_context = current_execution_info.current_context;
if ((current_context != nullptr) &&
(current_execution_info.ready_context == nullptr) &&
(current_context->state() == ExecutionContext::State::kReturn) &&
(current_context->function_stack_size() == 1)) {
current_execution_info.ready_context = &ready_context;
} else {
auto* work_queue = ready_context.work_queue();
DCHECK(work_queue);
work_queue->AddTask([&ready_context]() { Execute(ready_context); });
}
}
}
namespace execute_internal {
void UnwindOnError(ExecutionContext& context, int64_t pc);
}
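// Main interpreter loop: executes kernels of the top function frame until the
// context leaves kRunning, then dispatches on the resulting state.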
void Execute(ExecutionContext& ctx) {
auto& current_execution_info = GetCurrentExecutionInfo();
current_execution_info.ready_context = &ctx;
for (; current_execution_info.ready_context;) {
current_execution_info.current_context =
current_execution_info.ready_context;
current_execution_info.ready_context = nullptr;
auto& context = *current_execution_info.current_context;
DCHECK(!context.function_stack_.empty());
int function_stack_index = context.function_stack_.size() - 1;
FunctionContext* current_function = &context.function_stack_.back();
int64_t pc = current_function->pc_;
auto kernels = context.loaded_executable().kernels();
auto kernel_object_iter =
current_function->function_object().kernels().begin();
kernel_object_iter += pc;
KernelFrame::State kstate(current_function);
KernelFrame frame(&kstate);
for (; context.state_ == ExecutionContext::State::kRunning; ++pc) {
DCHECK(kernel_object_iter <
current_function->function_object().kernels().end());
bc::Kernel kernel_object = *kernel_object_iter;
frame.set_kernel(kernel_object);
kernels[kernel_object.code()](frame);
++kernel_object_iter;
}
current_function = &context.function_stack_[function_stack_index];
current_function->pc_ = pc;
current_execution_info.current_context = nullptr;
switch (context.state_) {
case ExecutionContext::State::kReady: {
DCHECK(current_execution_info.ready_context == nullptr);
context.state_ = ExecutionContext::State::kRunning;
if (current_function->kernel_context().reenter) {
current_function->pc_--;
}
current_execution_info.ready_context = &context;
break;
}
case ExecutionContext::State::kRunning:
LOG(FATAL) << "This cannot happen.";
break;
case ExecutionContext::State::kReturn: {
tsl::profiler::TraceMe trace_me("Execute::Return");
context.function_stack_.pop_back();
if (context.function_stack_.empty()) {
if (context.exit_handler_) {
std::move(context.exit_handler_)();
}
break;
}
DCHECK(current_execution_info.ready_context == nullptr);
context.state_ = ExecutionContext::State::kRunning;
current_execution_info.ready_context = &context;
break;
}
case ExecutionContext::State::kSuspended: {
DCHECK(current_execution_info.ready_context == nullptr);
tsl::profiler::TraceMe trace_me("Execute::Suspend");
DCHECK(context.suspend_handler_);
std::move(context.suspend_handler_)([&context]() { Resume(context); });
return;
}
case ExecutionContext::State::kError: {
DCHECK(current_execution_info.ready_context == nullptr);
tsl::profiler::TraceMe trace_me("Execute::Error");
execute_internal::UnwindOnError(context, -1);
return;
}
}
}
}
namespace execute_internal {
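// Unwinds the function stack on error: every result register of the executed
// kernels gets a chance to handle the error, and a suspension during
// unwinding re-queues the unwind on the work queue.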
void UnwindOnError(ExecutionContext& context, int64_t pc) {
std::string function_name;
if (!context.function_stack_.empty()) {
function_name = context.function_stack_.back().function_object().name();
}
context.LogError(context.status());
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: start from function ", function_name,
" with stack size: ", context.function_stack_.size(), " at pc: ", pc,
" for context ", absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
while (!context.function_stack_.empty()) {
DCHECK(context.state_ == ExecutionContext::State::kError);
FunctionContext* current_function = &context.function_stack_.back();
Value context_value(&context);
if (pc == -1) {
DCHECK(context.state_ == ExecutionContext::State::kError);
++pc;
RegisterSpan input_reg_span(
current_function->function_object().input_regs(),
current_function->regs());
for (Value& reg : input_reg_span) {
reg.HandleError(context_value);
if (context.state_ != ExecutionContext::State::kError) {
DCHECK(context.state_ == ExecutionContext::State::kSuspended);
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: entering state", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
--pc;
break;
}
}
}
context.LogError(absl::InternalError(
absl::StrCat("UnwindOnError: unwinding function from ", pc, " to ",
current_function->pc_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
for (; context.state_ == ExecutionContext::State::kError &&
pc <= current_function->pc_;
++pc) {
bc::Kernel kernel = current_function->function_object().kernels()[pc];
RegisterSpan reg_span(kernel.results(), current_function->regs());
for (Value& reg : reg_span) {
reg.HandleError(context_value);
if (context.state_ != ExecutionContext::State::kError) {
DCHECK(context.state_ == ExecutionContext::State::kSuspended);
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: entering state", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
--pc;
break;
}
}
}
if (context.state_ == ExecutionContext::State::kSuspended) {
DCHECK(context.suspend_handler_)
<< "suspend_handler_ must be populated when the state is set to "
"kSuspended.";
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: suspended state ", context.state_, " for context ",
absl::Hex(reinterpret_cast<std::uintptr_t>(&context)))));
std::move(context.suspend_handler_)([&context, pc]() {
auto* work_queue = context.work_queue();
DCHECK(work_queue);
work_queue->AddTask([&context, pc]() {
context.state_ = ExecutionContext::State::kError;
UnwindOnError(context, pc);
});
});
return;
}
DCHECK(context.state_ != ExecutionContext::State::kSuspended);
pc = -1;
context.function_stack_.pop_back();
}
context.LogError(absl::InternalError(absl::StrCat(
"UnwindOnError: done for function ", function_name,
" for context: ", absl::Hex(reinterpret_cast<std::uintptr_t>(&context)),
" at state ", context.state_)));
if (context.exit_handler_) {
std::move(context.exit_handler_)();
}
}
}
} | #include "tensorflow/core/common_runtime/eager/execute.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
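// Executes a primitive op with run-eager-op-as-function enabled.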
TEST(ExecuteTest, EagerOperationAsFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
ctx->SetRunEagerOpAsFunction(true);
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
"Mul",
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input1_tensor = test::AsScalar<int64_t>(3);
auto input1 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input1_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input1.get()));
Tensor input2_tensor = test::AsScalar<int64_t>(2);
auto input2 = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input2_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input2.get()));
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
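// An uncompiled function increments the "disabled" compile counter.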
TEST(ExecuteTest, SimpleFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
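// A malformed full type (TFT_TENSOR with no args) on an int32 input must be
// rejected with InvalidArgument.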
TEST(ExecuteTest, SimpleFunctionInt32BadFullType) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr,
nullptr, nullptr,
true);
const Tensor kTwo = test::AsScalar<int32_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int32"},
{"y: int32"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT32}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT32}, {"DstT", DT_INT32}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT32}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int32_t>(3);
ASSERT_NE(ctx->HostCPUName().c_str(), nullptr);
Device* d = nullptr;
TF_ASSERT_OK(ctx->FindDeviceFromName(ctx->HostCPUName().c_str(), &d));
auto input = core::RefCountPtr<TensorHandle>(
TensorHandle::CreateLocalHandle(std::move(input_tensor), d,
nullptr, ctx));
TF_ASSERT_OK(op->AddInput(input.get()));
FullTypeDef ft;
ft.set_type_id(TFT_TENSOR);
input.get()->SetFullType(ft);
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
Status status = EagerExecute(op.get(), retvals.data(), &num_retvals);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << "Actual status: " << status;
EXPECT_TRUE(
absl::StrContains(status.message(), "TFT_TENSOR has 0 args instead of 1"))
<< "Actual: " << status.message();
ASSERT_EQ(retvals[0], nullptr);
ctx->Unref();
}
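// _XlaMustCompile triggers compilation and the top-level JIT counter.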
TEST(ExecuteTest, CompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
TF_ASSERT_OK(op->SetAttrBool("_XlaMustCompile", true));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(top_level_counter.Delta("CPU"), 1);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
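// Compilation requested inside a nested call counts as "enabled" but not as a
// top-level JIT compilation.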
TEST(ExecuteTest, NestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
const FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
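// Two levels of nesting behave the same: one compiled call, no top-level JIT
// compilation.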
TEST(ExecuteTest, MultipleNestedCompiledFunction) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr);
const Tensor kTwo = test::AsScalar<int64_t>(2);
const string function_name = "XTimesTwo";
const FunctionDef x_times_two = FunctionDefHelper::Define(
function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_INT64}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_INT64}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(x_times_two));
const string call_function_name = "FunctionCall";
FunctionDef function_call = FunctionDefHelper::Define(
call_function_name,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"_XlaMustCompile", true},
{"_device", "/job:localhost/replica:0/task:0/device:CPU:0"},
{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"XTimesTwo", {{"T", DT_INT64}})}}},
});
for (auto& node_def : *function_call.mutable_node_def()) {
if (node_def.op() == "StatefulPartitionedCall") {
node_def.set_device("/job:localhost/replica:0/task:0/device:CPU:0");
}
}
TF_ASSERT_OK(ctx->AddFunctionDef(function_call));
const string call_function_name2 = "FunctionCall2";
const FunctionDef function_call2 = FunctionDefHelper::Define(
call_function_name2,
{"x: int64"},
{"y: int64"},
{},
{
{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice({DT_INT64})},
{"Tout", DataTypeSlice({DT_INT64})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"FunctionCall", {{"T", DT_INT64}})}}},
});
TF_ASSERT_OK(ctx->AddFunctionDef(function_call2));
auto op = std::make_unique<EagerOperation>(ctx);
TF_ASSERT_OK(op->Reset(
call_function_name2.c_str(),
"/job:localhost/replica:0/task:0/device:CPU:0"));
Tensor input_tensor = test::AsScalar<int64_t>(3);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor,
ctx->HostCPUName().c_str()));
TF_ASSERT_OK(op->AddInput(input.get()));
monitoring::testing::CellReader<int64_t> counter_reader(
"/tensorflow/core/tf_function_compile");
monitoring::testing::CellReader<int64_t> top_level_counter(
"/tensorflow/core/tf_top_level_jit_compilation");
std::vector<TensorHandle*> retvals(1);
int num_retvals = retvals.size();
TF_ASSERT_OK(EagerExecute(op.get(), retvals.data(), &num_retvals));
EXPECT_EQ(counter_reader.Delta("CPU", "enabled"), 1);
EXPECT_EQ(counter_reader.Delta("CPU", "disabled"), 0);
EXPECT_EQ(top_level_counter.Delta("CPU"), 0);
retvals[0]->Unref();
retvals[0] = nullptr;
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/execute.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51490e3e-50c8-4a6d-a209-b93fee91cf59 | cpp | tensorflow/tensorflow | execute_node | tensorflow/core/common_runtime/eager/execute_node.cc | tensorflow/core/common_runtime/eager/execute_node_test.cc | #include "tensorflow/core/common_runtime/eager/execute_node.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
#if !defined(IS_MOBILE_PLATFORM)
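// Returns true (and records the fact) if the handle is remote or mirrored on
// a remote device for the current context view.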
bool ExecuteNodeArgs::IsRemote(EagerContext* ctx, Device* input_device,
TensorHandle* handle) {
uint64 context_view_id = ctx->GetContextViewId();
if (handle->Type() == TensorHandle::REMOTE ||
handle->HasRemoteMirror(input_device, context_view_id)) {
if (!has_remote_inputs_) {
has_remote_inputs_ = true;
}
return true;
}
return false;
}
#endif
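// Expands a packed handle into per-component TensorValues; remote components
// are skipped here and serialized lazily, and nested packing is rejected.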
Status ExecuteNodeArgs::InitPackedHandle(const int index, EagerContext* ctx,
Device* input_device,
TensorHandle* packed_handle) {
int num_handles = packed_handle->NumPackedHandles();
packed_args_.emplace(index,
absl::InlinedVector<TensorValue, 4UL>(num_handles));
TensorValue* packed_arg_flat = &(packed_args_[index][0]);
for (int i = 0; i < num_handles; ++i) {
TensorHandle* h = nullptr;
TF_RETURN_IF_ERROR(packed_handle->ExtractPackedHandle(i, &h));
const Status status = h->TensorValue(h->device(), &packed_arg_flat[i]);
if (!status.ok()) {
#if !defined(IS_MOBILE_PLATFORM)
if (IsRemote(ctx, input_device, h)) {
continue;
}
#endif
if (h->Type() == TensorHandle::PACKED) {
return errors::InvalidArgument(
"Nested packed handles are not supported");
}
return status;
}
}
return absl::OkStatus();
}
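// Collects TensorValues for all op inputs, expanding packed handles and, when
// remote inputs exist, installing a remote-handle serialization callback.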
Status ExecuteNodeArgs::Init(
EagerContext* ctx, const absl::InlinedVector<TensorHandle*, 4UL>& op_inputs,
const core::RefCountPtr<KernelAndDevice>& kernel) {
const int n_inputs = op_inputs.size();
if (n_inputs > 0) {
TensorHandle* const* op_inputs_flat = &op_inputs[0];
TensorValue* tensor_args_flat = &tensor_args_[0];
for (int i = 0; i < n_inputs; ++i) {
TensorHandle* in = op_inputs_flat[i];
Device* d = kernel->InputDevice(i);
Status s = in->TensorValue(ctx->CanonicalDevice(d), &tensor_args_flat[i]);
if (!s.ok()) {
#if !defined(IS_MOBILE_PLATFORM)
if (IsRemote(ctx, d, in)) {
continue;
}
#endif
if (in->Type() != TensorHandle::PACKED) {
return s;
}
if (!has_packed_inputs_) {
has_packed_inputs_ = true;
}
TF_RETURN_IF_ERROR(InitPackedHandle(i, ctx, d, in));
}
}
}
#if !defined(IS_MOBILE_PLATFORM)
if (has_remote_inputs_) {
const bool is_function = kernel->IsFunction();
serialize_remote_handle_ =
[ctx, &op_inputs, is_function](
const FunctionArgIndex& index,
eager::RemoteTensorHandle* handle) -> Status {
TensorHandle* h = op_inputs[index.index];
if (op_inputs[index.index]->Type() == TensorHandle::PACKED) {
TF_RETURN_IF_ERROR(
op_inputs[index.index]->ExtractPackedHandle(index.sub_index, &h));
}
Device* device = h->device();
bool wait_until_ready = SkipRemoteHandleWaitReady() ? false : is_function;
return ctx->RemoteMgr()->SerializeRemoteTensorHandle(h, wait_until_ready,
handle, device);
};
}
#endif
return absl::OkStatus();
}
Status ExecuteNodeArgs::GetLocalArg(const FunctionArgIndex& index,
Tensor* val) const {
Status s = EagerKernelArgs::GetLocalArg(index, val);
if (s.ok()) {
return absl::OkStatus();
}
if (packed_args_.contains(index.index)) {
Tensor* arg = packed_args_.at(index.index).at(index.sub_index).tensor;
if (arg) {
*val = *arg;
return absl::OkStatus();
} else {
return errors::NotFound("Argument (", index.index, ",", index.sub_index,
") has no local tensor.");
}
} else {
return s;
}
}
} | #include "tensorflow/core/common_runtime/eager/execute_node.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class TestKernelAndDeviceFunc final : public KernelAndDeviceFunc {
public:
TestKernelAndDeviceFunc(std::vector<Device*> input_devices,
Device* host_cpu_device)
: KernelAndDeviceFunc(
nullptr, nullptr, {},
{}, {},
nullptr, nullptr,
host_cpu_device, "", false,
false,
false,
true,
false,
std::nullopt,
false,
Rendezvous::Factory(),
nullptr),
test_input_devices_(std::move(input_devices)) {}
Device* InputDevice(int i) const override { return test_input_devices_[i]; }
private:
std::vector<Device*> test_input_devices_;
};
TEST(ExecuteNodeTest, ExecuteNodeArgs) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
Device* device0 = device_mgr.ListDevices().at(0);
auto remote_device_mgr = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> remote_devices;
remote_devices.emplace_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:1"));
TF_ASSERT_OK(remote_device_mgr->AddDevices(std::move(remote_devices)));
Device* device1 = remote_device_mgr->ListDevices().at(0);
Status s;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice({device0->name(), device1->name()},
0,
device_mgr.HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
auto remote_mgr = std::make_unique<eager::RemoteMgr>(
true, ctx);
TF_ASSERT_OK(ctx->InitializeRemoteMaster(
nullptr, nullptr,
nullptr, nullptr,
std::move(remote_device_mgr), {},
EagerContext::NewContextId(),
nullptr, &device_mgr, 600,
nullptr, std::move(remote_mgr)));
DataType dtype = DT_FLOAT;
Tensor t0(dtype, TensorShape({}));
t0.scalar<float>()() = {1.0f};
TensorHandle* h0 =
TensorHandle::CreateLocalHandle(std::move(t0), device0, device0, ctx);
Tensor t1(dtype, TensorShape({}));
t1.scalar<float>()() = {2.0f};
TensorHandle* h1 =
TensorHandle::CreateLocalHandle(std::move(t1), device0, device0, ctx);
TensorHandle* h2 = TensorHandle::CreateLazyRemoteHandle(
1, 0, dtype, device1, true, ctx);
TensorHandle* h3 = TensorHandle::CreateLazyRemoteHandle(
2, 1, dtype, device1, true, ctx);
TensorHandle* packed_h = nullptr;
TF_ASSERT_OK(TensorHandle::CreatePackedHandle({h1, h2}, ctx, &packed_h));
absl::InlinedVector<TensorHandle*, 4> inputs = {h0, packed_h, h3};
std::vector<Device*> input_devices;
for (auto* h : inputs) {
input_devices.push_back(h->DeviceOrHostCPU(*ctx));
}
const core::RefCountPtr<KernelAndDevice> kernel(
new TestKernelAndDeviceFunc(std::move(input_devices), device0));
ExecuteNodeArgs args(inputs.size());
TF_EXPECT_OK(args.Init(ctx, inputs, kernel));
EXPECT_TRUE(args.HasRemoteOrPackedInputs());
Tensor local0;
TF_EXPECT_OK(args.GetLocalArg(FunctionArgIndex(0), &local0));
EXPECT_EQ(local0.flat<float>().size(), 1);
EXPECT_EQ(local0.flat<float>()(0), 1.0);
Tensor local1;
TF_EXPECT_OK(args.GetLocalArg(FunctionArgIndex(1, 0), &local1));
EXPECT_EQ(local1.flat<float>().size(), 1);
EXPECT_EQ(local1.flat<float>()(0), 2.0);
eager::RemoteTensorHandle remote0;
TF_EXPECT_OK(args.GetRemoteArg(FunctionArgIndex(1, 1), &remote0));
EXPECT_EQ(remote0.op_id(), 1);
EXPECT_EQ(remote0.output_num(), 0);
eager::RemoteTensorHandle remote1;
TF_EXPECT_OK(args.GetRemoteArg(FunctionArgIndex(2), &remote1));
EXPECT_EQ(remote1.op_id(), 2);
EXPECT_EQ(remote1.output_num(), 1);
h0->Unref();
h1->Unref();
h2->Unref();
h3->Unref();
packed_h->Unref();
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
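A short sketch of how FunctionArgIndex(index, sub_index) addresses the inputs assembled in the test above: plain handles resolve locally by index, components of the packed handle resolve by sub-index, and remote components are fetched as RemoteTensorHandles. `args` is assumed to be the ExecuteNodeArgs the test initializes.

// Sketch only: `args` is the ExecuteNodeArgs initialized in the test above.
Tensor local;
TF_CHECK_OK(args.GetLocalArg(FunctionArgIndex(0), &local));       // h0
TF_CHECK_OK(args.GetLocalArg(FunctionArgIndex(1, 0), &local));    // h1 inside packed_h
eager::RemoteTensorHandle remote;
TF_CHECK_OK(args.GetRemoteArg(FunctionArgIndex(1, 1), &remote));  // h2 inside packed_h
TF_CHECK_OK(args.GetRemoteArg(FunctionArgIndex(2), &remote));     // h3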
b2e87421-cde3-430f-bfe2-9faaf3f39e73 | cpp | tensorflow/tensorflow | placement_utils | tensorflow/core/common_runtime/eager/placement_utils.cc | tensorflow/core/common_runtime/eager/placement_utils_test.cc | #include "tensorflow/core/common_runtime/eager/placement_utils.h"
#include <variant>
#include "absl/status/status.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/eager/custom_device.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace eager {
static bool IsPinnableOp(StringPiece op_name) {
static const gtl::FlatSet<string>* unpinnable_ops = new gtl::FlatSet<string>({
"RandomUniform",
"RandomUniformInt",
"RandomStandardNormal",
"StatelessRandomUniform",
"StatelessRandomUniformInt",
"StatelessRandomUniformFullInt",
"StatelessRandomNormal",
});
return unpinnable_ops->find(string(op_name)) == unpinnable_ops->end() &&
!absl::StartsWith(op_name, "XRT");
}
static Status ValidateTensorHandleRemoteDevice(EagerContext* ctx,
int64_t device_incarnation) {
if (ctx->remote_device_mgr()->ContainsDevice(device_incarnation)) {
return absl::OkStatus();
}
return errors::InvalidArgument(
"Resource input tensor contains an invalid device. This might happen "
"when the client has connected to a different cluster, or some remote "
"workers have been restarted.");
}
bool IsColocationExempt(StringPiece op_name) {
const auto& exempt_ops = InputColocationExemptionRegistry::Global()->Get();
return exempt_ops.find(string(op_name)) != exempt_ops.end();
}
bool IsFunction(StringPiece op_name) {
const OpDef* op_def = nullptr;
Status s = OpDefForOp(string(op_name), &op_def);
if (!s.ok()) {
if (!absl::IsNotFound(s)) {
LOG(WARNING) << "Looking up OpDef failed with error: " << s;
}
return true;
}
return false;
}
Status MaybePinSmallOpsToCpu(
bool* result, StringPiece op_name,
absl::Span<ImmediateExecutionTensorHandle* const> args,
StringPiece cpu_device_name) {
if (IsFunction(op_name) || IsColocationExempt(op_name) ||
!IsPinnableOp(op_name)) {
*result = false;
return absl::OkStatus();
}
if (args.empty()) {
*result = false;
return absl::OkStatus();
}
int i = 0;
for (auto* arg : args) {
Status s;
const char* device_name = arg->DeviceName(&s);
DataType dtype = arg->DataType();
TF_RETURN_IF_ERROR(s);
DVLOG(2) << "for op " << op_name << " input " << i << " "
<< DataTypeString(dtype) << " input device = " << device_name;
if (device_name != cpu_device_name) {
*result = false;
return absl::OkStatus();
}
if (dtype != DataType::DT_INT32 && dtype != DataType::DT_INT64) {
*result = false;
return absl::OkStatus();
}
int64_t num_elements;
TF_RETURN_IF_ERROR(arg->NumElements(&num_elements));
if (num_elements > 64) {
*result = false;
return absl::OkStatus();
}
i++;
}
DVLOG(1) << "Forcing op " << op_name
<< " to be on the CPU since all input tensors have an "
"int32/int64 dtype, and are small (less than 64 elements).";
*result = true;
return absl::OkStatus();
}
Status MaybePinToResourceDevice(Device** device, const EagerOperation& op) {
if (op.colocation_exempt()) {
return absl::OkStatus();
}
EagerContext& ctx = op.EagerContext();
const absl::InlinedVector<TensorHandle*, 4>* inputs;
TF_RETURN_IF_ERROR(op.TensorHandleInputs(&inputs));
Device* op_device = op.Device() == kVariantDeviceNull
? ctx.HostCPU()
: std::get<Device*>(op.Device());
for (int i = 0; i < inputs->size(); ++i) {
TensorHandle* tensor_handle = (*inputs)[i];
if (tensor_handle->dtype == DT_RESOURCE) {
if (tensor_handle->resource_remote_device_incarnation() != 0) {
TF_RETURN_IF_ERROR(ValidateTensorHandleRemoteDevice(
&ctx, tensor_handle->resource_remote_device_incarnation()));
}
Device* resource_device = tensor_handle->resource_device();
DVLOG(2) << "for op " << op.Name() << " input " << i << " "
<< DataTypeString(tensor_handle->dtype)
<< " input device = " << resource_device->name()
<< ", op device = " << op_device->name();
if (resource_device != op_device || op.Device() == kVariantDeviceNull) {
DVLOG(1) << (resource_device != op_device ? "Changing " : "Setting ")
<< "device of operation " << op.Name() << " to "
<< resource_device->name() << " because input #" << i
<< " is a resource in this device.";
*device = resource_device;
return absl::OkStatus();
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/common_runtime/eager/placement_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/execute_node.h"
#include "tensorflow/core/platform/test.h"
#define DEVICE_CPU0 "/job:localhost/replica:0/task:0/device:CPU:0"
#define DEVICE_CPU0_TASK1 "/job:localhost/replica:0/task:1/device:CPU:0"
#define DEVICE_GPU0 "/job:localhost/replica:0/task:0/device:GPU:0"
namespace tensorflow {
namespace {
TEST(PlacementUtilsTest, IsColocationExemptFalse) {
ASSERT_FALSE(eager::IsColocationExempt("Identity"));
}
TEST(PlacementUtilsTest, IsColocationExemptTrue) {
ASSERT_TRUE(eager::IsColocationExempt("IdentityN"));
}
TEST(PlacementUtilsTest, IsFunctionTrue) {
ASSERT_TRUE(eager::IsFunction("MyFunction"));
}
TEST(PlacementUtilsTest, IsFunctionFalse) {
ASSERT_FALSE(eager::IsFunction("Identity"));
}
static Device* CreateDevice(const char* type, const char* name,
bool is_local = true) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr, bool is_local)
: Device(nullptr, attr), is_local_(is_local) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
bool IsLocal() const override { return is_local_; }
private:
const bool is_local_;
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
int64_t incarnation = random::New64();
while (incarnation == 0) {
incarnation = random::New64();
}
attr.set_incarnation(incarnation);
return new FakeDevice(attr, is_local);
}
static void CreateLocalDeviceVector(
std::vector<std::unique_ptr<Device>>& devices) {
std::unique_ptr<Device> d0(CreateDevice("CPU", DEVICE_CPU0));
devices.emplace_back(std::move(d0));
std::unique_ptr<Device> d1(CreateDevice("GPU", DEVICE_GPU0));
devices.emplace_back(std::move(d1));
}
static Device* CreateRemoteDeviceVector(
std::vector<std::unique_ptr<Device>>& devices) {
std::unique_ptr<Device> d0(CreateDevice("CPU", DEVICE_CPU0_TASK1, false));
devices.emplace_back(std::move(d0));
return devices.back().get();
}
struct MaybePinSmallOpsToCpuTestCase {
std::string test_name;
DataType dtype;
TensorShape shape;
string op_name;
const char* device;
bool expect;
};
class PlacementUtilsSmallOpsTest
: public ::testing::TestWithParam<MaybePinSmallOpsToCpuTestCase> {};
TEST_P(PlacementUtilsSmallOpsTest, TestMaybePinSmallOpsToCpu) {
const MaybePinSmallOpsToCpuTestCase& test_case = GetParam();
bool result;
std::vector<std::unique_ptr<Device>> devices;
CreateLocalDeviceVector(devices);
StaticDeviceMgr device_mgr(std::move(devices));
core::RefCountPtr<EagerContext> context;
context = core::RefCountPtr<EagerContext>(new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &device_mgr, false, nullptr, nullptr, nullptr,
true));
auto ctx = context.get();
ctx->SetRunEagerOpAsFunction(true);
std::vector<ImmediateExecutionTensorHandle*> arg;
Tensor input_tensor(test_case.dtype, test_case.shape);
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor, test_case.device));
if (test_case.op_name != "RefIdentity") {
arg.push_back(input.get());
}
TF_ASSERT_OK(eager::MaybePinSmallOpsToCpu(&result, test_case.op_name, arg,
test_case.device));
ASSERT_EQ(result, test_case.expect);
}
INSTANTIATE_TEST_SUITE_P(
MaybePinSmallOpsToCpuTests, PlacementUtilsSmallOpsTest,
::testing::ValuesIn<MaybePinSmallOpsToCpuTestCase>({
{"OkToPin", DT_INT64, {}, "Identity", DEVICE_CPU0, true},
{"NotOkToPin_Float", DT_FLOAT, {}, "Identity", DEVICE_CPU0, false},
{"NotOkToPin_Function", DT_INT64, {}, "MyFunction", DEVICE_CPU0, false},
{"NotOkToPin_NoInputs",
DT_INT64,
{},
"RefIdentity",
DEVICE_CPU0,
false},
{"NotOkToPin_NotCpu", DT_INT64, {}, "Identity", DEVICE_GPU0, false},
{"NotOkToPin_TooBig", DT_INT64, {65}, "Identity", DEVICE_CPU0, false},
}),
[](const ::testing::TestParamInfo<PlacementUtilsSmallOpsTest::ParamType>&
info) { return info.param.test_name; });
struct MaybePinToResourceDeviceTestCase {
std::string test_name;
DataType dtype;
string op_name;
const char* device;
bool expect;
};
class PlacementUtilsResourceDeviceTest
: public ::testing::TestWithParam<MaybePinToResourceDeviceTestCase> {};
TEST_P(PlacementUtilsResourceDeviceTest, TestMaybePinToResourceDevice) {
const MaybePinToResourceDeviceTestCase& test_case = GetParam();
Device* device = nullptr;
std::vector<std::unique_ptr<Device>> local_devices;
CreateLocalDeviceVector(local_devices);
StaticDeviceMgr local_device_mgr(std::move(local_devices));
core::RefCountPtr<EagerContext> context;
context = core::RefCountPtr<EagerContext>(new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_EXPLICIT,
false, &local_device_mgr, false, nullptr, nullptr, nullptr,
true));
auto ctx = context.get();
auto op = EagerOperation(ctx);
TF_ASSERT_OK(op.Reset(test_case.op_name.c_str(), DEVICE_CPU0));
Tensor input_tensor(test_case.dtype, {});
auto input = core::RefCountPtr<ImmediateExecutionTensorHandle>(
ctx->CreateLocalHandleFromTFTensor(input_tensor, test_case.device));
TF_ASSERT_OK(op.AddInput(input.get()));
ASSERT_TRUE(device == nullptr);
TF_ASSERT_OK(eager::MaybePinToResourceDevice(&device, op));
ASSERT_EQ(device != nullptr, test_case.expect);
}
INSTANTIATE_TEST_SUITE_P(
MaybePinToResourceDeviceTestCase, PlacementUtilsResourceDeviceTest,
::testing::ValuesIn<MaybePinToResourceDeviceTestCase>({
{"OkToPin", DT_RESOURCE, "Identity", DEVICE_CPU0, true},
{"NotOkToPin_NotResource", DT_FLOAT, "Identity", DEVICE_CPU0, false},
{"NotOkToPin_ColocationExempt", DT_RESOURCE, "IdentityN", DEVICE_CPU0,
false},
}),
[](const ::testing::TestParamInfo<
PlacementUtilsResourceDeviceTest::ParamType>& info) {
return info.param.test_name;
});
TEST(PlacementUtilsTest, MaybePinToResourceDevice_OtherDevice) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
Device* device0 = device_mgr.ListDevices().at(0);
auto remote_device_mgr = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> remote_devices;
CreateRemoteDeviceVector(remote_devices);
TF_ASSERT_OK(remote_device_mgr->AddDevices(std::move(remote_devices)));
Device* device1 = remote_device_mgr->ListDevices().at(0);
Status s;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice({device0->name(), device1->name()},
0,
device_mgr.HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
auto remote_mgr = std::make_unique<eager::RemoteMgr>(
true, ctx);
TF_ASSERT_OK(ctx->InitializeRemoteMaster(
nullptr, nullptr,
nullptr, nullptr,
std::move(remote_device_mgr), {},
EagerContext::NewContextId(),
nullptr, &device_mgr, 600,
nullptr, std::move(remote_mgr)));
ASSERT_NE(ctx->remote_device_mgr(), nullptr);
auto op = EagerOperation(ctx);
TF_ASSERT_OK(op.Reset("Identity", DEVICE_CPU0));
TensorHandle* input = TensorHandle::CreateLazyRemoteHandle(
2, 1, DT_RESOURCE, device1, true,
ctx);
TF_ASSERT_OK(op.AddInput(input));
ASSERT_NE(input->resource_remote_device_incarnation(), 0);
Device* device = nullptr;
TF_ASSERT_OK(eager::MaybePinToResourceDevice(&device, op));
ASSERT_TRUE(device != nullptr);
input->Unref();
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/placement_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/placement_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
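A sketch of the CPU-pinning decision implemented above; the handle `h` is hypothetical, and the device string matches the test macros.

// Sketch only: `h` is a hypothetical ImmediateExecutionTensorHandle* holding
// a small int64 tensor that lives on the host CPU.
bool pin = false;
std::vector<ImmediateExecutionTensorHandle*> inputs = {h};
TF_CHECK_OK(eager::MaybePinSmallOpsToCpu(
    &pin, "Identity", inputs,
    "/job:localhost/replica:0/task:0/device:CPU:0"));
// pin is true only if every input is already on that CPU device, has dtype
// DT_INT32 or DT_INT64, and holds at most 64 elements.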
80a4a321-6ecd-4fb0-b3a8-a72aa0335a14 | cpp | tensorflow/tensorflow | summary_optimizer | tensorflow/core/common_runtime/eager/summary_optimizer.cc | tensorflow/core/common_runtime/eager/summary_optimizer_test.cc | #include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
namespace tensorflow::summary_optimizer {
namespace {
constexpr char kDisableSummariesAtRuntime[] = "disable_summaries_at_runtime";
constexpr char kFlushSummaryWriter[] = "FlushSummaryWriter";
constexpr char kWriteSummary[] = "write_summary";
constexpr char kForwardFunctionName[] = "forward_function_name";
constexpr char kBackwardFunctionName[] = "backward_function_name";
constexpr char kEmptyString[] = "";
using summary_optimizer::internal::NormalizeEdgeName;
using ArgDef = OpDef::ArgDef;
void UpdateNestedFunctionName(NodeDef& ndef) {
for (auto& [k, v] : *ndef.mutable_attr()) {
if (v.has_func()) {
v.mutable_func()->set_name(StrippedFunctionName(v.func().name()));
} else if (v.list().func_size() > 0) {
for (auto& func : *v.mutable_list()->mutable_func()) {
func.set_name(StrippedFunctionName(func.name()));
}
}
}
}
void PruneDeletedInputDeps(
const absl::flat_hash_set<std::string>& nodes_to_keep, NodeDef& ndef) {
auto inputs = ndef.input();
ndef.clear_input();
for (const std::string& input : inputs) {
if (nodes_to_keep.contains(NormalizeEdgeName(input))) {
ndef.add_input(input);
}
}
}
FunctionDef StripSummary(const FunctionDef& fdef_with_summaries) {
FunctionDef fdef = fdef_with_summaries;
fdef.mutable_signature()->set_name(
StrippedFunctionName(fdef.signature().name()));
auto nodes = fdef.node_def();
fdef.clear_node_def();
absl::flat_hash_set<std::string> nodes_to_keep;
absl::c_transform(nodes, std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const NodeDef& node_def) { return node_def.name(); });
absl::c_transform(fdef.signature().input_arg(),
std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const ArgDef& input_arg) { return input_arg.name(); });
for (const NodeDef& ndef : nodes) {
if (ndef.op() == kFlushSummaryWriter) nodes_to_keep.erase(ndef.name());
for (const auto& substr : absl::StrSplit(ndef.name(), '/')) {
if (substr == kWriteSummary) {
nodes_to_keep.erase(ndef.name());
break;
}
}
}
for (NodeDef& ndef : nodes) {
if (!nodes_to_keep.contains(ndef.name())) continue;
PruneDeletedInputDeps(nodes_to_keep, ndef);
UpdateNestedFunctionName(ndef);
*fdef.add_node_def() = std::move(ndef);
}
auto control_ret = fdef.control_ret();
fdef.clear_control_ret();
for (const auto& [signature_node_name, node_name] : control_ret) {
if (!nodes_to_keep.contains(NormalizeEdgeName(node_name))) continue;
fdef.mutable_control_ret()->insert({signature_node_name, node_name});
}
auto control_outputs = fdef.signature().control_output();
fdef.mutable_signature()->clear_control_output();
for (const std::string& control_output : control_outputs) {
if (!fdef.control_ret().contains(control_output)) continue;
fdef.mutable_signature()->add_control_output(control_output);
}
for (auto& [k, v] : *fdef.mutable_attr()) {
if (k == kForwardFunctionName || k == kBackwardFunctionName) {
v.set_s(StrippedFunctionName(v.s()));
}
if (k == kDisableSummariesAtRuntime) v.clear_list();
}
return fdef;
}
}
namespace internal {
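// Strips a leading control-dependency marker ('^') and a trailing output
// index (":N") from an edge name, e.g. "^write_summary/Identity:0" becomes
// "write_summary/Identity".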
std::string NormalizeEdgeName(absl::string_view name) {
std::vector<std::string> edge_name =
absl::StrSplit(name, absl::ByAnyChar("^:"));
return edge_name[0].empty() ? edge_name[1] : edge_name[0];
}
}
std::pair<absl::string_view, bool> GetDisableSummariesInputArg(
const FunctionDef& fdef) {
auto it = fdef.attr().find(kDisableSummariesAtRuntime);
if (it == fdef.attr().end()) return {kEmptyString, false};
if (it->second.has_list()) {
const auto& list = it->second.list();
if (list.s_size() == 1 && list.b_size() == 1) {
return {list.s(0), list.b(0)};
}
}
return {kEmptyString, false};
}
std::vector<FunctionDef> StripSummaries(const FunctionDef& fdef,
const FunctionLibraryDefinition& flib) {
std::vector<FunctionDef> results;
if (GetDisableSummariesInputArg(fdef).first.empty()) return results;
results.push_back(StripSummary(fdef));
FunctionLibraryDefinition reachable_library = flib.ReachableDefinitions(fdef);
for (const std::string& fname : reachable_library.ListFunctionNames()) {
auto* nested_fdef = flib.Find(fname);
if (nested_fdef == nullptr) continue;
results.push_back(StripSummary(*nested_fdef));
}
return results;
}
std::string StrippedFunctionName(absl::string_view fname) {
return absl::StrCat(fname, "__instance__no_summaries");
}
} | #include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <algorithm>
#include <string>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::tensorflow::summary_optimizer::GetDisableSummariesInputArg;
using ::tensorflow::summary_optimizer::StrippedFunctionName;
using ::tensorflow::summary_optimizer::StripSummaries;
using ::tensorflow::summary_optimizer::internal::NormalizeEdgeName;
using ::tsl::protobuf::TextFormat;
using ::tsl::protobuf::util::MessageDifferencer;
template <typename T>
void CompareProto(const T& expected, const std::string& text_proto) {
T proto;
ASSERT_TRUE(TextFormat::ParseFromString(text_proto, &proto));
MessageDifferencer differencer;
EXPECT_TRUE(differencer.Compare(expected, proto));
}
TEST(SummaryOptimizerInternal, NormalizesEdgeName) {
EXPECT_EQ(NormalizeEdgeName("include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary:0"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary/identity:0"),
"include_summary/identity");
}
TEST(SummaryOptimizer, GetsDisableSummariesInputArg) {
FunctionDef fdef;
auto input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "");
EXPECT_FALSE(input_arg.second);
AttrValue attr_val;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
list { s: "remove_summary" b: true }
)pb",
&attr_val));
fdef.mutable_attr()->insert({"disable_summaries_at_runtime", attr_val});
input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "remove_summary");
EXPECT_TRUE(input_arg.second);
}
TEST(SummaryOptimizer, StripsSummaries) {
FunctionDef fdef;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
signature {
name: "train" # Function name should be updated.
input_arg: { name: "include_summaries" }
control_output: "out_pruned" # Control output should be pruned
# because it was pruned from
# `control_ret`.
control_output: "out"
}
node_def { name: "x" }
node_def {
name: "write_summary/Identity"
} # Node should get pruned based on name.
node_def {
name: "Identity/x"
input: "write_summary/Identity" # Summary scope input should get
# pruned.
input: "x"
}
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2" }
func: { name: "nested_fn3" }
}
}
}
}
node_def {
op: "FlushSummaryWriter"
} # Node should get pruned based on op.
control_ret {
key: "out_pruned",
value: "write_summary/Identity:0"
} # Control return should get pruned because node was pruned.
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name"
value: {
s: "__inference_train_1"
} # Forward function name should be updated.
}
attr {
key: "backward_function_name"
value: {
s: "__inference_train_2"
} # Backward function name should be updated.
}
attr {
key: "disable_summaries_at_runtime"
value: { list { s: "include_summaries" b: false } }
}
)pb",
&fdef));
FunctionDef nested_fdef;
nested_fdef.mutable_signature()->set_name("nested_fn");
FunctionDef nested_fdef2;
nested_fdef2.mutable_signature()->set_name("nested_fn2");
FunctionDef nested_fdef3;
nested_fdef3.mutable_signature()->set_name("nested_fn3");
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef2));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef3));
std::vector<FunctionDef> stripped_fdefs = StripSummaries(fdef, flib);
ASSERT_EQ(stripped_fdefs.size(), 4);
struct {
bool operator()(const FunctionDef& lhs, const FunctionDef& rhs) const {
return lhs.signature().name() > rhs.signature().name();
}
} fdefOrdering;
std::sort(stripped_fdefs.begin(), stripped_fdefs.end(), fdefOrdering);
CompareProto(stripped_fdefs[0], R"pb(
signature {
name: "train__instance__no_summaries"
input_arg: { name: "include_summaries" }
control_output: "out"
}
node_def { name: "x" }
node_def { name: "Identity/x" input: "x" }
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn__instance__no_summaries" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2__instance__no_summaries" }
func: { name: "nested_fn3__instance__no_summaries" }
}
}
}
}
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name",
value: { s: "__inference_train_1__instance__no_summaries" }
}
attr {
key: "backward_function_name",
value: { s: "__inference_train_2__instance__no_summaries" }
}
attr {
key: "disable_summaries_at_runtime"
value {}
}
)pb");
CompareProto(stripped_fdefs[1], R"pb(
signature { name: "nested_fn__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[2], R"pb(
signature { name: "nested_fn3__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[3], R"pb(
signature { name: "nested_fn2__instance__no_summaries" }
)pb");
}
TEST(SummaryOptimizer, DoesNotStripSummariesWhenNotEnabled) {
FunctionDef fdef;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
signature { name: "train" }
attr {
key: "disable_summaries_at_runtime",
value: {}
}
)pb",
&fdef));
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
fdef.clear_attr();
TF_ASSERT_OK(flib.RemoveFunction("train"));
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
}
TEST(SummaryOptimizer, GeneratesNewFunctionName) {
EXPECT_EQ(StrippedFunctionName("train"), "train__instance__no_summaries");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/summary_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/summary_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
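A sketch tying the three entry points above together; `fdef` is a hypothetical FunctionDef carrying the disable_summaries_at_runtime attr, as in the tests.

// Sketch only: `fdef` is a hypothetical FunctionDef whose
// "disable_summaries_at_runtime" attr names a bool input arg, as in the tests.
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_CHECK_OK(flib.AddFunctionDef(fdef));
auto [arg_name, default_value] =
    summary_optimizer::GetDisableSummariesInputArg(fdef);
if (!arg_name.empty()) {
  std::vector<FunctionDef> stripped =
      summary_optimizer::StripSummaries(fdef, flib);
  // stripped[0] is fdef with summary nodes pruned, renamed via
  // StrippedFunctionName(), e.g. "train" -> "train__instance__no_summaries".
}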
7a5d48b6-b23a-4473-9c78-e527da0dec6b | cpp | tensorflow/tensorflow | tensor_handle_data | tensorflow/core/common_runtime/eager/tensor_handle_data.cc | tensorflow/core/common_runtime/eager/tensor_handle_data_test.cc | #include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include <utility>
#include <variant>
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
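// Each accessor below first calls WaitReady(); for handle data constructed
// non-ready (the BlockingControl case), this blocks until SetTensor() or
// Poison() has run.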
Status LocalTensorHandleData::Tensor(const tensorflow::Tensor** t) const {
TF_RETURN_IF_ERROR(WaitReady("Tensor"));
*t = &tensor_;
return absl::OkStatus();
}
Status LocalTensorHandleData::TensorValue(tensorflow::TensorValue* t) {
TF_RETURN_IF_ERROR(WaitReady("TensorValue"));
tensorflow::Tensor& tensor = tensor_;
*t = tensorflow::TensorValue(&tensor);
return absl::OkStatus();
}
Status LocalTensorHandleData::Shape(TensorShape* shape) const {
TF_RETURN_IF_ERROR(WaitReady("Shape"));
*shape = tensor_.shape();
return absl::OkStatus();
}
Status LocalTensorHandleData::NumDims(int* num_dims) const {
TF_RETURN_IF_ERROR(WaitReady("NumDims"));
*num_dims = tensor_.dims();
return absl::OkStatus();
}
Status LocalTensorHandleData::Dim(int dim_index, int64_t* dim) const {
TF_RETURN_IF_ERROR(WaitReady("Dim"));
*dim = tensor_.dim_size(dim_index);
return absl::OkStatus();
}
Status LocalTensorHandleData::NumElements(int64_t* num_elements) const {
TF_RETURN_IF_ERROR(WaitReady("NumElements"));
*num_elements = tensor_.NumElements();
return absl::OkStatus();
}
Status LocalTensorHandleData::Unprotect() {
if (!IsReady()) {
return errors::Internal("Cannot unprotect a non-ready tensor");
}
forwarding_protection_tensor_ = tensorflow::Tensor();
return absl::OkStatus();
}
Status LocalTensorHandleData::SetTensor(tensorflow::Tensor&& t) {
DCHECK(!IsReady()) << "SetTensor is only called on non-ready handles.";
tensor_ = std::move(t);
forwarding_protection_tensor_ = tensor_;
auto& state = std::get<BlockingControl>(ctrl_);
state.SetReady();
return absl::OkStatus();
}
string LocalTensorHandleData::DebugString() const {
if (IsReady()) {
return tensor_.DeviceSafeDebugString();
} else {
return "LocalTensorHandleData";
}
}
void LocalTensorHandleData::BlockingControl::SetReady() {
mutex_lock l(mu_);
is_ready_ = true;
}
Status LocalTensorHandleData::BlockingControl::WaitReady(
const char* caller) const {
tf_shared_lock l(mu_);
if (!is_ready_) {
tsl::profiler::TraceMe activity(
[caller] { return absl::StrCat(caller, " WaitReady"); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << "WaitReady: " << caller << " " << this;
mu_.Await(Condition(&is_ready_));
}
return is_poisoned_;
}
void LocalTensorHandleData::BlockingControl::Poison(Status status) {
mutex_lock l(mu_);
if (is_ready_) {
LOG(ERROR) << "Poison can only be called on non-ready handle: " << this;
return;
}
is_poisoned_ = status;
is_ready_ = true;
}
} | #include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include <utility>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(TensorHandleData, TensorAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
const tensorflow::Tensor* ret_tensor;
TF_EXPECT_OK(handle_data.Tensor(&ret_tensor));
EXPECT_EQ(ret_tensor->dtype(), DT_UINT16);
EXPECT_EQ(ret_tensor->dims(), 2);
}
TEST(TensorHandleData, TensorValueAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
tensorflow::TensorValue tensor_value;
TF_EXPECT_OK(handle_data.TensorValue(&tensor_value));
EXPECT_EQ(tensor_value.dtype(), DT_UINT16);
}
TEST(TensorHandleData, TensorShapeAttribute) {
TensorShape shape({2, 2});
Tensor t(DT_UINT16, shape);
LocalTensorHandleData handle_data(std::move(t));
tensorflow::TensorShape tensor_shape;
TF_EXPECT_OK(handle_data.Shape(&tensor_shape));
EXPECT_EQ(tensor_shape, shape);
}
TEST(TensorHandleData, NumDimsAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
int num_dims;
TF_EXPECT_OK(handle_data.NumDims(&num_dims));
EXPECT_EQ(num_dims, 2);
}
TEST(TensorHandleData, DimAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
int64_t dim;
TF_EXPECT_OK(handle_data.Dim(1, &dim));
EXPECT_EQ(dim, 3);
}
TEST(TensorHandleData, NumElementsAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
int64_t num_elements;
TF_EXPECT_OK(handle_data.NumElements(&num_elements));
EXPECT_EQ(num_elements, 6);
}
TEST(TensorHandleData, UnprotectReady) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
EXPECT_TRUE(handle_data.IsReady());
TF_EXPECT_OK(handle_data.Unprotect());
}
TEST(TensorHandleData, UnprotectNotReady) {
LocalTensorHandleData handle_data;
EXPECT_FALSE(handle_data.IsReady());
EXPECT_THAT(handle_data.Unprotect(),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
}
TEST(TensorHandleData, DebugString) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
EXPECT_THAT(handle_data.DebugString(),
::testing::HasSubstr("Tensor<type: uint16 shape: [2,3]>"));
}
TEST(TensorHandleData, NonBlockingControlPoisonHandle) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
TF_EXPECT_OK(handle_data.IsPoisoned());
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
handle_data.Poison(fake_failure_status);
TF_EXPECT_OK(handle_data.IsPoisoned());
}
TEST(TensorHandleData, BlockingControlPoisonHandle) {
LocalTensorHandleData handle_data;
TF_EXPECT_OK(handle_data.IsPoisoned());
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
handle_data.Poison(fake_failure_status);
EXPECT_THAT(handle_data.IsPoisoned(),
tensorflow::testing::StatusIs(
fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
TEST(TensorHandleData, BlockingControlSetTensor) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data;
TF_EXPECT_OK(handle_data.SetTensor(std::move(t)));
int64_t num_elements;
TF_EXPECT_OK(handle_data.NumElements(&num_elements));
EXPECT_EQ(num_elements, 6);
}
TEST(TensorHandleData, BlockingControlNotReadyDebugString) {
LocalTensorHandleData handle_data;
EXPECT_THAT(handle_data.DebugString(),
::testing::HasSubstr("LocalTensorHandleData"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_data.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
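A sketch of the blocking-control lifecycle shown above: a default-constructed LocalTensorHandleData is non-ready, and its accessors wait in WaitReady() until SetTensor() or Poison() flips the state.

// Sketch only.
LocalTensorHandleData data;                    // non-ready: BlockingControl
TF_CHECK_OK(data.SetTensor(Tensor(DT_FLOAT, TensorShape({2, 2}))));
const Tensor* t = nullptr;
TF_CHECK_OK(data.Tensor(&t));                  // WaitReady("Tensor") returns now
TF_CHECK_OK(data.Unprotect());                 // valid only once ready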
c83ba0db-1096-4396-a51b-56cd13874e1c | cpp | tensorflow/tensorflow | attr_builder | tensorflow/core/common_runtime/eager/attr_builder.cc | tensorflow/core/common_runtime/eager/attr_builder_test.cc | #include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include <memory>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
namespace tensorflow {
namespace {
mutex g_op_name_to_attr_type_map_lock(LINKER_INITIALIZED);
tensorflow::gtl::FlatMap<string, const AttrTypeMap*>* OpNameToAttrTypeMap() {
static auto* const m =
new tensorflow::gtl::FlatMap<string, const AttrTypeMap*>;
return m;
}
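// Encoding: the low bits of each map value hold a TF_AttrType; the kIsList
// bit marks list-valued attributes and is masked off in AttrTypeByName().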
const uint32 kIsList = 1U << 31;
AttrTypeMap* DefaultFunctionAttrTypeMap() {
AttrTypeMap* map = new AttrTypeMap();
(*map)["executor_type"] = TF_ATTR_STRING;
(*map)["config_proto"] = TF_ATTR_STRING;
return map;
}
const AttrTypeMap* GetDefaultFunctionAttrTypeMap() {
static const AttrTypeMap* map = DefaultFunctionAttrTypeMap();
return map;
}
}
Status OpDefForOp(const string& op_name, const OpDef** op_def) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = OpRegistry::Global()->LookUp(op_name, &op_reg_data);
if (s.ok()) {
*op_def = &op_reg_data->op_def;
}
return s;
}
Status AttrTypeMapForOp(const char* op_name, const AttrTypeMap** out,
bool* is_function) {
{
tf_shared_lock l(g_op_name_to_attr_type_map_lock);
*is_function = false;
*out = gtl::FindPtrOrNull(*OpNameToAttrTypeMap(), op_name);
if (*out != nullptr) return absl::OkStatus();
}
mutex_lock l(g_op_name_to_attr_type_map_lock);
*out = gtl::FindPtrOrNull(*OpNameToAttrTypeMap(), op_name);
if (*out != nullptr) return absl::OkStatus();
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name, &op_def);
if (absl::IsNotFound(s)) {
*out = GetDefaultFunctionAttrTypeMap();
*is_function = true;
return absl::OkStatus();
} else if (!s.ok()) {
return s;
}
std::unique_ptr<AttrTypeMap> m(new AttrTypeMap);
for (const auto& attr : op_def->attr()) {
string type = attr.type();
const bool is_list = (type.length() > 6 && type.compare(0, 4, "list") == 0);
if (is_list) {
type = type.substr(5, type.length() - 6);
}
uint32 t = is_list ? kIsList : 0;
if (type == "string") {
t |= TF_ATTR_STRING;
} else if (type == "int") {
t |= TF_ATTR_INT;
} else if (type == "float") {
t |= TF_ATTR_FLOAT;
} else if (type == "bool") {
t |= TF_ATTR_BOOL;
} else if (type == "type") {
t |= TF_ATTR_TYPE;
} else if (type == "shape") {
t |= TF_ATTR_SHAPE;
} else if (type == "tensor") {
t |= TF_ATTR_TENSOR;
} else if (type == "func") {
t |= TF_ATTR_FUNC;
} else {
return errors::Unimplemented(
"TODO(agarwal): Enable support for ops with attributes of type '",
type, "'");
}
gtl::InsertIfNotPresent(m.get(), attr.name(), t);
}
*out = m.get();
auto r = OpNameToAttrTypeMap()->emplace(op_name, m.release());
DCHECK(r.second) << "AttrTypeMap already exists for " << op_name;
return absl::OkStatus();
}
#define DEFINE_GET_ATTR(TYPE, FIELD, ATTR_TYPE) \
template <> \
Status AttrBuilder::Get(StringPiece attr_name, TYPE* value) const { \
auto it = encoded_attrs_.find(string(attr_name)); \
if (it == encoded_attrs_.end()) { \
return errors::NotFound("No attr named '", attr_name, \
"' found in AttrBuilder for ", op_name_); \
} \
attr_tmp_.ParseFromString(it->second); \
TF_RETURN_IF_ERROR(AttrValueHasType(attr_tmp_, ATTR_TYPE)); \
*value = attr_tmp_.FIELD(); \
return OkStatus(); \
}
DEFINE_GET_ATTR(float, f, "float");
DEFINE_GET_ATTR(int, i, "int");
DEFINE_GET_ATTR(int64_t, i, "int");
DEFINE_GET_ATTR(bool, b, "bool");
DEFINE_GET_ATTR(tensorflow::DataType, type, "type");
#undef DEFINE_GET_ATTR
template <>
Status AttrBuilder::Get(StringPiece attr_name,
absl::InlinedVector<DataType, 4>* value) const {
auto it = encoded_attrs_.find(string(attr_name));
if (it == encoded_attrs_.end()) {
return errors::NotFound("No attr named '", attr_name,
"' found in AttrBuilder for ", op_name_);
}
attr_tmp_.ParseFromString(it->second);
TF_RETURN_IF_ERROR(AttrValueHasType(attr_tmp_, "list(type)"));
for (int i = 0; i < attr_tmp_.list().type_size(); ++i) {
value->push_back(attr_tmp_.list().type(i));
}
return absl::OkStatus();
}
AttrBuilder& AttrBuilder::NumInputs(int n) {
num_inputs_ = n;
node_def_finalized_ = false;
return *this;
}
void AttrBuilder::FillAttrValueMap(AttrValueMap* m) const {
for (auto& entry : encoded_attrs_) {
attr_tmp_.ParseFromString(entry.second);
m->insert(AttrValueMap::value_type(entry.first, attr_tmp_));
}
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name().c_str(), &op_def);
if (!s.ok()) return;
DCHECK(op_def);
for (const auto& attr_def : op_def->attr()) {
if (attr_def.has_default_value() && !m->count(attr_def.name())) {
SetInAttrValueMap(m, attr_def.name(), attr_def.default_value());
}
}
}
namespace {
bool ValueMatchesDefault(const OpDef* op_def, const string& attr_name,
const AttrValue& attr_value) {
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.name() == attr_name && attr_def.has_default_value() &&
AreAttrValuesEqual(attr_def.default_value(), attr_value)) {
return true;
}
}
return false;
}
}
void AttrBuilder::FillAttrValueMapWithoutDefaults(AttrValueMap* m) const {
const OpDef* op_def = nullptr;
Status s = OpDefForOp(op_name().c_str(), &op_def);
for (auto& entry : encoded_attrs_) {
attr_tmp_.ParseFromString(entry.second);
if (!s.ok() || !ValueMatchesDefault(op_def, entry.first, attr_tmp_)) {
m->insert(AttrValueMap::value_type(entry.first, attr_tmp_));
}
}
}
void AttrBuilder::AddAttrIfNotPresent(StringPiece attr_name,
const AttrValue& value) {
encoded_attrs_.emplace(string(attr_name), value.SerializeAsString());
}
const NodeDef& AttrBuilder::BuildNodeDef() {
if (node_def_finalized_) return node_def_;
node_def_.Clear();
node_def_.set_name(op_name_);
node_def_.set_op(op_name_);
for (int i = 0; i < num_inputs_; ++i) {
node_def_.add_input("dummy_input");
}
FillAttrValueMap(node_def_.mutable_attr());
node_def_finalized_ = true;
return node_def_;
}
void AttrBuilder::CopyAttributes(const AttrBuilder& other) {
encoded_attrs_.insert(other.encoded_attrs_.begin(),
other.encoded_attrs_.end());
}
Status AttrTypeByName(const AttrTypeMap& m, const string& attr_name,
TF_AttrType* out, unsigned char* is_list) {
auto* t = gtl::FindOrNull(m, attr_name);
if (t == nullptr) {
return errors::InvalidArgument("Attribute '", attr_name,
"' does not exist for this operation");
}
*out = static_cast<TF_AttrType>(*t & ~kIsList);
if (*t & kIsList) {
*is_list = 1;
} else {
*is_list = 0;
}
return absl::OkStatus();
}
namespace {
void CombineUnordered(const tensorflow::Fprint128& a,
tensorflow::Fprint128* b) {
b->low64 += a.low64;
b->high64 += a.high64;
}
inline tensorflow::Fprint128 CacheKeyHelper(StringPiece s,
const tensorflow::Fprint128& b) {
tensorflow::Fprint128 a = tensorflow::Fingerprint128(s);
return FingerprintCat128(a, b);
}
inline tensorflow::Fprint128 CacheKeyHelper(StringPiece s, uint64 b) {
return CacheKeyHelper(s, {b, b});
}
}
tensorflow::Fprint128 AttrBuilder::CacheKey(const StringPiece device) {
if (!cached_cache_key_ || device != device_for_cached_cache_key_) {
cached_cache_key_ = BuildCacheKeyForDevice(device);
device_for_cached_cache_key_ = string(device);
}
return *cached_cache_key_;
}
tensorflow::Fprint128 AttrBuilder::BuildCacheKeyForDevice(
const StringPiece device) const {
tensorflow::Fprint128 f = tensorflow::Fingerprint128(op_name());
f = tsl::FingerprintCat128(f, tensorflow::Fingerprint128(device));
for (const auto& p : encoded_attrs_) {
CombineUnordered(
CacheKeyHelper(p.first, tensorflow::Fingerprint128(p.second)), &f);
}
return f;
}
void AttrBuilder::GetNameAttrList(
tensorflow::NameAttrList* name_and_attrs) const {
FillAttrValueMap(name_and_attrs->mutable_attr());
name_and_attrs->set_name(op_name());
}
Status AttrBuilder::GetTypeList(
absl::string_view attr_name,
absl::InlinedVector<DataType, 4>* type_list) const {
return Get(attr_name, type_list);
}
bool AttrBuilder::GetInt(absl::string_view attr_name, int64_t* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetFloat(absl::string_view attr_name, float* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetBool(absl::string_view attr_name, bool* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
bool AttrBuilder::GetType(absl::string_view attr_name,
tensorflow::DataType* result) const {
Status s = Get(attr_name, result);
return s.ok();
}
} | #include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include <memory>
#include <vector>
#include "tensorflow/c/c_api.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(AttrTypeMap, Lookup) {
const AttrTypeMap* m = nullptr;
bool is_function = false;
Status s = AttrTypeMapForOp("SomeFunctionName", &m, &is_function);
EXPECT_TRUE(s.ok());
EXPECT_TRUE(is_function);
ASSERT_NE(m->end(), m->find("executor_type"));
EXPECT_EQ(TF_ATTR_STRING, m->find("executor_type")->second);
ASSERT_NE(m->end(), m->find("config_proto"));
EXPECT_EQ(TF_ATTR_STRING, m->find("config_proto")->second);
is_function = true;
s = AttrTypeMapForOp("MatMul", &m, &is_function);
EXPECT_FALSE(is_function);
ASSERT_TRUE(s.ok()) << s;
TF_AttrType t;
unsigned char is_list = 1;
s = AttrTypeByName(*m, "ThisAttribyteCannotPossiblyExist", &t, &is_list);
EXPECT_FALSE(s.ok());
EXPECT_NE(is_list, 0);
s = AttrTypeByName(*m, "transpose_a", &t, &is_list);
ASSERT_TRUE(s.ok()) << s;
EXPECT_EQ(TF_ATTR_BOOL, t);
EXPECT_EQ(is_list, 0);
s = AttrTypeMapForOp("Squeeze", &m, &is_function);
ASSERT_TRUE(s.ok()) << s;
s = AttrTypeByName(*m, "squeeze_dims", &t, &is_list);
ASSERT_TRUE(s.ok()) << s;
EXPECT_EQ(TF_ATTR_INT, t);
EXPECT_NE(is_list, 0);
}
TEST(AttrTypeMap, CacheKey) {
AttrBuilder a("op_name");
a.NumInputs(2);
a.Set("T", TF_FLOAT);
tensorflow::Fprint128 cache_key = a.CacheKey("cpu:0");
ASSERT_FALSE(cache_key == a.CacheKey("cpu:1"));
ASSERT_TRUE(cache_key == a.CacheKey("cpu:0"));
a.Set("x", 1.0);
ASSERT_FALSE(cache_key == a.CacheKey("cpu:0"));
}
string ToString(const AttrValueMap& m) {
std::vector<string> strs;
for (const auto& e : m) {
strs.push_back(absl::StrCat(e.first, " -> ", e.second.DebugString()));
}
return absl::StrJoin(strs, "\n");
}
TEST(AttrBuilder, FillAttrValueMapWithoutDefaults_MatMul) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
AttrValueMap m;
a.FillAttrValueMapWithoutDefaults(&m);
ASSERT_EQ(1, m.size()) << ToString(m);
ASSERT_EQ(true, m["transpose_a"].b()) << ToString(m);
}
TEST(AttrBuilder, FillAttrValueMapWithoutDefaults_UnknownOp) {
AttrBuilder a("SomeUnknownOp");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
AttrValueMap m;
a.FillAttrValueMapWithoutDefaults(&m);
ASSERT_EQ(2, m.size()) << ToString(m);
ASSERT_EQ(true, m["transpose_a"].b()) << ToString(m);
ASSERT_EQ(false, m["transpose_b"].b()) << ToString(m);
}
TEST(AttrBuilder, GetTypeAndNumber) {
AttrBuilder a("Concat");
a.Set("T", DT_FLOAT);
a.Set("N", 2);
DataType type;
ASSERT_TRUE(a.GetType("T", &type));
ASSERT_EQ(DT_FLOAT, type);
int64_t num;
ASSERT_TRUE(a.GetInt("N", &num));
ASSERT_EQ(2, num);
}
TEST(AttrBuilder, GetTypeList) {
AttrBuilder a("IdentityN");
a.Set("T", absl::Span<const DataType>({DT_FLOAT, DT_INT64}));
absl::InlinedVector<DataType, 4> type_list;
Status s = a.GetTypeList("T", &type_list);
ASSERT_TRUE(s.ok()) << s;
ASSERT_EQ(2, type_list.size()) << type_list.size();
ASSERT_EQ(DT_FLOAT, type_list[0]) << type_list[0];
ASSERT_EQ(DT_INT64, type_list[1]) << type_list[1];
}
TEST(AttrBuilder, BuildNodeDef) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
a.NumInputs(2);
const NodeDef& node_def = a.BuildNodeDef();
auto attrs = node_def.attr();
EXPECT_EQ(node_def.name(), "MatMul");
ASSERT_NE(attrs.find("transpose_a"), attrs.end());
EXPECT_EQ(attrs.find("transpose_a")->second.b(), true);
ASSERT_NE(attrs.find("transpose_b"), attrs.end());
EXPECT_EQ(attrs.find("transpose_b")->second.b(), false);
EXPECT_EQ(node_def.input_size(), 2);
}
TEST(AttrBuilder, BuildNodeDef_Modified) {
AttrBuilder a("MatMul");
a.Set("transpose_a", true);
a.Set("transpose_b", false);
a.Set("grad_x", true);
a.Set("grad_y", false);
a.NumInputs(2);
const NodeDef& node_def = a.BuildNodeDef();
EXPECT_EQ(node_def.attr().size(), 6);
a.Set("new_attr", 15);
a.NumInputs(3);
const NodeDef& node_def2 = a.BuildNodeDef();
auto attrs = node_def2.attr();
EXPECT_EQ(attrs.size(), 7);
ASSERT_NE(attrs.find("transpose_a"), attrs.end());
EXPECT_EQ(attrs.find("transpose_a")->second.b(), true);
ASSERT_NE(attrs.find("transpose_b"), attrs.end());
EXPECT_EQ(attrs.find("transpose_b")->second.b(), false);
ASSERT_NE(attrs.find("grad_x"), attrs.end());
EXPECT_EQ(attrs.find("grad_x")->second.b(), true);
ASSERT_NE(attrs.find("grad_y"), attrs.end());
EXPECT_EQ(attrs.find("grad_y")->second.b(), false);
ASSERT_NE(attrs.find("new_attr"), attrs.end());
EXPECT_EQ(attrs.find("new_attr")->second.i(), 15);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/attr_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/attr_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
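A sketch of the AttrBuilder round trip exercised above: set attributes, fingerprint them per device, and materialize a NodeDef that also picks up op-def defaults.

// Sketch only.
AttrBuilder a("MatMul");
a.NumInputs(2);
a.Set("transpose_a", true);
tensorflow::Fprint128 key = a.CacheKey("cpu:0");  // device-qualified fingerprint
const NodeDef& ndef = a.BuildNodeDef();  // defaults such as transpose_b filled in
bool transpose_a = false;
if (a.GetBool("transpose_a", &transpose_a)) {
  // transpose_a == true
}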
7801b87e-4f22-4f70-8c3c-dc16e2b87f00 | cpp | tensorflow/tensorflow | eager_operation | tensorflow/core/common_runtime/eager/eager_operation.cc | tensorflow/core/common_runtime/eager/eager_operation_test.cc | #include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/common_runtime/eager/custom_device.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/platform/casts.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
namespace tensorflow {
void EagerOperation::Clear() {
for (ImmediateExecutionTensorHandle* h : inputs_) {
h->Unref();
}
inputs_.clear();
custom_device_tensor_handles_count_ = 0;
ClearInferenceState();
}
Status EagerOperation::SetAttrValue(const char* attr_name,
const AttrValue& value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrString(const char* attr_name, const char* data,
size_t length) {
MutableAttrs()->Set(attr_name, StringPiece(data, length));
return absl::OkStatus();
}
Status EagerOperation::SetAttrInt(const char* attr_name, int64_t value) {
MutableAttrs()->Set(attr_name, static_cast<int64_t>(value));
return absl::OkStatus();
}
Status EagerOperation::SetAttrFloat(const char* attr_name, float value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrBool(const char* attr_name, bool value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrType(const char* attr_name, DataType value) {
MutableAttrs()->Set(attr_name, value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrShape(const char* attr_name, const int64_t* dims,
const int num_dims) {
if (num_dims > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Value specified for `", attr_name, "` has ",
num_dims,
" dimensions which is over the limit of ",
TensorShape::MaxDimensions(), ".");
}
TensorShapeProto proto;
if (num_dims < 0) {
proto.set_unknown_rank(true);
} else {
for (int d = 0; d < num_dims; ++d) {
proto.add_dim()->set_size(dims[d]);
}
}
MutableAttrs()->Set(attr_name, proto);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunction(const char* attr_name,
const AbstractOperation* value) {
AttrValue attr_value;
NameAttrList* func = attr_value.mutable_func();
func->set_name(value->Name());
auto* value_operation = down_cast<const EagerOperation*>(value);
value_operation->Attrs().FillAttrValueMap(func->mutable_attr());
MutableAttrs()->Set(attr_name, attr_value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunctionName(const char* attr_name,
const char* data, size_t length) {
AttrValue attr_value;
NameAttrList* func = attr_value.mutable_func();
func->set_name(data, length);
MutableAttrs()->Set(attr_name, attr_value);
return absl::OkStatus();
}
Status EagerOperation::SetAttrTensor(const char* attr_name,
AbstractTensorInterface* tensor) {
Tensor t = TensorFromInterface(tensor);
MutableAttrs()->Set(attr_name, t);
return absl::OkStatus();
}
Status EagerOperation::SetAttrStringList(const char* attr_name,
const void* const* values,
const size_t* lengths,
int num_values) {
std::vector<StringPiece> v(num_values);
for (int i = 0; i < num_values; ++i) {
v[i] = StringPiece(static_cast<const char*>(values[i]), lengths[i]);
}
MutableAttrs()->Set(attr_name, v);
return absl::OkStatus();
}
Status EagerOperation::SetAttrFloatList(const char* attr_name,
const float* values, int num_values) {
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const float>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrIntList(const char* attr_name,
const int64_t* values, int num_values) {
  MutableAttrs()->Set(attr_name,
                      gtl::ArraySlice<const int64_t>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrTypeList(const char* attr_name,
const DataType* values, int num_values) {
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const DataType>(values, num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrBoolList(const char* attr_name,
const unsigned char* values,
int num_values) {
std::unique_ptr<bool[]> b(new bool[num_values]);
for (int i = 0; i < num_values; ++i) {
b[i] = values[i];
}
MutableAttrs()->Set(attr_name,
gtl::ArraySlice<const bool>(b.get(), num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrShapeList(const char* attr_name,
const int64_t** dims,
const int* num_dims, int num_values) {
std::unique_ptr<TensorShapeProto[]> proto(new TensorShapeProto[num_values]);
for (int i = 0; i < num_values; ++i) {
const auto num_dims_i = num_dims[i];
if (num_dims_i > TensorShape::MaxDimensions()) {
return errors::InvalidArgument(
strings::StrCat("Value specified for `", attr_name, "` has ",
num_dims_i, " dimensions which is over the limit of ",
TensorShape::MaxDimensions(), "."));
}
if (num_dims_i < 0) {
proto[i].set_unknown_rank(true);
} else {
const int64_t* dims_i = dims[i];
auto proto_i = &proto[i];
for (int d = 0; d < num_dims_i; ++d) {
proto_i->add_dim()->set_size(dims_i[d]);
}
}
}
MutableAttrs()->Set(
attr_name, gtl::ArraySlice<TensorShapeProto>(proto.get(), num_values));
return absl::OkStatus();
}
Status EagerOperation::SetAttrFunctionList(
const char* attr_name, absl::Span<const AbstractOperation*> values) {
size_t num_values = values.size();
std::unique_ptr<NameAttrList[]> funcs(new NameAttrList[num_values]);
for (int i = 0; i < num_values; i++) {
auto* value_operation = down_cast<const EagerOperation*>(values[i]);
funcs[i].set_name(value_operation->Name());
value_operation->Attrs().FillAttrValueMap(funcs[i].mutable_attr());
}
MutableAttrs()->Set(
attr_name, gtl::ArraySlice<const NameAttrList>(funcs.get(), num_values));
return absl::OkStatus();
}
const OpDef* EagerOperation::GetOpDef(Status* status) {
const tensorflow::OpDef* op_def = OpDef();
if (op_def) return op_def;
*status = OpDefForOp(Name(), &op_def);
return op_def;
}
Status EagerOperation::InputLength(const char* input_name, int* length) {
Status status;
const tensorflow::OpDef* op_def = GetOpDef(&status);
if (!status.ok()) {
return status;
}
AttrValueMap attrs;
Attrs().FillAttrValueMap(&attrs);
NameRangeMap name_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(AttrSlice(&attrs), *op_def, &name_ranges, nullptr));
auto iter = name_ranges.find(input_name);
if (iter == name_ranges.end()) {
return errors::InvalidArgument("Input '", input_name, "' not found");
}
*length = iter->second.second - iter->second.first;
return absl::OkStatus();
}
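// Worked example (hypothetical node): for an op such as AddN, whose signature
// declares `inputs: N * T`, NameRangesForNode maps "inputs" to a half-open
// index range; with N == 3 that range is [0, 3), so InputLength("inputs",
// &len) yields len == 3.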
absl::Span<ImmediateExecutionTensorHandle* const> EagerOperation::GetInputs()
const {
return absl::MakeSpan(
reinterpret_cast<ImmediateExecutionTensorHandle* const*>(inputs_.data()),
inputs_.size());
}
Status EagerOperation::OutputLength(const char* output_name, int* length) {
Status status;
const tensorflow::OpDef* op_def = GetOpDef(&status);
if (!status.ok()) {
return status;
}
AttrValueMap attrs;
Attrs().FillAttrValueMap(&attrs);
NameRangeMap name_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(AttrSlice(&attrs), *op_def, nullptr, &name_ranges));
auto iter = name_ranges.find(output_name);
if (iter == name_ranges.end()) {
return errors::InvalidArgument("Output '", output_name, "' not found");
}
*length = iter->second.second - iter->second.first;
return absl::OkStatus();
}
Status EagerOperation::AddInput(AbstractTensorHandle* input) {
ImmediateExecutionTensorHandle* h =
down_cast<ImmediateExecutionTensorHandle*>(input);
if (CustomDeviceTensorHandle::classof(h)) {
custom_device_tensor_handles_count_++;
}
AddTensorHandle(h);
return MaybeInferSingleInputAttrs(h);
}
Status EagerOperation::AddInputList(
absl::Span<AbstractTensorHandle* const> inputs) {
for (auto& input : inputs) {
if (CustomDeviceTensorHandle::classof(input)) {
custom_device_tensor_handles_count_++;
}
ImmediateExecutionTensorHandle* h =
down_cast<ImmediateExecutionTensorHandle*>(input);
AddTensorHandle(h);
}
return InferInputListAttrs(inputs.size());
}
Status EagerOperation::SetInput(size_t index,
ImmediateExecutionTensorHandle* input) {
if (index >= inputs_.size()) {
return errors::InvalidArgument("Index >= inputs.size: %d >= %d", index,
inputs_.size());
}
auto* previous = inputs_[index];
if (CustomDeviceTensorHandle::classof(previous)) {
custom_device_tensor_handles_count_--;
}
if (CustomDeviceTensorHandle::classof(input)) {
custom_device_tensor_handles_count_++;
}
input->Ref();
inputs_[index] = input;
previous->Unref();
return absl::OkStatus();
}
Status EagerOperation::Reset(
const char* op, const char* device_name, bool remote,
EagerExecutor* executor,
const absl::optional<EagerFunctionParams> eager_func_params) {
DCHECK(inputs_.empty());
ClearInferenceState();
bool is_function = false;
TF_RETURN_IF_ERROR(AttrTypeMapForOp(op, &attr_types_, &is_function));
colocation_exempt_ = is_function;
if (!is_function) {
const auto& exempt_ops = InputColocationExemptionRegistry::Global()->Get();
colocation_exempt_ = exempt_ops.find(op) != exempt_ops.end();
TF_RETURN_IF_ERROR(OpDefForOp(op, &op_def_));
} else if (!remote) {
const FunctionLibraryDefinition* func_lib_def;
if (eager_func_params.has_value() &&
eager_func_params.value().func_lib_def_override != nullptr) {
func_lib_def = eager_func_params.value().func_lib_def_override;
} else {
func_lib_def = ctx_.FuncLibDef();
}
if (func_lib_def->Find(op) == nullptr) {
return absl::NotFoundError(absl::StrCat(
"'", op,
"' is neither a type of a primitive operation nor a name "
"of a function registered in binary running on ",
port::Hostname(),
". Make sure the operation or function is "
"registered in the binary running in this process."));
}
}
attrs_.Reset(op);
stack_trace_.reset();
is_function_ = is_function;
cancellation_manager_ = nullptr;
executor_ = executor ? executor : &ctx_.Executor();
if (eager_func_params.has_value()) {
eager_func_params_ = eager_func_params;
}
op_name_ = op;
return SetDeviceName(device_name);
}
Status EagerOperation::MaybeInferSingleInputAttrs(
ImmediateExecutionTensorHandle* handle) {
if (!op_def_) return absl::OkStatus();
const auto& input_def = op_def_->input_arg(inference_arg_idx_++);
if (!input_def.number_attr().empty() || !input_def.type_list_attr().empty()) {
ClearInferenceState();
return absl::OkStatus();
}
const std::string& type_attr = input_def.type_attr();
if (!type_attr.empty() &&
inference_attrs_.find(type_attr) == inference_attrs_.end()) {
MutableAttrs()->Set(type_attr, handle->DataType());
inference_attrs_.insert(type_attr);
}
return absl::OkStatus();
}
void EagerOperation::InferSingleTypeInputListAttrs(
const OpDef::ArgDef& input_def, const DataType dtype, int num_inputs) {
if (inference_attrs_.find(input_def.number_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(input_def.number_attr(), num_inputs);
inference_attrs_.insert(input_def.number_attr());
}
if (inference_attrs_.find(input_def.type_attr()) == inference_attrs_.end()) {
MutableAttrs()->Set(input_def.type_attr(), dtype);
inference_attrs_.insert(input_def.type_attr());
}
}
void EagerOperation::InferMixedTypeInputListAttrs(
const OpDef::ArgDef& input_def, const std::vector<DataType>& dtypes) {
if (inference_attrs_.find(input_def.type_list_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(
input_def.type_list_attr(),
gtl::ArraySlice<const DataType>(dtypes.data(), dtypes.size()));
inference_attrs_.insert(input_def.type_list_attr());
}
}
Status EagerOperation::InferInputListAttrs(int num_inputs) {
if (!op_def_) return absl::OkStatus();
int start = inference_arg_idx_;
const auto& input_def = op_def_->input_arg(inference_arg_idx_++);
if (!input_def.type_list_attr().empty()) {
std::vector<DataType> dtypes(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
dtypes[i] = inputs_[start + i]->DataType();
}
InferMixedTypeInputListAttrs(input_def, dtypes);
} else if (!input_def.type_attr().empty() &&
!input_def.number_attr().empty()) {
InferSingleTypeInputListAttrs(input_def, inputs_[start]->DataType(),
num_inputs);
} else if (!input_def.number_attr().empty()) {
if (inference_attrs_.find(input_def.number_attr()) ==
inference_attrs_.end()) {
MutableAttrs()->Set(input_def.number_attr(), num_inputs);
inference_attrs_.insert(input_def.number_attr());
}
} else {
return errors::InvalidArgument("Invalid input list definition");
}
return absl::OkStatus();
}
Status EagerOperation::TensorHandleInputs(
const absl::InlinedVector<TensorHandle*, 4>** inputs) const {
if (TF_PREDICT_TRUE(!HasCustomDeviceInput())) {
*inputs = reinterpret_cast<const absl::InlinedVector<TensorHandle*, 4>*>(
&inputs_);
return absl::OkStatus();
} else {
return errors::Internal("The operation unexpectedly had custom devices.");
}
}
Status EagerOperation::MutableTensorHandleInputs(
absl::InlinedVector<TensorHandle*, 4>** inputs) {
if (TF_PREDICT_TRUE(!HasCustomDeviceInput())) {
*inputs =
reinterpret_cast<absl::InlinedVector<TensorHandle*, 4>*>(&inputs_);
return absl::OkStatus();
} else {
return errors::Internal("The operation unexpectedly had custom devices.");
}
}
Status EagerOperation::SetDeviceName(const char* c_name) {
string name(c_name != nullptr ? c_name : "");
if (name != last_set_device_name_) {
if (!DeviceNameUtils::ParseFullName(name, &device_parsed_name_)) {
return errors::InvalidArgument("Malformed device specification '", name,
"' in eager op: ", DebugString());
}
last_set_device_name_ = name;
device_name_ = DeviceNameUtils::ParsedNameToString(device_parsed_name_);
device_ = kVariantDeviceNull;
}
return absl::OkStatus();
}
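// Usage sketch (device strings are illustrative; the unit test exercises the
// same behavior): partial specifications parse and are canonicalized, e.g.
// "/device:GPU" becomes "/device:GPU:*" while "/job:localhost" is kept as-is,
// and malformed strings such as "/not/a/valid/name" are rejected.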
bool EagerOperation::IsLocal() const {
if (ctx_.remote_device_mgr() == nullptr) return true;
if (!device_parsed_name_.has_job && !device_parsed_name_.has_replica &&
!device_parsed_name_.has_task)
return true;
auto& host_cpu_name = ctx_.HostCPU()->parsed_name();
return device_parsed_name_.job == host_cpu_name.job &&
device_parsed_name_.replica == host_cpu_name.replica &&
device_parsed_name_.task == host_cpu_name.task;
}
string VariantDeviceDebugString(VariantDevice device) {
if (device == kVariantDeviceNull) {
return "[]";
} else if (std::holds_alternative<CustomDevice*>(device)) {
return std::get<CustomDevice*>(device)->name();
} else {
return std::get<Device*>(device)->DebugString();
}
}
const AbstractOpAttrs* EagerOperation::GetOpAttrs() const { return &attrs_; }
void EagerOperation::AddAttrs(const AbstractOpAttrs* op_attrs) {
attrs_.CopyAttributes(*(down_cast<const AttrBuilder*>(op_attrs)));
}
string EagerOperation::DebugString() const {
string out;
VLOG(1) << "EagerOperation::DebugString() over " << this;
strings::StrAppend(&out, "Name: ", Name(), "\n");
strings::StrAppend(&out, "Device Name: [", device_name_, "]\n");
strings::StrAppend(&out, "Device: ", VariantDeviceDebugString(Device()),
"\n");
for (const auto& input : inputs_) {
VLOG(1) << "Input ptr: " << input;
strings::StrAppend(&out, "Input: ", input->DebugString(), "\n");
}
NodeDef ndef;
Attrs().FillAttrValueMap(ndef.mutable_attr());
strings::StrAppend(&out, "Attrs: ", ndef.DebugString(), "\n");
return out;
}
void EagerOperation::AddTensorHandle(ImmediateExecutionTensorHandle* h) {
h->Ref();
inputs_.push_back(h);
attrs_.NumInputs(static_cast<int>(inputs_.size()));
}
} | #include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(EagerOperationTest, DeviceName) {
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
  auto ctx = new EagerContext(
      SessionOptions(),
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      /*async=*/false, &device_mgr, /*device_mgr_owned=*/false,
      /*rendezvous=*/nullptr, /*cluster_flr=*/nullptr,
      /*collective_executor_mgr=*/nullptr,
      /*run_eager_op_as_function=*/true);
auto op = new EagerOperation(ctx);
TF_ASSERT_OK(op->SetDeviceName("/device:DONTHAVE"));
EXPECT_EQ("/device:DONTHAVE:*", op->DeviceName());
TF_ASSERT_OK(op->SetDeviceName(""));
EXPECT_EQ("", op->DeviceName());
TF_ASSERT_OK(op->SetDeviceName("/job:localhost"));
EXPECT_EQ("/job:localhost", op->DeviceName());
EXPECT_NE(absl::OkStatus(), op->SetDeviceName("/not/a/valid/name"));
delete op;
ctx->Unref();
}
TEST(EagerOperationTest, EagerFunctionParamsAndStepId) {
StaticDeviceMgr device_mgr(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
  auto ctx = new EagerContext(
      SessionOptions(),
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      /*async=*/false, &device_mgr, /*device_mgr_owned=*/false,
      /*rendezvous=*/nullptr, /*cluster_flr=*/nullptr,
      /*collective_executor_mgr=*/nullptr,
      /*run_eager_op_as_function=*/true);
tensorflow::FunctionDef function_def;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(
" signature {"
" name: 'DummyFunction'"
" }",
&function_def));
TF_ASSERT_OK(ctx->AddFunctionDef(function_def));
auto op = new EagerOperation(ctx);
EXPECT_FALSE(op->eager_func_params().has_value());
string device_name = "/job:localhost/replica:0/task:0/device:CPU:0";
TF_ASSERT_OK(op->SetDeviceName(device_name.c_str()));
TF_ASSERT_OK(op->Reset("DummyFunction", device_name.c_str()));
op->SetStepId(255);
EXPECT_EQ(op->eager_func_params()->step_id.value(), 255);
delete op;
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_operation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/eager_operation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55dc148d-bb3c-4c44-8572-b7c721de9c6b | cpp | tensorflow/tensorflow | update_api_def | tensorflow/core/api_def/update_api_def.cc | tensorflow/core/api_def/update_api_def_test.cc | #include "tensorflow/core/api_def/update_api_def.h"
#include <ctype.h>
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFileFormat[] = "api_def_%s.pbtxt";
constexpr char kDocStart[] = ".Doc(R\"doc(";
constexpr char kDocEnd[] = ")doc\")";
void FillBaseApiDef(ApiDef* api_def, const OpDef& op) {
api_def->set_graph_op_name(op.name());
for (auto& input_arg : op.input_arg()) {
if (!input_arg.description().empty()) {
auto* api_def_in_arg = api_def->add_in_arg();
api_def_in_arg->set_name(input_arg.name());
api_def_in_arg->set_description(input_arg.description());
}
}
for (auto& output_arg : op.output_arg()) {
if (!output_arg.description().empty()) {
auto* api_def_out_arg = api_def->add_out_arg();
api_def_out_arg->set_name(output_arg.name());
api_def_out_arg->set_description(output_arg.description());
}
}
for (auto& attr : op.attr()) {
if (!attr.description().empty()) {
auto* api_def_attr = api_def->add_attr();
api_def_attr->set_name(attr.name());
api_def_attr->set_description(attr.description());
}
}
api_def->set_summary(op.summary());
api_def->set_description(op.description());
}
bool OpHasDocs(const OpDef& op) {
if (!op.summary().empty() || !op.description().empty()) {
return true;
}
for (const auto& arg : op.input_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& arg : op.output_arg()) {
if (!arg.description().empty()) {
return true;
}
}
for (const auto& attr : op.attr()) {
if (!attr.description().empty()) {
return true;
}
}
return false;
}
bool CheckDocsMatch(const OpDef& op1, const OpDef& op2) {
if (op1.summary() != op2.summary() ||
op1.description() != op2.description() ||
op1.input_arg_size() != op2.input_arg_size() ||
op1.output_arg_size() != op2.output_arg_size() ||
op1.attr_size() != op2.attr_size()) {
return false;
}
for (int i = 0; i < op1.input_arg_size(); ++i) {
if (op1.input_arg(i).description() != op2.input_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.output_arg_size(); ++i) {
if (op1.output_arg(i).description() != op2.output_arg(i).description()) {
return false;
}
}
for (int i = 0; i < op1.attr_size(); ++i) {
if (op1.attr(i).description() != op2.attr(i).description()) {
return false;
}
}
return true;
}
bool ValidateOpDocs(const OpDef& op, const string& doc) {
OpDefBuilder b(op.name());
for (const auto& arg : op.input_arg()) {
b.Input(arg.name() + ":string");
}
for (const auto& arg : op.output_arg()) {
b.Output(arg.name() + ":string");
}
for (const auto& attr : op.attr()) {
b.Attr(attr.name() + ":string");
}
b.Doc(doc);
OpRegistrationData op_reg_data;
TF_CHECK_OK(b.Finalize(&op_reg_data));
return CheckDocsMatch(op, op_reg_data.op_def);
}
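// Note on the trick above: every arg and attr is re-declared with a dummy
// `string` type solely so that OpDefBuilder::Doc() can re-parse `doc`; only
// the extracted summary and descriptions are compared, never the real types.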
}
// Returns `file_contents` with the .Doc() call for `op` removed, searching
// from `start_location`. Returns the input unchanged (with an error logged)
// if the doc markers cannot be found or the doc fails validation.
string RemoveDoc(const OpDef& op, const string& file_contents,
size_t start_location) {
const auto doc_start_location = file_contents.find(kDocStart, start_location);
const string format_error = strings::Printf(
"Could not find %s doc for removal. Make sure the doc is defined with "
"'%s' prefix and '%s' suffix or remove the doc manually.",
op.name().c_str(), kDocStart, kDocEnd);
if (doc_start_location == string::npos) {
std::cerr << format_error << std::endl;
LOG(ERROR) << "Didn't find doc start";
return file_contents;
}
const auto doc_end_location = file_contents.find(kDocEnd, doc_start_location);
if (doc_end_location == string::npos) {
LOG(ERROR) << "Didn't find doc start";
std::cerr << format_error << std::endl;
return file_contents;
}
  // sizeof includes the trailing '\0'; subtract 1 to get the marker length.
  const auto doc_start_size = sizeof(kDocStart) - 1;
string doc_text = file_contents.substr(
doc_start_location + doc_start_size,
doc_end_location - doc_start_location - doc_start_size);
if (!ValidateOpDocs(op, doc_text)) {
LOG(ERROR) << "Invalid doc: " << doc_text;
std::cerr << format_error << std::endl;
return file_contents;
}
auto before_doc = file_contents.substr(0, doc_start_location);
absl::StripTrailingAsciiWhitespace(&before_doc);
return before_doc +
file_contents.substr(doc_end_location + sizeof(kDocEnd) - 1);
}
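// Minimal standalone sketch of the marker-scan splice performed above
// (standard C++ only; `contents` is a hypothetical file body):
//
//   size_t start = contents.find(kDocStart);
//   size_t end = contents.find(kDocEnd, start);
//   std::string without_doc = contents.substr(0, start) +
//                             contents.substr(end + sizeof(kDocEnd) - 1);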
namespace {
void RemoveDocs(const std::vector<const OpDef*>& ops,
const std::vector<string>& op_files) {
std::set<string> processed_ops;
for (const auto& file : op_files) {
string file_contents;
bool file_contents_updated = false;
TF_CHECK_OK(ReadFileToString(Env::Default(), file, &file_contents));
for (auto op : ops) {
if (processed_ops.find(op->name()) != processed_ops.end()) {
continue;
}
string register_call =
strings::Printf("REGISTER_OP(\"%s\")", op->name().c_str());
const auto register_call_location = file_contents.find(register_call);
if (register_call_location == string::npos) {
continue;
}
std::cout << "Removing .Doc call for " << op->name() << " from " << file
<< "." << std::endl;
file_contents = RemoveDoc(*op, file_contents, register_call_location);
file_contents_updated = true;
processed_ops.insert(op->name());
}
if (file_contents_updated) {
TF_CHECK_OK(WriteStringToFile(Env::Default(), file, file_contents))
<< "Could not remove .Doc calls in " << file
<< ". Make sure the file is writable.";
}
}
}
}
string CreateApiDef(const OpDef& op) {
ApiDefs api_defs;
FillBaseApiDef(api_defs.add_op(), op);
const std::vector<string> multi_line_fields = {"description"};
std::string new_api_defs_str;
::tensorflow::protobuf::TextFormat::PrintToString(api_defs,
&new_api_defs_str);
return PBTxtToMultiline(new_api_defs_str, multi_line_fields);
}
void CreateApiDefs(const OpList& ops, const string& api_def_dir,
const string& op_file_pattern) {
auto* excluded_ops = GetExcludedOps();
std::vector<const OpDef*> new_ops_with_docs;
for (const auto& op : ops.op()) {
if (excluded_ops->find(op.name()) != excluded_ops->end()) {
continue;
}
    string file_path =
        io::JoinPath(tensorflow::string(api_def_dir), kApiDefFileFormat);
    // The joined path still contains the "%s" from kApiDefFileFormat; fill in
    // the op name here.
    file_path = strings::Printf(file_path.c_str(), op.name().c_str());
if (!Env::Default()->FileExists(file_path).ok()) {
std::cout << "Creating ApiDef file " << file_path << std::endl;
const auto& api_def_text = CreateApiDef(op);
TF_CHECK_OK(WriteStringToFile(Env::Default(), file_path, api_def_text));
if (OpHasDocs(op)) {
new_ops_with_docs.push_back(&op);
}
}
}
if (!op_file_pattern.empty()) {
std::vector<string> op_files;
TF_CHECK_OK(Env::Default()->GetMatchingPaths(op_file_pattern, &op_files));
RemoveDocs(new_ops_with_docs, op_files);
}
}
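// Usage sketch (the paths and the Export() call are assumptions based on the
// public OpRegistry API, not taken from this file):
//
//   OpList ops;
//   OpRegistry::Global()->Export(/*include_internal=*/false, &ops);
//   CreateApiDefs(ops, "/tmp/api_def", "tensorflow/core/ops/*.cc");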
} | #include "tensorflow/core/api_def/update_api_def.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(UpdateApiDefTest, TestRemoveDocSingleOp) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Output("output: T")
.Attr("b: type")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op1.
Description
for Op1.
b : Description for b.
a: Description for a.
output: Description for output.
)doc");
)opdef";
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
  EXPECT_EQ(op_def_text,
            RemoveDoc(op, op_def_text_with_doc, /*start_location=*/0));
}
TEST(UpdateApiDefTest, TestRemoveDocMultipleOps) {
const string op_def_text = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape);
)opdef";
const string op_def_text_with_doc = R"opdef(
REGISTER_OP("Op1")
.Input("a: T")
.Doc(R"doc(
Summary for Op1.
)doc")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Op2")
.Input("a: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op2.
)doc");
REGISTER_OP("Op3")
.Input("c: T")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Summary for Op3.
)doc");
)opdef";
const string op1_text = R"(
name: "Op1"
input_arg {
name: "a"
}
summary: "Summary for Op1."
)";
const string op2_text = R"(
name: "Op2"
input_arg {
name: "a"
}
summary: "Summary for Op2."
)";
const string op3_text = R"(
name: "Op3"
input_arg {
name: "c"
}
summary: "Summary for Op3."
)";
OpDef op1, op2, op3;
protobuf::TextFormat::ParseFromString(op1_text, &op1);
protobuf::TextFormat::ParseFromString(op2_text, &op2);
protobuf::TextFormat::ParseFromString(op3_text, &op3);
  string updated_text =
      RemoveDoc(op2, op_def_text_with_doc,
                /*start_location=*/op_def_text_with_doc.find("Op2"));
EXPECT_EQ(string::npos, updated_text.find("Summary for Op2"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op1"));
EXPECT_NE(string::npos, updated_text.find("Summary for Op3"));
  updated_text = RemoveDoc(op3, updated_text,
                           /*start_location=*/updated_text.find("Op3"));
  updated_text = RemoveDoc(op1, updated_text,
                           /*start_location=*/updated_text.find("Op1"));
EXPECT_EQ(op_def_text, updated_text);
}
TEST(UpdateApiDefTest, TestCreateApiDef) {
const string op_text = R"(
name: "Op1"
input_arg {
name: "a"
description: "Description for a."
}
output_arg {
name: "output"
description: "Description for output."
}
attr {
name: "b"
description: "Description for b."
}
summary: "Summary for Op1."
description: "Description\nfor Op1."
)";
OpDef op;
protobuf::TextFormat::ParseFromString(op_text, &op);
const string expected_api_def = R"(op {
graph_op_name: "Op1"
in_arg {
name: "a"
description: <<END
Description for a.
END
}
out_arg {
name: "output"
description: <<END
Description for output.
END
}
attr {
name: "b"
description: <<END
Description for b.
END
}
summary: "Summary for Op1."
description: <<END
Description
for Op1.
END
}
)";
EXPECT_EQ(expected_api_def, CreateApiDef(op));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/update_api_def.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/update_api_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f86148fb-f654-4d7d-b45a-5e7e21319870 | cpp | tensorflow/tensorflow | runtime_client | tensorflow/core/function/runtime_client/runtime_client.cc | tensorflow/core/function/runtime_client/runtime_client_test.cc | #include "tensorflow/core/function/runtime_client/runtime_client.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#if !defined(DISABLE_MLIR)
#include "tensorflow/compiler/mlir/python/mlir.h"
#endif
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace core {
namespace function {
EagerContext& GlobalEagerContext() {
static EagerContext* global_ctx = []() {
SessionOptions opts;
std::vector<std::unique_ptr<Device>> devices;
    const Status device_init_status = DeviceFactory::AddDevices(
        opts, "/job:localhost/replica:0/task:0", &devices);
    CHECK(device_init_status.ok());
    return new EagerContext(
        opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
        /*async=*/false,
        /*device_mgr=*/new DynamicDeviceMgr(std::move(devices)),
        /*device_mgr_owned=*/true,
        /*rendezvous=*/nullptr,
        /*cluster_flr=*/nullptr,
        /*collective_executor_mgr=*/nullptr,
        /*run_eager_op_as_function=*/true);
}();
return *global_ctx;
}
EagerContext& GlobalPythonEagerContext() {
EagerContext* ctx = reinterpret_cast<EagerContext*>(GetCEagerContext());
DCHECK(ctx) << "The Python eager context must be initialized first.";
return *ctx;
}
absl::StatusOr<FunctionDef> Runtime::GetFunctionProto(StringPiece name) {
EagerContext& ctx = this->eager_ctx_;
const FunctionDef* f = ctx.FindFunctionDef(std::string(name));
if (f == nullptr) {
    return Status(absl::StatusCode::kInvalidArgument,
                  absl::StrCat("Could not find a function named ", name));
}
return *f;
}
Status Runtime::CreateFunction(const FunctionDef& fdef) {
const auto& fname = fdef.signature().name();
if (this->eager_ctx_.FindFunctionByName(fname)) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(this->eager_ctx_.RemoveFunction(fname),
"removing function ", fname);
}
return this->eager_ctx_.AddFunctionDef(fdef);
}
Status Runtime::CreateFunction(OpaqueTfgGraphFuncOp* fop) {
mlir::tfg::GraphFuncOp fop_proper =
*reinterpret_cast<mlir::tfg::GraphFuncOp*>(fop);
return mlir::tfg::ConvertToFunctionDef(fop_proper,
*this->eager_ctx_.FuncLibDef());
}
Status Runtime::CreateFunction(OpaqueTfFuncOp* fop) {
mlir::func::FuncOp fop_proper = *reinterpret_cast<mlir::func::FuncOp*>(fop);
const auto& fname = fop_proper.getName().str();
GraphExportConfig config;
FunctionDef fdef;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(fop_proper, config,
&fdef),
"creating function ", fname);
return CreateFunction(fdef);
}
Status Runtime::TransformFunction(StringPiece name, StringPiece pipeline_name,
Dialect dialect) {
mlir::MLIRContext ctx;
mlir::PassManager pm(&ctx);
std::string error;
llvm::raw_string_ostream error_stream(error);
if (mlir::failed(mlir::parsePassPipeline(std::string(pipeline_name), pm,
error_stream))) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("locating pass pipeline ", pipeline_name, ": ",
error_stream.str()));
}
auto fn = GetFunctionProto(name);
TF_RETURN_WITH_CONTEXT_IF_ERROR(fn.status(), "loading function ", name);
GraphDef graph;
*graph.mutable_library()->add_function() = *fn;
tensorflow::GraphDebugInfo debug_info;
if (dialect == Dialect::TFG) {
auto mlir_fn = mlir::tfg::ImportGraphDef(&ctx, debug_info, graph);
TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ",
name);
mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx);
if (failed(pm.run(mlir_fn->get()))) {
return diagnostics_handler.Combine(
Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("running pass pipeline ", pipeline_name, ": ")));
}
for (auto fn : mlir_fn->get().getBody()->getOps<mlir::tfg::GraphFuncOp>()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CreateFunction(reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fn)),
absl::StrCat("updating function ", fn.getName().str()));
}
return absl::OkStatus();
}
if (dialect == Dialect::TF) {
Status status;
FunctionLibraryDefinition& flib_def = *this->eager_ctx_.FuncLibDef();
std::unique_ptr<FunctionBody> fbody;
status = FunctionDefToBodyHelper(*fn, AttrSlice(), &flib_def, &fbody);
TF_RETURN_WITH_CONTEXT_IF_ERROR(status, "importing function ", name);
auto mlir_fn = ConvertFunctionToMlir(fbody.get(), flib_def, &ctx);
TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ",
name);
mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx);
if (failed(pm.run(mlir_fn->get()))) {
return diagnostics_handler.Combine(
Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("running pass pipeline ", pipeline_name, ": ")));
}
for (auto fn : mlir_fn->get().getBody()->getOps<mlir::func::FuncOp>()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CreateFunction(reinterpret_cast<OpaqueTfFuncOp*>(&fn)),
absl::StrCat("updating function ", fn.getName().str()));
}
return absl::OkStatus();
}
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Unsupported dialect: ", dialect,
". Supported dialects are Dialect::TFG and Dialect::TF."));
}
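// Usage sketch (pass names are hypothetical; any textual pipeline accepted by
// mlir::parsePassPipeline works, as exercised by the unit tests):
//
//   TF_RETURN_IF_ERROR(rt.TransformFunction(
//       "MyFunc", "my-pass,my-other-pass", Runtime::Dialect::TFG));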
absl::StatusOr<ReturnValues> Runtime::CallFunction(
StringPiece name, absl::Span<AbstractTensorHandle* const> args) {
EagerContext& ctx = this->eager_ctx_;
ImmediateOpPtr op(ctx.CreateOperation());
TF_RETURN_WITH_CONTEXT_IF_ERROR(op->Reset(name.data(), nullptr),
"initializing call op for ", name);
TF_RETURN_WITH_CONTEXT_IF_ERROR(op->AddInputList(args),
"preparing call args for ", name);
const FunctionDef* fn_def = ctx.GetFunctionDef(string(name));
int num_retvals = fn_def->signature().output_arg_size();
int actual_retvals = num_retvals;
std::vector<ImmediateExecutionTensorHandle*> retvals(num_retvals);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
op->Execute(absl::MakeSpan(
reinterpret_cast<AbstractTensorHandle**>(retvals.data()),
num_retvals),
&actual_retvals),
"executing call op for ", name);
  DCHECK_EQ(num_retvals, actual_retvals);
ReturnValues final_returns;
for (const auto& r : retvals) {
final_returns.emplace_back(ImmediateTensorHandlePtr(r));
}
return final_returns;
}
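// End-to-end sketch (mirrors the unit tests; `fdef` and `x` are placeholders):
//
//   Runtime rt(GlobalEagerContext());
//   TF_RETURN_IF_ERROR(rt.CreateFunction(fdef));
//   TF_ASSIGN_OR_RETURN(ReturnValues rets,
//                       rt.CallFunction("MyFunc", {x.get()}));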
}
}
} | #include "tensorflow/core/function/runtime_client/runtime_client.h"
#include <stdint.h>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/tensor_interface.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/function/testing/test_pass.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace function {
namespace {
EagerContextPtr TestingEagerCtx() {
SessionOptions opts;
std::vector<std::unique_ptr<Device>> devices;
  const Status device_init_status = DeviceFactory::AddDevices(
      opts, "/job:localhost/replica:0/task:0", &devices);
  CHECK(device_init_status.ok());
  return EagerContextPtr(new EagerContext(
      opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      /*async=*/false,
      /*device_mgr=*/new DynamicDeviceMgr(std::move(devices)),
      /*device_mgr_owned=*/true,
      /*rendezvous=*/nullptr,
      /*cluster_flr=*/nullptr,
      /*collective_executor_mgr=*/nullptr,
      /*run_eager_op_as_function=*/true));
}
int IntValue(ImmediateExecutionTensorHandle& h) {
Status status;
AbstractTensorPtr t(h.Resolve(&status));
DCHECK(status.ok());
switch (h.DataType()) {
case DT_INT32:
return *(static_cast<int32_t*>(t->Data()));
case DT_INT64:
return *(static_cast<int64_t*>(t->Data()));
default:
DCHECK(false) << "invalid data type";
return 0;
}
}
ImmediateTensorHandlePtr IntScalarTensor(EagerContext& ctx, int value) {
AbstractTensorPtr tensor(ctx.CreateInt32Scalar(value));
ImmediateTensorHandlePtr handle(ctx.CreateLocalHandle(tensor.get()));
return handle;
}
FunctionDef MakeNullaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: 'NullaryFunction'
output_arg { name: 'o' type: DT_INT32 }
}
node_def {
name: 'retval'
op: 'Const'
attr {
key: 'dtype'
value { type: DT_INT32 }
}
attr {
key: 'value'
value {
tensor {
dtype: DT_INT32
tensor_shape {}
int_val: 1
}
}
}
}
ret { key: 'o' value: 'retval:output' })pb",
&fd));
return fd;
}
FunctionDef MakeUnaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "UnaryFunction"
input_arg { name: "x" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "ret"
op: "Identity"
input: "x"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
FunctionDef MakeBinaryFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "BinaryFunction"
input_arg { name: "x" type: DT_INT32 }
input_arg { name: "y" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "x_plus_y"
op: "AddV2"
input: "x"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "ret"
op: "Identity"
input: "x_plus_y:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
FunctionDef MakeMultiplyFunction() {
FunctionDef fd;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(signature {
name: "MultiplyFunction"
input_arg { name: "x" type: DT_INT32 }
input_arg { name: "y" type: DT_INT32 }
output_arg { name: "ret" type: DT_INT32 }
}
node_def {
name: "x_times_y"
op: "Mul"
input: "x"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node_def {
name: "ret"
op: "Identity"
input: "x_times_y:z:0"
attr {
key: "T"
value { type: DT_INT32 }
}
}
ret { key: "ret" value: "ret:output:0" })pb",
&fd));
return fd;
}
TEST(GlobalContext, Basic) {
Runtime rt(GlobalEagerContext());
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, Call) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, GetRoundtrip) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<FunctionDef> fdef_ret = rt.GetFunctionProto("NullaryFunction");
TF_ASSERT_OK(fdef_ret.status());
FunctionDef fdef = *fdef_ret;
fdef.mutable_signature()->set_name("SecondFunction");
TF_ASSERT_OK(rt.CreateFunction(fdef));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("SecondFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CreateTest, MlirFromGraphDef) {
mlir::MLIRContext mctx;
mctx.getOrLoadDialect<mlir::tfg::TFGraphDialect>();
auto m = mlir::parseSourceString<mlir::ModuleOp>(
R"mlir(
module {
tfg.func @NullaryFunction()
-> (tensor<i32> {tfg.dtype = i32, tfg.name = "o"})
{
%Const, %ctl = Const name("retval") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>)
return(%Const) : tensor<i32>
}
}
)mlir",
&mctx);
mlir::tfg::GraphFuncOp fop =
*m->getBody()->op_begin<mlir::tfg::GraphFuncOp>();
EagerContextPtr ectx = TestingEagerCtx();
Runtime rt(*ectx);
OpaqueTfgGraphFuncOp* opaque_fop =
reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fop);
TF_ASSERT_OK(rt.CreateFunction(opaque_fop));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Nullary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction()));
absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Unary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeUnaryFunction()));
auto x = IntScalarTensor(*ctx, 1);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("UnaryFunction", {x.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 1);
}
TEST(CallTest, Binary) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
auto x = IntScalarTensor(*ctx, 1);
auto y = IntScalarTensor(*ctx, 1);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 2);
}
TEST(TransformTest, TestPassOnBinaryFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass"));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 6);
}
TEST(TransformTest, TestPassOnMultiplyFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeMultiplyFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("MultiplyFunction", "test-pass-tf-dialect",
Runtime::Dialect::TF));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("MultiplyFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 5);
}
TEST(TransformTest, TestMixedPassesOnBinaryFunction) {
EagerContextPtr ctx = TestingEagerCtx();
Runtime rt(*ctx);
TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction()));
testing::RegisterTestPass();
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass"));
TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass-tf-dialect",
Runtime::Dialect::TF));
auto x = IntScalarTensor(*ctx, 2);
auto y = IntScalarTensor(*ctx, 3);
absl::StatusOr<ReturnValues> rets =
rt.CallFunction("BinaryFunction", {x.get(), y.get()});
TF_ASSERT_OK(rets.status());
ASSERT_EQ(rets->size(), 1);
ASSERT_EQ(rets->at(0)->DataType(), DT_INT32);
EXPECT_EQ(IntValue(*(rets->at(0))), 5);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/function/runtime_client/runtime_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/function/runtime_client/runtime_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
acb6e25d-5035-4121-96bc-58bf96d1e10c | cpp | tensorflow/tensorflow | graph_executor | tensorflow/core/tfrt/graph_executor/graph_executor.cc | tensorflow/core/tfrt/graph_executor/graph_executor_test.cc | #include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_utils.h"
#include "tensorflow/core/tfrt/common/metrics.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/executable_context.h"
#include "tensorflow/core/tfrt/graph_executor/export_mlir.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/sync_resource_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/runtime/stream.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/stubs/tfrt_native_lowering_stub.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/chain.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/support/string_util.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr char kDeadlineExceededMessage[] = "Deadline exceeded.";
constexpr char kTensorNameJoiningDelimiter[] = "-";
constexpr char kArgumentTypeJoiningDelimiter[] = "^";
constexpr char kFallbackInitFunction[] = "_tfrt_fallback_init";
constexpr char kResourceInitFunction[] = "_tfrt_resource_init";
StepId GetNextStepId() {
static StepIdGenerator gen;
return gen.GetNextStepId();
}
auto* graph_executor_mode = monitoring::Gauge<std::string, 2>::New(
"/tfrt/graph_executor/mode",
"Record the total number of imported savedmodel using different graph "
"executor modes (BEF vs MLRT interpreter)",
"model_name", "model_version");
}
// Executes `function` from an MLRT bytecode executable and blocks until all
// results are available; output tensors are appended to `outputs`.
tensorflow::Status RunMlrtFunction(
mlrt::bc::Function function,
const mlrt::LoadedExecutable& loaded_executable,
const tsl::RCReference<tfrt::RequestContext>& request_context,
tfrt::ConcurrentWorkQueue& work_queue,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
SyncResourceState* sync_resource_state) {
DCHECK(function);
const auto* fallback_request_state =
request_context->GetDataIfExists<tfd::KernelFallbackCompatRequestState>();
DCHECK(fallback_request_state);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(&work_queue);
AddSyncContext(execution_context, *request_context->host(),
sync_resource_state);
execution_context.AddUserContext(std::make_unique<tf_mlrt::Context>(
fallback_request_state, request_context->resource_context(),
request_context->cancellation_context().get()));
execution_context.AddUserErrorLogger(
[fallback_request_state](absl::Status status) {
if (fallback_request_state) {
LOG(ERROR) << "Model "
<< fallback_request_state->session_metadata().name()
<< " version "
<< fallback_request_state->session_metadata().version()
<< " has error: " << status;
}
});
absl::InlinedVector<mlrt::Value, 4> mlrt_inputs;
mlrt_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
mlrt_inputs.emplace_back(FallbackTensor(input));
}
absl::InlinedVector<mlrt::Value, 4> mlrt_outputs(
function.output_regs().size());
tsl::RCReference<tsl::AsyncValue> chain =
tsl::MakeConstructedAsyncValueRef<tsl::Chain>();
execution_context.set_exit_handler(
[chain = chain.get()]() { chain->SetStateConcrete(); });
execution_context.CallByMove(function, absl::MakeSpan(mlrt_inputs),
absl::MakeSpan(mlrt_outputs));
work_queue.AddTask(
[&execution_context]() { mlrt::Execute(execution_context); });
work_queue.Await(chain);
if (!execution_context.status().ok()) {
outputs->resize(mlrt_outputs.size(), tensorflow::Tensor());
return execution_context.status();
}
for (auto& mlrt_output : mlrt_outputs) {
DCHECK(mlrt_output.HasValue());
outputs->push_back(std::move(mlrt_output.Get<FallbackTensor>().tensor()));
}
return absl::OkStatus();
}
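// Synchronization sketch: the exit handler above flips `chain` to the
// concrete state once the bytecode function finishes, so Await(chain) is what
// makes this call blocking. The same pattern in isolation (assumed tsl API):
//
//   auto done = tsl::MakeConstructedAsyncValueRef<tsl::Chain>();
//   work_queue.AddTask([&] { /* do work */ done.SetStateConcrete(); });
//   work_queue.Await(done.CopyRCRef());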
// Assembles the per-request state (work queue, runner, kernel-fallback
// request state, and TFRT request context) shared by the BEF and MLRT
// execution paths.
absl::StatusOr<std::unique_ptr<RequestInfo>> CreateRequestInfo(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array,
tensorflow::tfrt_stub::FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
CostRecorder* cost_recorder) {
auto request_info = std::make_unique<RequestInfo>();
DCHECK(options.runtime);
const Runtime& runtime = *options.runtime;
int64_t request_id = 0;
if (work_queue != nullptr) {
request_id = work_queue->id();
if (request_id == 0) request_id = GetNextStepId().id;
request_info->request_queue = work_queue;
} else {
request_id = GetNextStepId().id;
TF_ASSIGN_OR_RETURN(request_info->request_queue_owner,
runtime.CreateRequestQueue(request_id));
request_info->request_queue = request_info->request_queue_owner.get();
}
auto* request_queue = request_info->request_queue;
request_info->runner = [request_queue](std::function<void()> f) {
request_queue->AddTask(std::move(f));
};
tfrt::RequestContextBuilder request_context_builder(
runtime.core_runtime()->GetHostContext(), resource_context, request_id);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
request_context_builder.context_data()
.emplace<tfd::KernelFallbackCompatRequestState>(
&request_info->runner, &fallback_state.device_manager(),
request_context_builder.id(), runner_table, resource_array,
request_queue->GetIntraOpThreadPool(), options.model_metadata,
&process_function_library_runtime);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_runtime_config(&options.runtime_config);
fallback_request_state.set_cancellation_manager(
&request_info->cancellation_manager);
tfrt::RequestOptions request_options;
request_options.priority = run_options.priority;
request_context_builder.set_request_options(request_options);
auto expected_req_ctx = std::move(request_context_builder).build();
if (!expected_req_ctx) {
return tensorflow::errors::Internal(
tfrt::StrCat(expected_req_ctx.takeError()));
}
request_info->tfrt_request_context = std::move(expected_req_ctx.get());
return request_info;
}
tensorflow::Status GraphExecutionRunOnFunction(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
absl::string_view signature_name, const SymbolUids& symbol_uids,
const tfrt::Function* func, const mlrt::LoadedExecutable* loaded_executable,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, const Runtime& runtime,
FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tfrt::RequestDeadlineTracker* req_deadline_tracker,
std::optional<StreamCallbackId> stream_callback_id,
CostRecorder* cost_recorder) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options, run_options, run_options.work_queue,
resource_context, client_graph_resource_context,
runner_table, resource_array, fallback_state,
process_function_library_runtime, cost_recorder));
int64_t request_id = request_info->tfrt_request_context->id();
tsl::profiler::TraceMe traceme(
[request_id, signature_name, &options, symbol_uids] {
return tsl::profiler::TraceMeEncode(
"TfrtModelRun",
{{"_r", 1},
{"id", request_id},
{"signature", signature_name},
{"model_id", absl::StrCat(options.model_metadata.name(), ":",
options.model_metadata.version())},
{"tf_symbol_uid", symbol_uids.tf_symbol_uid},
{"tfrt_symbol_uid", symbol_uids.tfrt_symbol_uid}});
});
if (run_options.deadline.has_value()) {
auto deadline = run_options.deadline.value();
if (absl::ToChronoTime(absl::Now()) > deadline) {
return tensorflow::errors::DeadlineExceeded(kDeadlineExceededMessage);
}
if (req_deadline_tracker == nullptr) {
return tensorflow::errors::InvalidArgument(
"req_deadline_tracker must be non-null");
}
req_deadline_tracker->CancelRequestOnDeadline(
deadline, request_info->tfrt_request_context);
}
  ScopedStreamCallback scoped_stream_callback;
  if (run_options.streamed_output_callback && !stream_callback_id.has_value()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Signature '", signature_name, "' does not support streaming."));
  }
  if (stream_callback_id.has_value() && !run_options.streamed_output_callback) {
    return absl::InvalidArgumentError(
        absl::StrCat("Signature '", signature_name,
                     "' contains streaming ops but is called using Predict "
                     "without the streamed callback."));
  }
  if (run_options.streamed_output_callback) {
    auto streamed_output_callback = run_options.streamed_output_callback;
    TF_ASSIGN_OR_RETURN(
        scoped_stream_callback,
        GetGlobalStreamCallbackRegistry().Register(
            options.model_metadata.name(), *stream_callback_id,
            StepId(request_id), std::move(streamed_output_callback)));
  }
if (loaded_executable) {
auto function = loaded_executable->GetFunction(signature_name);
if (!function) {
return errors::InvalidArgument(absl::StrCat(
"Function not found in MLRT executable: ", signature_name));
}
return RunMlrtFunction(function, *loaded_executable,
request_info->tfrt_request_context,
*request_info->request_queue, inputs, outputs,
                           /*sync_resource_state=*/nullptr);
}
DCHECK(func);
tfrt::ExecutionContext exec_ctx{request_info->tfrt_request_context};
if (run_options.work_queue) {
exec_ctx.set_work_queue(run_options.work_queue);
} else if (request_info->request_queue) {
exec_ctx.set_work_queue(request_info->request_queue);
} else {
exec_ctx.set_work_queue(runtime.work_queue());
}
llvm::SmallVector<tfrt::AsyncValue*, 4> arguments;
auto cleanup = tensorflow::gtl::MakeCleanup([&]() {
for (auto* argument : arguments) argument->DropRef();
});
arguments.push_back(tfrt::GetReadyChain().release());
for (const auto& input : inputs) {
arguments.push_back(
tfrt::MakeAvailableAsyncValueRef<FallbackTensor>(input).release());
}
  if (arguments.size() != func->argument_types().size())
    return tensorflow::errors::Internal(
        "Incorrect number of arguments: got ", arguments.size(),
        ", expected ", func->argument_types().size(), ".");
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4> chain_and_results;
chain_and_results.resize(func->result_types().size());
std::array<tfrt::RCReference<tfrt::AsyncValue>, 1> executed = {
EnqueueWork(exec_ctx, [&]() -> tfrt::Chain {
func->Execute(exec_ctx, arguments, chain_and_results);
return {};
})};
exec_ctx.work_queue().Await(executed);
exec_ctx.work_queue().Await(chain_and_results);
DCHECK(!chain_and_results.empty());
tfrt::RCReference<tfrt::AsyncValue>& chain = chain_and_results[0];
auto results = llvm::drop_begin(chain_and_results, 1);
tensorflow::StatusGroup status_group;
if (chain->IsError()) {
status_group.Update(chain->GetError());
}
for (tfrt::RCReference<tfrt::AsyncValue>& result : results) {
DCHECK(result->IsAvailable());
if (result->IsError()) {
status_group.Update(result->GetError());
outputs->push_back(tensorflow::Tensor());
continue;
}
DCHECK(result->IsType<FallbackTensor>());
const auto& host_tensor = result->get<FallbackTensor>().tensor();
outputs->push_back(host_tensor);
}
if (request_info->tfrt_request_context->IsCancelled()) {
return tensorflow::errors::DeadlineExceeded(kDeadlineExceededMessage);
}
return status_group.as_summary_status();
}
GraphExecutor::GraphExecutor(
Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
std::unique_ptr<tensorflow::tfrt_stub::TfrtGraphExecutionState>
graph_execution_state,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry)
: options_(std::move(options)),
fallback_state_(std::move(fallback_state)),
graph_execution_state_(std::move(graph_execution_state)),
req_deadline_tracker_(options_.runtime->core_runtime()->GetHostContext()),
kernel_registry_(std::move(kernel_registry)),
resource_context_(std::move(resource_context)) {
DCHECK(resource_context_);
SetSessionCreatedMetric();
}
absl::StatusOr<std::unique_ptr<GraphExecutor>> GraphExecutor::Create(
Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
tensorflow::GraphDef graph_def,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry) {
if (options.runtime == nullptr) {
return errors::InvalidArgument("options.runtime must be non-null ");
}
if (options.enable_online_cost_analysis) {
options.cost_analysis_options.version = Options::CostAnalysisOptions::kOnce;
}
TfrtGraphExecutionState::Options graph_execution_state_options;
graph_execution_state_options.run_placer_grappler_on_functions =
options.run_placer_grappler_on_functions;
options.compile_options.fuse_get_resource_ops_in_hoisting =
!options.enable_mlrt;
graph_executor_mode
->GetCell(options.model_metadata.name(),
absl::StrCat(options.model_metadata.version()))
->Set(options.enable_mlrt ? "mlrt" : "bef");
TF_ASSIGN_OR_RETURN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(graph_execution_state_options,
std::move(graph_def), *fallback_state));
return std::make_unique<GraphExecutor>(
std::move(options), std::move(fallback_state),
std::move(resource_context), std::move(graph_execution_state),
std::move(kernel_registry));
}
namespace {
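// Sorts `names` lexicographically into `sorted_names` and fills
// `original_indices` with each sorted name's index in the input order.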
void CreateSortedNamesAndOriginalIndices(absl::Span<const std::string> names,
std::vector<std::string>& sorted_names,
std::vector<int>& original_indices) {
DCHECK(sorted_names.empty());
DCHECK(original_indices.empty());
original_indices.resize(names.size());
std::iota(original_indices.begin(), original_indices.end(), 0);
std::sort(original_indices.begin(), original_indices.end(),
[&](int x, int y) { return names[x] < names[y]; });
sorted_names.reserve(names.size());
for (int original_index : original_indices) {
DCHECK_LT(original_index, names.size());
sorted_names.push_back(names[original_index]);
}
}
}  // namespace
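// Runs the graph for the given feeds and fetches. Names are sorted to form a
// canonical client-graph key, the compiled graph is looked up (compiling on
// first use), executed, and the flat outputs are scattered back into the
// caller's original order. Cost analysis and latency metrics are updated
// along the way.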
tensorflow::Status GraphExecutor::Run(
const RunOptions& run_options,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
std::vector<tensorflow::Tensor>* outputs) {
std::vector<std::string> input_names;
input_names.reserve(inputs.size());
for (const auto& p : inputs) input_names.push_back(p.first);
std::vector<std::string> sorted_input_names;
std::vector<int> input_original_indices;
CreateSortedNamesAndOriginalIndices(input_names, sorted_input_names,
input_original_indices);
std::vector<tensorflow::DataType> sorted_input_dtypes;
sorted_input_dtypes.reserve(inputs.size());
for (int original_index : input_original_indices) {
sorted_input_dtypes.push_back(inputs.at(original_index).second.dtype());
}
std::vector<std::string> sorted_output_names;
std::vector<int> output_original_indices;
CreateSortedNamesAndOriginalIndices(output_tensor_names, sorted_output_names,
output_original_indices);
std::vector<std::string> sorted_target_node_names(target_tensor_names.begin(),
target_tensor_names.end());
std::sort(sorted_target_node_names.begin(), sorted_target_node_names.end());
TF_ASSIGN_OR_RETURN(
LoadedClientGraph & loaded_client_graph,
GetOrCreateLoadedClientGraph(
run_options, sorted_input_names, sorted_input_dtypes,
sorted_output_names, sorted_target_node_names, run_options.work_queue,
{}, inputs));
auto executable_context = loaded_client_graph.executable_context();
const mlrt::LoadedExecutable* loaded_executable = nullptr;
const tfrt::Function* func = nullptr;
if (executable_context->IsForMlrt()) {
loaded_executable = executable_context->bytecode_executable.get();
} else {
func =
executable_context->bef_file->GetFunction(loaded_client_graph.name());
}
DCHECK(func || loaded_executable);
std::vector<tensorflow::Tensor> flat_inputs;
if (!loaded_client_graph.is_restore()) {
flat_inputs.reserve(inputs.size());
for (int original_index : input_original_indices) {
flat_inputs.push_back(inputs.at(original_index).second);
}
}
auto now = absl::Now() + simulated_duration_;
bool do_recompilation;
CostRecorder* cost_recorder =
loaded_client_graph.MaybeGetCostRecorder(now, &do_recompilation);
std::vector<tensorflow::Tensor> flat_outputs;
TF_RETURN_IF_ERROR(GraphExecutionRunOnFunction(
options_, run_options, loaded_client_graph.name(),
loaded_client_graph.symbol_uids(), func, loaded_executable, flat_inputs,
&flat_outputs, resource_context_.get(),
&executable_context->resource_context,
&loaded_client_graph.runner_table(),
&loaded_client_graph.resource_array(), runtime(), fallback_state(),
loaded_client_graph.process_function_library_runtime(),
&req_deadline_tracker_, loaded_client_graph.stream_callback_id(),
cost_recorder));
if (do_recompilation) {
TF_RETURN_IF_ERROR(
loaded_client_graph.UpdateCost(*cost_recorder, runtime()));
tensorflow::mutex_lock l(num_recompilations_mu_);
num_recompilations_ += 1;
}
if (cost_recorder != nullptr) {
loaded_client_graph.UpdateCostAnalysisData(now, do_recompilation);
}
auto flat_output_iter = flat_outputs.begin();
outputs->resize(flat_outputs.size());
for (int original_index : output_original_indices) {
(*outputs)[original_index] = std::move(*flat_output_iter);
++flat_output_iter;
}
absl::Time end = absl::Now() + simulated_duration_;
absl::Duration elapsed_duration = end - now;
loaded_client_graph.latency_sampler()->Add(
absl::ToDoubleMicroseconds(elapsed_duration));
return absl::OkStatus();
}
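// Extends the underlying graph execution state with the nodes in `graph`.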
tensorflow::Status GraphExecutor::Extend(const GraphDef& graph) {
return graph_execution_state_->Extend(graph);
}
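// Imports the client graph into an MLIR module and compiles it to MLRT
// bytecode or BEF depending on the options. When a backend compiler is set
// and the module is a restore graph, the single string input is treated as
// the checkpoint path.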
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
GraphExecutor::ImportAndCompileClientGraph(
const GraphExecutor::ClientGraph& client_graph,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
auto import_start_time = absl::Now();
mlir::DialectRegistry registry;
RegisterMlirDialect(registry, options_.compile_options.backend_compiler);
auto context = std::make_unique<mlir::MLIRContext>(
registry, mlir::MLIRContext::Threading::DISABLED);
context->loadAllAvailableDialects();
ASSIGN_OR_RETURN_IN_IMPORT(
auto flib_def_and_module,
ImportClientGraphToMlirModule(client_graph, context.get()));
auto& [flib_def, module] = flib_def_and_module;
std::string checkpoint_path;
if (options_.compile_options.backend_compiler &&
mlir::tf_saved_model::IsRestoreGraph(module.get())) {
if (inputs.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1 input for restore graph, but got ", inputs.size(), "."));
}
const tensorflow::Tensor& input = inputs[0].second;
if (input.dtype() != tensorflow::DT_STRING) {
return absl::InvalidArgumentError(
absl::StrCat("Expected string input for restore graph, but got ",
input.dtype(), "."));
}
checkpoint_path = input.scalar<tstring>()();
}
TF_ASSIGN_OR_RETURN(
auto stream_callback_id,
CreateStreamCallbackId(options().model_metadata.name(), module.get()));
SymbolUids symbol_uids;
symbol_uids.tf_symbol_uid = MaybeUploadMlirToXsymbol(module.get());
auto import_duration = absl::Now() - import_start_time;
LOG(INFO) << "TFRT finished importing client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(import_duration)
<< " ms. Client graph name: " << client_graph.name;
auto compile_start_time = absl::Now();
mlir::OwningOpRef<mlir::ModuleOp> module_with_op_keys;
std::shared_ptr<ExecutableContext> executable_context = nullptr;
ModelRuntimeContext model_context(&options_,
options_.compile_options.saved_model_dir,
resource_context_.get());
if (checkpoint_path.empty()) {
model_context.set_function_library_definition(&flib_def);
}
model_context.set_checkpoint_path(checkpoint_path);
if (options_.compile_options.compile_to_sync_tfrt_dialect) {
if (kernel_registry_ == nullptr) {
return tensorflow::errors::Internal("Missing kernel registry in MLRT.");
}
ASSIGN_OR_RETURN_IN_COMPILE(
executable_context,
tfrt::BuildExecutableContext(module.get(), *kernel_registry_));
} else if (options_.enable_mlrt) {
if (kernel_registry_ == nullptr) {
return tensorflow::errors::Internal("Missing kernel registry in MLRT.");
}
ASSIGN_OR_RETURN_IN_COMPILE(
auto bytecode_buffer,
tensorflow::mlrt_compiler::ConvertTfMlirToBytecode(
options_.compile_options, fallback_state(), module.get(),
model_context, &module_with_op_keys));
mlrt::bc::Executable executable(bytecode_buffer.data());
auto bytecode_executable =
std::make_unique<mlrt::LoadedExecutable>(executable, *kernel_registry_);
executable_context = std::make_shared<ExecutableContext>(
std::move(bytecode_buffer), std::move(bytecode_executable));
} else {
tfrt::BefBuffer bef;
TF_RETURN_IF_ERROR(
tensorflow::ConvertTfMlirToBef(options_.compile_options, module.get(),
&bef, model_context, &fallback_state()));
ASSIGN_OR_RETURN_IN_COMPILE(
auto bef_file, tfrt::CreateBefFileFromBefBuffer(runtime(), bef));
executable_context = std::make_shared<ExecutableContext>(
std::move(bef), std::move(bef_file));
}
symbol_uids.tfrt_symbol_uid = MaybeUploadMlirToXsymbol(module.get());
auto compile_duration = absl::Now() - compile_start_time;
LOG(INFO) << "TFRT finished compiling client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(compile_duration)
<< " ms. Client graph name: " << client_graph.name;
auto* latency_sampler =
tensorflow::tfrt_metrics::GetTfrtGraphExecutorLatencySampler(
options_.model_metadata.name(), options_.model_metadata.version(),
client_graph.name);
return std::make_unique<LoadedClientGraph>(
client_graph.name, std::move(symbol_uids), this, std::move(context),
std::move(module_with_op_keys), std::move(module),
std::move(executable_context), stream_callback_id,
!checkpoint_path.empty(), std::move(flib_def), latency_sampler);
}
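// Imports and compiles a client graph, then runs the fallback and resource
// initialization functions of the resulting executable.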
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
GraphExecutor::LoadClientGraph(
const GraphExecutor::ClientGraph& client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
LOG(INFO) << "TFRT loading client graph (" << &client_graph << ") "
<< client_graph.name;
TF_ASSIGN_OR_RETURN(auto loaded_client_graph,
ImportAndCompileClientGraph(client_graph, inputs));
auto init_start_time = absl::Now();
if (loaded_client_graph->executable_context()->IsForMlrt()) {
RETURN_IF_ERROR_IN_INIT(InitBytecode(loaded_client_graph.get()));
} else {
RETURN_IF_ERROR_IN_INIT(InitBef(loaded_client_graph.get(), work_queue));
}
auto init_duration = absl::Now() - init_start_time;
LOG(INFO) << "TFRT finished initializing client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(init_duration)
<< " ms. Client graph name: " << client_graph.name;
return loaded_client_graph;
}
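// Runs functionalization and Grappler over the client graph, then converts
// the optimized graph to an MLIR module, returning the module together with
// the graph's function library definition.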
absl::StatusOr<
std::pair<FunctionLibraryDefinition, mlir::OwningOpRef<mlir::ModuleOp>>>
GraphExecutor::ImportClientGraphToMlirModule(
const GraphExecutor::ClientGraph& client_graph,
mlir::MLIRContext* context) const {
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.graph_func_name = client_graph.name;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
graph_import_config.inputs = client_graph.input_nodes;
graph_import_config.outputs = client_graph.output_nodes;
graph_import_config.control_outputs = client_graph.target_nodes;
graph_import_config.set_original_tf_func_name = true;
TF_ASSIGN_OR_RETURN(
auto optimized_graph,
graph_execution_state_->CreateOptimizedGraph(graph_import_config));
LOG(INFO) << "TFRT import client graph (" << &client_graph
<< "): Functionalization took "
<< absl::ToInt64Milliseconds(
optimized_graph.functionalization_duration)
<< " ms. Client graph name: " << client_graph.name;
LOG(INFO) << "TFRT import client graph (" << &client_graph
<< "): Grappler took "
<< absl::ToInt64Milliseconds(optimized_graph.grappler_duration)
<< " ms. Client graph name: " << client_graph.name;
TF_ASSIGN_OR_RETURN(
auto module,
tensorflow::ConvertGraphToMlir(*optimized_graph.graph, {},
optimized_graph.graph->flib_def(),
graph_import_config, context));
return std::make_pair(std::move(*optimized_graph.graph->mutable_flib_def()),
std::move(module));
}
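// Runs the BEF initialization functions (fallback init and resource init)
// for a freshly compiled client graph.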
tensorflow::Status GraphExecutor::InitBef(
LoadedClientGraph* loaded_client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue) {
auto* bef_file = loaded_client_graph->executable_context()->bef_file.get();
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(
options_, {}, work_queue, resource_context_.get(),
nullptr,
&loaded_client_graph->runner_table(),
&loaded_client_graph->resource_array(), fallback_state(),
loaded_client_graph->process_function_library_runtime()));
tfrt::ExecutionContext exec_ctx(request_info->tfrt_request_context);
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, kFallbackInitFunction));
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, kResourceInitFunction));
return absl::OkStatus();
}
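// MLRT counterpart of InitBef: runs the fallback and resource initialization
// functions from the loaded bytecode executable, when present.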
tensorflow::Status GraphExecutor::InitBytecode(
LoadedClientGraph* loaded_graph) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options_, {},
options_.runtime->work_queue(), resource_context_.get(),
nullptr,
&loaded_graph->runner_table(),
&loaded_graph->resource_array(), fallback_state(),
loaded_graph->process_function_library_runtime()));
const auto* loaded_executable =
loaded_graph->executable_context()->bytecode_executable.get();
DCHECK(loaded_executable);
std::vector<tensorflow::Tensor> outputs;
if (auto function = loaded_executable->GetFunction(kFallbackInitFunction)) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, *loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
&loaded_graph->sync_resource_state()));
}
if (auto function = loaded_executable->GetFunction(kResourceInitFunction)) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, *loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
&loaded_graph->sync_resource_state()));
}
return absl::OkStatus();
}
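// Returns the cached LoadedClientGraph keyed by `graph_name` (or by the
// joined sorted input/output/target names), compiling and loading it on
// first use unless compilation is disabled by the run options.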
absl::StatusOr<std::reference_wrapper<GraphExecutor::LoadedClientGraph>>
GraphExecutor::GetOrCreateLoadedClientGraph(
const RunOptions& run_options,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::string_view graph_name,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
const std::string joined_name =
!graph_name.empty()
? std::string(graph_name)
: absl::StrCat(
absl::StrJoin(input_tensor_names, kTensorNameJoiningDelimiter),
kArgumentTypeJoiningDelimiter,
absl::StrJoin(output_tensor_names, kTensorNameJoiningDelimiter),
kArgumentTypeJoiningDelimiter,
absl::StrJoin(target_tensor_names,
kTensorNameJoiningDelimiter));
tensorflow::mutex_lock l(loaded_client_graphs_mu_);
const auto iter = loaded_client_graphs_.find(joined_name);
if (iter != loaded_client_graphs_.end()) return {*iter->second};
if (run_options.disable_compilation) {
return tensorflow::errors::InvalidArgument(
absl::StrCat("GraphExecutor: compilation is disabled in execution but "
"the compiled graph is not found for ",
joined_name));
}
tensorflow::GraphImportConfig::InputArrays input_nodes;
DCHECK_EQ(input_tensor_names.size(), input_tensor_dtypes.size());
for (int i = 0; i < input_tensor_names.size(); ++i) {
const auto& input_name = input_tensor_names[i];
auto input_dtype = input_tensor_dtypes[i];
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = input_dtype;
array_info.shape.set_unknown_rank(true);
input_nodes[input_name] = array_info;
}
ClientGraph client_graph{
run_options.name.empty() ? joined_name : run_options.name,
std::move(input_nodes),
{output_tensor_names.begin(), output_tensor_names.end()},
{target_tensor_names.begin(), target_tensor_names.end()}};
TF_ASSIGN_OR_RETURN(auto loaded_client_graph,
LoadClientGraph(client_graph, work_queue, inputs));
auto* loaded_client_graph_ptr = loaded_client_graph.get();
loaded_client_graphs_[joined_name] = std::move(loaded_client_graph);
return {*loaded_client_graph_ptr};
}
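// Executes a client graph with the synchronous MLRT interpreter, using the
// per-graph sync resource state instead of the asynchronous work-queue path.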
tensorflow::Status GraphExecutor::RunWithSyncInterpreter(
const std::string& graph_name, absl::Span<mlrt::Value> input_values,
absl::Span<const std::string> input_names,
absl::Span<const tensorflow::DataType> input_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
absl::Span<mlrt::Value> outputs) {
TF_ASSIGN_OR_RETURN(
LoadedClientGraph & loaded_client_graph,
GetOrCreateLoadedClientGraph(
{}, input_names, input_dtypes, output_tensor_names,
target_tensor_names,
nullptr,
graph_name.empty() ? output_tensor_names[0] : graph_name));
auto executable_context = loaded_client_graph.executable_context();
mlrt::ExecutionContext execution_context(
executable_context->bytecode_executable.get());
AddSyncContext(execution_context,
*options_.runtime->core_runtime()->GetHostContext(),
&loaded_client_graph.sync_resource_state());
tensorflow::tfd::KernelFallbackCompatRequestState kernel_fallback_state(
tfd::GetDefaultRunner(), &fallback_state().device_manager(),
0, &loaded_client_graph.runner_table(),
&loaded_client_graph.resource_array(),
nullptr, std::nullopt,
&loaded_client_graph.process_function_library_runtime());
auto tf_context = std::make_unique<tensorflow::tf_mlrt::Context>(
&kernel_fallback_state, resource_context_.get());
execution_context.AddUserContext(std::move(tf_context));
auto serving_function = executable_context->bytecode_executable->GetFunction(
loaded_client_graph.name());
DCHECK(serving_function);
execution_context.CallByMove(serving_function, input_values, outputs);
mlrt::Execute(execution_context);
return execution_context.status();
}
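// Returns the cost recorder if a cost-measured run is due, based on the
// elapsed time and the configured updates per reset interval; sets
// `do_recompilation` when this update completes the interval. Returns
// nullptr otherwise.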
CostRecorder* GraphExecutor::LoadedClientGraph::MaybeGetCostRecorder(
absl::Time now, bool* do_recompilation) {
*do_recompilation = false;
tensorflow::mutex_lock l(cost_analysis_data_.mu);
if (!cost_analysis_data_.is_available) {
return nullptr;
}
const auto& options = graph_executor_->options().cost_analysis_options;
absl::Duration elapsed_duration = now - cost_analysis_data_.start_time;
double intended_num_updates = absl::ToDoubleSeconds(elapsed_duration) /
absl::ToDoubleSeconds(options.reset_interval) *
options.updates_per_interval;
if (intended_num_updates - cost_analysis_data_.num_cost_updates >= 1) {
cost_analysis_data_.is_available = false;
*do_recompilation = 1 + cost_analysis_data_.num_cost_updates >=
options.updates_per_interval;
return cost_analysis_data_.cost_recorder.get();
}
return nullptr;
}
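// Recompiles the client graph with the measured op costs (to MLRT bytecode
// or BEF) and atomically swaps in the new executable context.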
Status GraphExecutor::LoadedClientGraph::UpdateCost(
const CostRecorder& cost_recorder, const Runtime& runtime) {
LOG(INFO) << "TFRT updating op costs of loaded client graph (" << this << ") "
<< name_;
std::shared_ptr<ExecutableContext> new_executable_context = nullptr;
if (executable_context()->IsForMlrt()) {
auto tf_mlir_with_op_keys = ::mlir::OwningOpRef<mlir::ModuleOp>(
cost_analysis_data_.tf_mlir_with_op_keys.get().clone());
TF_ASSIGN_OR_RETURN(
auto bytecode_buffer,
tensorflow::mlrt_compiler::ConvertTfMlirWithOpKeysToBytecode(
graph_executor_->options().compile_options,
graph_executor_->fallback_state(), tf_mlir_with_op_keys.get(),
cost_recorder));
mlrt::bc::Executable executable(bytecode_buffer.data());
auto bytecode_executable = std::make_unique<mlrt::LoadedExecutable>(
executable, *graph_executor_->kernel_registry_);
new_executable_context = std::make_shared<ExecutableContext>(
std::move(bytecode_buffer), std::move(bytecode_executable));
} else {
auto tfrt_mlir = ::mlir::OwningOpRef<mlir::ModuleOp>(
cost_analysis_data_.tfrt_mlir.get().clone());
mlir::StatusScopedDiagnosticHandler diag_handler(
tfrt_mlir.get().getContext());
tfrt_compiler::UpdateOpCostInTfrtMlir(tfrt_mlir.get(), cost_recorder);
auto bef = tfrt::ConvertMLIRToBEF(tfrt_mlir.get(),
true);
if (bef.empty()) {
return diag_handler.Combine(
tensorflow::errors::Internal("failed to convert MLIR to BEF."));
}
bef.shrink_to_fit();
TF_ASSIGN_OR_RETURN(auto bef_file,
tfrt::CreateBefFileFromBefBuffer(runtime, bef));
new_executable_context = std::make_shared<ExecutableContext>(
std::move(bef), std::move(bef_file));
}
{
tensorflow::mutex_lock lock(executable_context_mu_);
executable_context_ = std::move(new_executable_context);
}
return absl::OkStatus();
}
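// Wires up the per-graph ProcessFunctionLibraryRuntime and, when cost
// analysis is enabled, seeds the cost-analysis state so that the first run
// is cost-measured and immediately triggers recompilation.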
GraphExecutor::LoadedClientGraph::LoadedClientGraph(
std::string name, SymbolUids symbol_uids, GraphExecutor* graph_executor,
std::unique_ptr<mlir::MLIRContext> mlir_context,
mlir::OwningOpRef<mlir::ModuleOp> tf_mlir_with_op_keys,
mlir::OwningOpRef<mlir::ModuleOp> tfrt_mlir,
std::shared_ptr<ExecutableContext> executable_context,
std::optional<StreamCallbackId> stream_callback_id, bool is_restore,
FunctionLibraryDefinition flib_def,
tsl::monitoring::SamplerCell* latency_sampler)
: name_(std::move(name)),
symbol_uids_(std::move(symbol_uids)),
graph_executor_(graph_executor),
mlir_context_(std::move(mlir_context)),
executable_context_(std::move(executable_context)),
stream_callback_id_(stream_callback_id),
is_restore_(is_restore),
flib_def_(std::move(flib_def)),
pflr_(&graph_executor->fallback_state().device_manager(),
graph_executor->fallback_state().session_options().env,
&graph_executor->fallback_state().session_options().config,
TF_GRAPH_DEF_VERSION, &flib_def_,
graph_executor->fallback_state()
.session_options()
.config.graph_options()
.optimizer_options(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](int64_t, const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}),
latency_sampler_(latency_sampler) {
const auto& options = graph_executor_->options().cost_analysis_options;
if (options.version != Options::CostAnalysisOptions::kDisabled) {
cost_analysis_data_.start_time = absl::Now() - options.reset_interval;
cost_analysis_data_.is_available = true;
cost_analysis_data_.num_cost_updates = options.updates_per_interval - 1;
cost_analysis_data_.cost_recorder = std::make_unique<CostRecorder>();
if (executable_context_->IsForMlrt()) {
cost_analysis_data_.tf_mlir_with_op_keys =
std::move(tf_mlir_with_op_keys);
} else {
cost_analysis_data_.tfrt_mlir = std::move(tfrt_mlir);
}
}
}
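// Updates cost-analysis bookkeeping after a cost-measured run: the kOnce
// policy releases its state after recompilation, while the periodic policy
// resets the recorder and starts a new interval.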
void GraphExecutor::LoadedClientGraph::UpdateCostAnalysisData(
absl::Time now, bool do_recompilation) {
tensorflow::mutex_lock lock(cost_analysis_data_.mu);
if (!do_recompilation) {
cost_analysis_data_.num_cost_updates += 1;
cost_analysis_data_.is_available = true;
return;
}
if (graph_executor_->options().cost_analysis_options.version ==
Options::CostAnalysisOptions::kOnce) {
cost_analysis_data_.is_available = false;
cost_analysis_data_.tfrt_mlir = nullptr;
cost_analysis_data_.tf_mlir_with_op_keys = nullptr;
cost_analysis_data_.cost_recorder = nullptr;
} else {
cost_analysis_data_.cost_recorder = std::make_unique<CostRecorder>();
cost_analysis_data_.is_available = true;
cost_analysis_data_.start_time = now;
cost_analysis_data_.num_cost_updates = 0;
}
}
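// Compiles the given graph ahead of time by creating and caching its
// LoadedClientGraph, discarding the returned reference.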
tensorflow::Status GraphExecutor::CompileGraph(
const std::string& graph_name,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names) {
return GetOrCreateLoadedClientGraph(
{}, input_tensor_names, input_tensor_dtypes,
output_tensor_names, target_tensor_names,
nullptr, graph_name)
.status();
}
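// Registers the MLIR dialects required for compilation, plus any dialects
// the optional backend compiler depends on.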
void RegisterMlirDialect(mlir::DialectRegistry& registry,
tensorflow::BackendCompiler* backend_compiler) {
registry.insert<mlir::BuiltinDialect, mlir::func::FuncDialect>();
mlir::RegisterAllTensorFlowDialects(registry);
if (backend_compiler) {
backend_compiler->GetDependentDialects(registry);
}
}
}  // namespace tfrt_stub
} | #include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "learning/brain/experimental/tfrt/native_lowering/kernels/math_kernels.h"
#include "learning/brain/experimental/tfrt/native_lowering/kernels/sync_fallback_kernels.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/platform/statusor.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/tensor/dense_host_tensor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::status::StatusIs;
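// Test-only subclass that exposes the recompilation count and allows
// advancing the simulated clock used by cost analysis.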
class GraphExecutorForTestingCostAnalysis : public GraphExecutor {
public:
int num_recompilations() {
tensorflow::mutex_lock lock(num_recompilations_mu_);
return num_recompilations_;
}
void AdvanceTime(absl::Duration duration) {
simulated_duration_ = simulated_duration_ + duration;
}
};
class GraphExecutorTest : public ::testing::TestWithParam<bool> {};
tensorflow::Status GetSimpleGraphDef(GraphDef& graph_def) {
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
return scope.ToGraphDef(&graph_def);
}
std::unique_ptr<mlrt::KernelRegistry> GetKernelRegistry() {
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tfrt::cpu::RegisterMlrtMathKernels(kernel_registry.get());
tfrt::cpu::RegisterMlrtFallbackCompatKernels(kernel_registry.get());
return kernel_registry;
}
TEST_P(GraphExecutorTest, Vanilla) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisOptionsOverrideToOnce) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_online_cost_analysis = true;
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_EQ(graph_executor->num_recompilations(), 0);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisEveryTime) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), i + 1);
}
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisDisabled) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 0);
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisPeriodic) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::Minutes(10);
options.cost_analysis_options.updates_per_interval = 5;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
}
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 3);
}
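// Test-only ops: TestCancel triggers cancellation from inside its kernel,
// and TestIsCancelled reports whether the step's cancellation manager has
// been cancelled.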
REGISTER_OP("TestCancel")
.Input("x: T")
.Output("z: T")
.Attr("T: {int32}")
.SetShapeFn(::tensorflow::shape_inference::UnchangedShape);
class TestCancelKernel : public OpKernel {
public:
explicit TestCancelKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
auto status = absl::CancelledError();
ctx->cancellation_manager()->StartCancelWithStatus(status);
ctx->SetStatus(status);
}
};
REGISTER_KERNEL_BUILDER(Name("TestCancel").Device(DEVICE_CPU),
TestCancelKernel);
REGISTER_OP("TestIsCancelled").Output("z: T").Attr("T: {bool}").SetIsStateful();
class TestIsCancelledKernel : public OpKernel {
public:
explicit TestIsCancelledKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
ctx->set_output(
0, tensorflow::Tensor(ctx->cancellation_manager()->IsCancelled()));
}
};
REGISTER_KERNEL_BUILDER(Name("TestIsCancelled").Device(DEVICE_CPU),
TestIsCancelledKernel);
TEST_P(GraphExecutorTest, Cancellation) {
GraphDef graph_def;
tensorflow::GraphDefBuilder builder(
tensorflow::GraphDefBuilder::kFailImmediately);
const tensorflow::TensorShape tensor_shape({10, 9});
tensorflow::Node* input = tensorflow::ops::SourceOp(
"Placeholder", builder.opts()
.WithName("input")
.WithAttr("dtype", tensorflow::DT_INT32)
.WithAttr("shape", tensor_shape));
tensorflow::ops::SourceOp("TestIsCancelled",
builder.opts()
.WithName("is_cancelled")
.WithAttr("T", tensorflow::DT_BOOL));
tensorflow::ops::UnaryOp("TestCancel", input,
builder.opts()
.WithName("test_cancel")
.WithAttr("T", tensorflow::DT_INT32));
TF_ASSERT_OK(builder.ToGraphDef(&graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
{
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_THAT(graph_executor->Run({}, inputs,
{"test_cancel:0"},
{}, &outputs),
StatusIs(absl::StatusCode::kCancelled));
}
{
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, {},
{"is_cancelled:0"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<bool>(outputs[0]),
::testing::ElementsAreArray({false}));
}
}
INSTANTIATE_TEST_SUITE_P(GraphExecutorTestSuite, GraphExecutorTest,
::testing::Bool());
TEST_F(GraphExecutorTest, Extend) {
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
auto session_options = CreateDefaultSessionOptions(options);
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
session_options, graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(graph_executor->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, DisableCompilation) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
GraphExecutor::RunOptions run_options;
run_options.disable_compilation = true;
auto status = graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("GraphExecutor: compilation is disabled in "
"execution but the compiled graph is not found"));
run_options.disable_compilation = false;
TF_ASSERT_OK(graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, SyncExecute) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.compile_options.compile_to_sync_tfrt_dialect = true;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<mlrt::Value> inputs;
tfrt::DenseHostTensor dht =
tfrt::CreateTensorFromValues<int32_t>({1, 3}, {1, 1, 1});
inputs.emplace_back(std::move(dht));
std::vector<mlrt::Value> results;
results.resize(1);
TF_ASSERT_OK(graph_executor->RunWithSyncInterpreter(
"test_graph", absl::Span<mlrt::Value>(inputs),
{"input"}, {DT_INT32},
{"rank"},
{}, absl::Span<mlrt::Value>(results)));
tfrt::DenseHostTensor expected =
tfrt::CreateTensorFromValues<int32_t>({}, {2});
EXPECT_EQ(expected, results[0].Get<tfrt::DenseHostTensor>());
}
}  // namespace
}  // namespace tfrt_stub
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/graph_executor/graph_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/graph_executor/graph_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dba42569-ea24-4bc0-b340-9f383dc1dacc | cpp | tensorflow/tensorflow | config | tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/config_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include <utility>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
namespace {
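// Fills in default calibration settings: MIN_MAX when unspecified, and for
// histogram-based methods a 512-bin histogram, with 0.001/99.999 as the
// default min/max percentiles for the percentile method.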
void PopulateDefaultCalibrationOptions(QuantizationConfig& quant_config) {
if (!quant_config.has_calibration_options() ||
quant_config.calibration_options().calibration_method() ==
CalibrationOptions::CALIBRATION_METHOD_UNSPECIFIED) {
quant_config.mutable_calibration_options()->set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_MIN_MAX);
}
switch (quant_config.calibration_options().calibration_method()) {
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC:
if (quant_config.calibration_options()
.calibration_parameters()
.num_bins() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_num_bins(512);
}
if (quant_config.calibration_options().calibration_method() ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE) {
if (quant_config.calibration_options()
.calibration_parameters()
.min_percentile() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_min_percentile(0.001);
}
if (quant_config.calibration_options()
.calibration_parameters()
.max_percentile() == 0) {
quant_config.mutable_calibration_options()
->mutable_calibration_parameters()
->set_max_percentile(99.999);
}
}
break;
default:
break;
}
}
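// Default static-range PTQ spec: matches every function when full integer
// quantization is enabled, otherwise only dot_general/gather composites.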
QuantizationSpec GetDefaultStaticRangePtqSpec(StaticRangePtqPreset preset) {
QuantizationSpec spec{};
spec.mutable_matcher()->mutable_function_name()->set_regex(
preset.enable_full_int_quantization() ? ".*"
: "^.*(dot_general|gather).*");
spec.mutable_method()->mutable_static_range_ptq();
return spec;
}
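// Default weight-only PTQ spec for conv/dot_general composites, marking
// input operand 1 (the weight) for quantization with an empty dimension
// spec.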
QuantizationSpec GetDefaultWeightOnlyPtqSpec() {
QuantizationSpec spec{};
spec.mutable_matcher()->mutable_function_name()->set_regex(
"^.*(conv|dot_general).*");
WeightOnlyPtq& weight_only_ptq_spec =
*spec.mutable_method()->mutable_weight_only_ptq();
if (auto [iter, inserted] =
weight_only_ptq_spec.mutable_input_quantized_types()->try_emplace(1);
inserted) {
iter->second.mutable_dimension_specs();
}
return spec;
}
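// For static-range PTQ, returns a spec that quantizes convolution weights
// (input operand 1) per-channel along dimension 3; otherwise returns an
// empty spec.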
QuantizationSpec GetPtqSpecForConvolution(Method::MethodCase method_case) {
QuantizationSpec spec{};
if (method_case != Method::kStaticRangePtq) {
return spec;
}
spec.mutable_matcher()->mutable_function_name()->set_regex(
"composite_conv.*");
QuantizedType conv_weight_quantized_type{};
conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3);
StaticRangePtq& static_range_ptq_spec =
*spec.mutable_method()->mutable_static_range_ptq();
static_range_ptq_spec.mutable_input_quantized_types()->try_emplace(
1, std::move(conv_weight_quantized_type));
return spec;
}
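// Expands the static-range PTQ preset into explicit specs. Representative
// datasets from the preset are used only when none are set at the top level,
// and any user-provided specs are appended after the preset specs.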
void ExpandStaticRangePtqPreset(const StaticRangePtqPreset& preset,
QuantizationConfig& config) {
if (config.calibration_options().representative_datasets().empty()) {
auto preset_datasets = preset.representative_datasets();
config.mutable_calibration_options()
->mutable_representative_datasets()
->Add(preset_datasets.begin(), preset_datasets.end());
}
QuantizationSpecs new_specs{};
*new_specs.add_specs() =
GetDefaultStaticRangePtqSpec(config.static_range_ptq_preset());
*new_specs.add_specs() =
GetPtqSpecForConvolution(Method::MethodCase::kStaticRangePtq);
const QuantizationSpecs& previous_specs = config.specs();
new_specs.mutable_specs()->Add(previous_specs.specs().begin(),
previous_specs.specs().end());
config.clear_static_range_ptq_preset();
config.mutable_specs()->Swap(&new_specs);
}
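// Expands the weight-only PTQ preset into an explicit spec, appending any
// user-provided specs after it.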
void ExpandWeightOnlyPtqPreset(QuantizationConfig& config) {
QuantizationSpecs new_specs{};
*new_specs.add_specs() = GetDefaultWeightOnlyPtqSpec();
const QuantizationSpecs& previous_specs = config.specs();
new_specs.mutable_specs()->Add(previous_specs.specs().begin(),
previous_specs.specs().end());
config.clear_weight_only_ptq_preset();
config.mutable_specs()->Swap(&new_specs);
}
}  // namespace
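// Returns a copy of `config` with any preset replaced by the equivalent
// explicit quantization specs.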
QuantizationConfig ExpandPresets(const QuantizationConfig& config) {
QuantizationConfig new_config = config;
switch (config.preset_case()) {
case QuantizationConfig::kStaticRangePtqPreset:
ExpandStaticRangePtqPreset(config.static_range_ptq_preset(), new_config);
break;
case QuantizationConfig::kWeightOnlyPtqPreset:
ExpandWeightOnlyPtqPreset(new_config);
break;
default:
break;
}
return new_config;
}
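// Returns true if any spec in `specs` uses the given quantization method.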
bool HasQuantizationMethod(const QuantizationSpecs& specs,
Method::MethodCase method_case) {
for (const auto& spec : specs.specs()) {
if (spec.method().method_case() == method_case) {
return true;
}
}
return false;
}
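// Returns the user-provided config with defaulted calibration options and
// with unpack_quantized_types enabled unless explicitly set.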
QuantizationConfig PopulateDefaults(
const QuantizationConfig& user_provided_config) {
QuantizationConfig config = user_provided_config;
PopulateDefaultCalibrationOptions(config);
PipelineConfig& pipeline_config = *config.mutable_pipeline_config();
if (!pipeline_config.has_unpack_quantized_types()) {
pipeline_config.set_unpack_quantized_types(true);
}
return config;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
namespace {
using ::testing::Eq;
using ::testing::Pair;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::testing::Truly;
using ::testing::UnorderedElementsAre;
TEST(PopulateDefaultsTest, PopulateDefaultsForEmptyConfig) {
QuantizationConfig config{};
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_TRUE(new_config.pipeline_config().unpack_quantized_types());
}
TEST(PopulateDefaultsTest, PopulateDefaultsForConfigWithUnpackQuantizedTypes) {
QuantizationConfig config{};
config.mutable_pipeline_config()->set_unpack_quantized_types(false);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_FALSE(new_config.pipeline_config().unpack_quantized_types());
}
TEST(PopulateDefaultsTest, DefaultCalibrationOptionsPopulated) {
QuantizationConfig config{};
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_MIN_MAX));
}
TEST(PopulateDefaultsTest,
DefaultCalibrationOptionsPopulatedForUnspecifiedMethod) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_UNSPECIFIED);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_MIN_MAX));
}
TEST(PopulateDefaultsTest, ExplicitCalibrationOptionsNotOverridden) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX);
calibration_options.mutable_calibration_parameters()->set_num_bins(512);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
}
TEST(PopulateDefaultsTest, DefaultNumbersPopulatedForPartOfCalibrationOptions) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE);
calibration_options.mutable_calibration_parameters()->set_num_bins(512);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.min_percentile(),
Eq(0.001f));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.max_percentile(),
Eq(99.999f));
}
TEST(PopulateDefaultsTest,
DefaultNumbersPopulatedForCalibrationOptionsOfHistogramMseBruteforce) {
QuantizationConfig config{};
CalibrationOptions& calibration_options =
*config.mutable_calibration_options();
calibration_options.set_calibration_method(
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE);
const QuantizationConfig new_config = PopulateDefaults(config);
EXPECT_THAT(
new_config.calibration_options().calibration_method(),
Eq(CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE));
EXPECT_THAT(
new_config.calibration_options().calibration_parameters().num_bins(),
Eq(512));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.min_percentile(),
Eq(0.0f));
EXPECT_THAT(new_config.calibration_options()
.calibration_parameters()
.max_percentile(),
Eq(0.0f));
}
TEST(ExpandPresetsTest, ExpandUnspecifiedPreset) {
QuantizationConfig config{};
const QuantizationConfig new_config = ExpandPresets(config);
EXPECT_FALSE(new_config.has_specs());
EXPECT_FALSE(new_config.has_calibration_options());
EXPECT_FALSE(new_config.has_pipeline_config());
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqEnableFullIntquantization) {
QuantizationConfig config{};
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
config.mutable_static_range_ptq_preset()->set_enable_full_int_quantization(
true);
preset_dataset_config.mutable_tf_record()->set_path("/test/path");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(2));
const QuantizationSpec& default_spec = new_config.specs().specs(0);
EXPECT_THAT(default_spec.matcher().function_name().regex(), StrEq(".*"));
EXPECT_TRUE(default_spec.method().has_static_range_ptq());
const QuantizationSpec& conv_spec = new_config.specs().specs(1);
EXPECT_THAT(conv_spec.matcher().function_name().regex(),
StrEq("composite_conv.*"));
ASSERT_TRUE(conv_spec.method().has_static_range_ptq());
const StaticRangePtq& srq_spec = conv_spec.method().static_range_ptq();
ASSERT_THAT(srq_spec.input_quantized_types(), SizeIs(1));
ASSERT_TRUE(srq_spec.input_quantized_types().contains(1));
ASSERT_TRUE(srq_spec.input_quantized_types().at(1).has_dimension_specs());
const QuantizedDimension& dimension_specs =
srq_spec.input_quantized_types().at(1).dimension_specs();
ASSERT_TRUE(dimension_specs.has_dimension());
EXPECT_THAT(dimension_specs.dimension(), Eq(3));
ASSERT_THAT(new_config.calibration_options().representative_datasets(),
SizeIs(1));
EXPECT_THAT(new_config.calibration_options()
.representative_datasets(0)
.tf_record()
.path(),
StrEq("/test/path"));
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqPresetDefault) {
QuantizationConfig config{};
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
preset_dataset_config.mutable_tf_record()->set_path("/test/path");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(2));
const QuantizationSpec& spec = new_config.specs().specs(0);
EXPECT_THAT(spec.matcher().function_name().regex(),
StrEq("^.*(dot_general|gather).*"));
EXPECT_TRUE(spec.method().has_static_range_ptq());
}
TEST(ExpandPresetsTest,
ExpandStaticRangePtqPresetWithTopLevelRepresentativeDataset) {
QuantizationConfig config{};
RepresentativeDatasetConfig& top_level_dataset_config =
*config.mutable_calibration_options()->add_representative_datasets();
top_level_dataset_config.mutable_tf_record()->set_path("/test/path/1");
RepresentativeDatasetConfig& preset_dataset_config =
*config.mutable_static_range_ptq_preset()->add_representative_datasets();
preset_dataset_config.mutable_tf_record()->set_path("/test/path/2");
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.calibration_options().representative_datasets(),
SizeIs(1));
EXPECT_THAT(new_config.calibration_options()
.representative_datasets(0)
.tf_record()
.path(),
StrEq("/test/path/1"));
}
TEST(ExpandPresetsTest, ExpandStaticRangePtqPresetThenAppendExplicitSpecs) {
QuantizationConfig config{};
config.mutable_static_range_ptq_preset()->set_enable_full_int_quantization(
true);
QuantizationSpec& user_provided_spec = *config.mutable_specs()->add_specs();
user_provided_spec.mutable_matcher()->mutable_function_name()->set_regex(
"composite_dot_general_fn_1");
user_provided_spec.mutable_method()->mutable_no_quantization();
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(3));
const QuantizationSpec& first_spec = new_config.specs().specs(0);
EXPECT_THAT(first_spec.matcher().function_name().regex(), StrEq(".*"));
EXPECT_TRUE(first_spec.method().has_static_range_ptq());
const QuantizationSpec& second_spec = new_config.specs().specs(1);
EXPECT_THAT(second_spec.matcher().function_name().regex(),
StrEq("composite_conv.*"));
EXPECT_TRUE(second_spec.method().has_static_range_ptq());
const QuantizationSpec& third_spec = new_config.specs().specs(2);
EXPECT_THAT(third_spec.matcher().function_name().regex(),
StrEq("composite_dot_general_fn_1"));
EXPECT_TRUE(third_spec.method().has_no_quantization());
}
TEST(ExpandPresetsTest, ExpandWeightOnlyPtqPresetDefault) {
QuantizationConfig config{};
*config.mutable_weight_only_ptq_preset() = WeightOnlyPtqPreset();
const QuantizationConfig new_config = ExpandPresets(config);
ASSERT_THAT(new_config.specs().specs(), SizeIs(1));
const QuantizationSpec& spec = new_config.specs().specs(0);
EXPECT_THAT(spec.matcher().function_name().regex(),
StrEq("^.*(conv|dot_general).*"));
EXPECT_TRUE(spec.method().has_weight_only_ptq());
const WeightOnlyPtq& weight_only_ptq_spec = spec.method().weight_only_ptq();
EXPECT_THAT(weight_only_ptq_spec.input_quantized_types(),
UnorderedElementsAre(Pair(
1, Truly([](const auto& quantized_type) {
return quantized_type.has_dimension_specs() &&
!quantized_type.dimension_specs().has_dimension();
}))));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34fb00dd-c8fd-49ca-aa3d-7b157bea692d | cpp | tensorflow/tensorflow | tfrt_session | tensorflow/core/tfrt/tfrt_session/tfrt_session.cc | tensorflow/core/tfrt/tfrt_session/tfrt_session_test.cc | #include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/die_if_null.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "Eigen/ThreadPool"
#include "llvm/ADT/STLExtras.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/local_session_selection.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/session_factory.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/batch_kernel.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace {
class ThreadPoolInterfaceWrapper : public thread::ThreadPoolInterface {
public:
explicit ThreadPoolInterfaceWrapper(Eigen::ThreadPoolInterface* thread_pool)
: thread_pool_{thread_pool} {
DCHECK(thread_pool);
}
void Schedule(std::function<void()> fn) override {
return thread_pool().Schedule(std::move(fn));
}
void ScheduleWithHint(std::function<void()> fn, int start, int end) override {
return thread_pool().ScheduleWithHint(std::move(fn), start, end);
}
void Cancel() override { thread_pool().Cancel(); }
int NumThreads() const override { return thread_pool().NumThreads(); }
int CurrentThreadId() const override {
return thread_pool().CurrentThreadId();
}
private:
Eigen::ThreadPoolInterface& thread_pool() const {
DCHECK(thread_pool_);
return *thread_pool_;
}
Eigen::ThreadPoolInterface* thread_pool_ = nullptr;
};
class TfrtSessionInterOpThreadPools {
public:
TfrtSessionInterOpThreadPools(int size, bool run_in_caller_thread)
: thread_pools_(size), run_in_caller_thread_(run_in_caller_thread) {}
void SetThreadPool(int index, ThreadPoolInterfaceWrapper* thread_pool) {
thread_pools_.at(index) = thread_pool;
}
absl::StatusOr<ThreadPoolInterfaceWrapper*> GetThreadPool(int index) {
if (index < 0 || index >= thread_pools_.size())
return errors::InvalidArgument("Invalid thread pool index ", index);
return thread_pools_[index];
}
bool run_in_caller_thread() const { return run_in_caller_thread_; }
private:
std::vector<ThreadPoolInterfaceWrapper*> thread_pools_;
bool run_in_caller_thread_;
};
class TfrtSession : public tensorflow::Session {
public:
explicit TfrtSession(const SessionOptions& options,
tensorflow::tfrt_stub::Runtime* runtime,
TfrtDeviceInfraTarget device_target,
bool tpu_use_tpu_runner, bool use_gpu,
TfrtSessionInterOpThreadPools inter_op_thread_pools,
bool enable_mlrt,
tensorflow::BackendCompiler* backend_compiler,
std::unique_ptr<StaticDeviceMgr> device_manager)
: runtime_{runtime},
device_target_{device_target},
tpu_use_tpu_runner_{tpu_use_tpu_runner},
use_gpu_{use_gpu},
inter_op_thread_pools_{std::move(inter_op_thread_pools)},
enable_mlrt_(enable_mlrt),
options_{options},
backend_compiler_(backend_compiler),
device_manager_(std::move(device_manager)) {}
Status Create(const GraphDef& graph) override {
return Create(GraphDef(graph));
}
Status Create(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return CreateLocked(std::move(graph));
}
Status CreateLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (graph.node_size() == 0) {
LOG(ERROR) << "Ignoring empty graph.";
return absl::OkStatus();
}
if (session_state_ == SessionState::kCreated) {
return errors::AlreadyExists(
"A Graph has already been created for this session.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
auto options = GetGraphExecutionOptions();
tensorflow::tfrt_stub::UpdateTpuTargetByBridgeCompatibility(options, graph);
auto* nodes = graph.mutable_node();
for (auto it = nodes->begin(), end = nodes->end(); it != end; ++it) {
if (it->name() == "ConfigureDistributedTPU") {
nodes->erase(it);
break;
}
}
auto session_options =
tensorflow::tfrt_stub::CreateDefaultSessionOptions(options);
session_options.config.mutable_experimental()
->set_optimize_for_static_graph(
options_.config.experimental().optimize_for_static_graph());
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(
options_.config.experimental().disable_optimize_for_static_graph());
LOG_FIRST_N(INFO, 10) << "SessionOptions: "
<< session_options.config.DebugString();
const auto& fdef_lib = graph.library();
TF_ASSIGN_OR_RETURN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::CreateWithDeviceMgr(
session_options, fdef_lib, device_manager_.get()));
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tensorflow::tf_mlrt::RegisterTfMlrtBatchKernels(*kernel_registry);
auto resource_context = std::make_unique<tfrt::ResourceContext>();
tfrt_stub::ModelRuntimeContext model_context(
&options, "unknown_export_dir", resource_context.get());
model_context.set_graph_def(&graph);
model_context.set_device_mgr(&fallback_state->device_manager());
model_context.set_is_local_session(
!options_.config.experimental().enable_multi_host() &&
!options_.config.experimental().tfrt_use_ifrt());
TF_RETURN_IF_ERROR(options.runtime->CreateRuntimeResources(model_context));
GraphOptimizationPassOptions optimization_options;
optimization_options.session_options = &options_;
FunctionLibraryDefinition flib_def = fallback_state->func_lib_def();
optimization_options.flib_def = &flib_def;
std::unordered_map<string, std::unique_ptr<Graph>> partition_graphs;
auto initial_graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
tensorflow::GraphConstructorOptions opts;
opts.allow_internal_ops = true;
TF_RETURN_IF_ERROR(
tensorflow::ConvertGraphDefToGraph(opts, graph, initial_graph.get()));
partition_graphs["graph"] = std::move(initial_graph);
optimization_options.partition_graphs = &partition_graphs;
OptimizationPassRegistry::Global()->LogAllGroupings(1);
TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
LOG_FIRST_N(INFO, 10) << "GraphExecutionOptions: " << options;
TF_ASSIGN_OR_RETURN(
graph_executor_,
tensorflow::tfrt_stub::GraphExecutor::Create(
options, std::move(fallback_state), std::move(resource_context),
std::move(graph), std::move(kernel_registry)));
session_state_ = SessionState::kCreated;
return absl::OkStatus();
}
Status Extend(const GraphDef& graph) override {
return Extend(GraphDef(graph));
}
Status Extend(GraphDef&& graph) override {
absl::MutexLock lock(&session_state_lock_);
return ExtendLocked(std::move(graph));
}
Status ExtendLocked(GraphDef graph)
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kCreated) {
return graph_executor_->Extend(graph);
}
return CreateLocked(std::move(graph));
}
Status RunInternal(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs,
const thread::ThreadPoolOptions& thread_pool_options) {
{
absl::MutexLock lock(&session_state_lock_);
if (session_state_ == SessionState::kInitialized) {
return errors::Unavailable("Session not created yet.");
}
TF_RETURN_IF_ERROR(CheckNotClosedLocked());
}
DCHECK(outputs || output_tensor_names.empty()) << "No outputs in Run()";
tensorflow::tfrt_stub::GraphExecutionRunOptions
graph_execution_run_options{};
if (run_options.timeout_in_ms() > 0) {
graph_execution_run_options.deadline = absl::ToChronoTime(
absl::Now() + absl::Milliseconds(run_options.timeout_in_ms()));
}
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> work_queue;
auto* const intra_op_thread_pool = thread_pool_options.intra_op_threadpool;
if (inter_op_thread_pools_.run_in_caller_thread() ||
run_options.inter_op_thread_pool() == -1) {
work_queue = tfrt_stub::WrapDefaultWorkQueue(
tfrt::CreateSingleThreadedWorkQueue(), intra_op_thread_pool);
} else if (thread_pool_options.inter_op_threadpool != nullptr) {
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool,
thread_pool_options.inter_op_threadpool);
} else {
TF_ASSIGN_OR_RETURN(auto* thread_pool,
inter_op_thread_pools_.GetThreadPool(
run_options.inter_op_thread_pool()));
work_queue =
std::make_unique<tensorflow::tfrt_stub::TfThreadPoolWorkQueue>(
tfrt::GetUniqueInt(), intra_op_thread_pool, thread_pool);
}
graph_execution_run_options.work_queue = work_queue.get();
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(graph_executor_->Run(
graph_execution_run_options, inputs, output_tensor_names,
target_node_names, &output_tensors));
if (outputs) {
DCHECK_EQ(output_tensors.size(), output_tensor_names.size());
outputs->swap(output_tensors);
} else {
DCHECK(output_tensor_names.empty()) << "No outputs in Run()";
}
return absl::OkStatus();
}
Status Run(const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs) override {
return RunInternal(RunOptions{}, inputs, output_tensor_names,
target_node_names, outputs, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_node_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata) override {
return Run(run_options, inputs, output_tensor_names, target_node_names,
outputs, run_metadata, {});
}
Status Run(const RunOptions& run_options,
const std::vector<std::pair<std::string, Tensor>>& inputs,
const std::vector<std::string>& output_tensor_names,
const std::vector<std::string>& target_tensor_names,
std::vector<Tensor>* outputs, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
return RunInternal(run_options, inputs, output_tensor_names,
target_tensor_names, outputs, thread_pool_options);
}
Status MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) override {
absl::MutexLock lock(&callables_lock_);
*out_handle = next_callable_handle_++;
assert(callables_.find(*out_handle) == callables_.end());
callables_[*out_handle] = {callable_options};
return absl::OkStatus();
}
Status RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) override {
return RunCallable(handle, feed_tensors, fetch_tensors, run_metadata, {});
}
Status RunCallable(
CallableHandle handle, const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata,
const thread::ThreadPoolOptions& thread_pool_options) override {
Callable callable;
{
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callable = it->second;
}
if (callable.callable_options.feed_size() != feed_tensors.size())
return errors::InvalidArgument("Invalid number of feed tensors");
std::vector<std::pair<std::string, Tensor>> inputs;
for (const auto& it :
llvm::zip(callable.callable_options.feed(), feed_tensors)) {
inputs.emplace_back(std::make_pair(std::get<0>(it), std::get<1>(it)));
}
std::vector<std::string> output_tensor_names;
for (const auto& tensor_name : callable.callable_options.fetch()) {
output_tensor_names.emplace_back(tensor_name);
}
std::vector<std::string> target_node_names;
for (const auto& node_name : callable.callable_options.target()) {
target_node_names.emplace_back(node_name);
}
return Run(inputs, output_tensor_names, target_node_names, fetch_tensors);
}
Status ReleaseCallable(CallableHandle handle) override {
absl::MutexLock lock(&callables_lock_);
auto it = callables_.find(handle);
if (it == callables_.end())
return errors::InvalidArgument("No such callable handle: ", handle);
callables_.erase(it);
return absl::OkStatus();
}
Status Close() override {
absl::MutexLock lock(&session_state_lock_);
session_state_ = SessionState::kClosed;
return absl::OkStatus();
}
Status ListDevices(std::vector<DeviceAttributes>* response) override {
    return errors::Unimplemented("TfrtSession::ListDevices is unimplemented.");
}
Status LocalDeviceManager(const DeviceMgr** output) override {
*output = device_manager_.get();
return absl::OkStatus();
}
Status Finalize() override { return absl::OkStatus(); }
private:
tfrt::HostContext* GetHostContext() {
return runtime_->core_runtime()->GetHostContext();
}
tensorflow::tfrt_stub::GraphExecutionOptions GetGraphExecutionOptions()
const {
::tensorflow::tfrt_stub::GraphExecutionOptions options(runtime_);
auto& compile_options = options.compile_options;
    compile_options.variable_device = DeviceNameUtils::FullName(
        /*job=*/"localhost", /*replica=*/0, /*task=*/0, /*type=*/"CPU",
        /*id=*/0);
compile_options.enable_grappler = true;
compile_options.device_target = device_target_;
compile_options.tpu_fuse_ops = tpu_use_tpu_runner_;
compile_options.hoist_invariant_ops = true;
compile_options.sink_in_invariant_ops = false;
compile_options.cost_threshold = 1024;
if (use_gpu_) {
options.enable_tfrt_gpu = true;
options.enable_grappler_function_optimizer = true;
}
compile_options.use_tpu_host_allocator_for_inputs = tpu_use_tpu_runner_;
options.compile_options.backend_compiler = backend_compiler_;
options.model_metadata = options_.config.experimental().session_metadata();
options.enable_mlrt = enable_mlrt_;
return options;
}
Status CheckNotClosedLocked() const
TF_EXCLUSIVE_LOCKS_REQUIRED(session_state_lock_) {
if (session_state_ == SessionState::kClosed) {
return errors::Cancelled("Session has been closed.");
}
return absl::OkStatus();
}
struct Callable {
CallableOptions callable_options;
};
enum class SessionState {
kInitialized,
kCreated,
kClosed,
};
mutable absl::Mutex session_state_lock_;
SessionState session_state_ TF_GUARDED_BY(session_state_lock_) =
SessionState::kInitialized;
std::unique_ptr<::tensorflow::tfrt_stub::GraphExecutor> graph_executor_;
tensorflow::tfrt_stub::Runtime* runtime_ = nullptr;
const TfrtDeviceInfraTarget device_target_;
const bool tpu_use_tpu_runner_;
const bool use_gpu_;
TfrtSessionInterOpThreadPools inter_op_thread_pools_;
mutable absl::Mutex callables_lock_;
CallableHandle next_callable_handle_ TF_GUARDED_BY(callables_lock_) = 0;
absl::flat_hash_map<CallableHandle, Callable> callables_
TF_GUARDED_BY(callables_lock_);
bool enable_mlrt_ = false;
SessionOptions options_ = SessionOptions();
tensorflow::BackendCompiler* backend_compiler_ = nullptr;
std::unique_ptr<StaticDeviceMgr> device_manager_;
};
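// Editor's note (added; not part of the original source): TfrtSession follows
// a small state machine: kInitialized -> kCreated on the first non-empty
// Create()/Extend(), and any state -> kClosed on Close(). Run() requires
// kCreated, a second Create() fails with AlreadyExists, and Create(), Extend()
// and Run() after Close() fail with Cancelled.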
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>
CreateRunHandlerWorkQueue(const TfrtThreadpoolOptions& session_options) {
int num_complementary_threads =
std::max(1, session_options.num_main_threads / 2);
tfrt::tf::RunHandlerThreadWorkQueue::Options options;
  options.num_main_threads = session_options.num_main_threads;
  options.num_complementary_threads = num_complementary_threads;
  options.init_timeout_ms =
      absl::ToInt64Milliseconds(session_options.init_timeout);
  options.max_concurrent_handler = session_options.max_concurrent_handler;
  options.num_sub_thread_pool = session_options.num_sub_thread_pool;
std::vector<int> num_threads;
const int num_threads_per_pool =
options.num_main_threads / options.num_sub_thread_pool;
num_threads.resize(options.num_sub_thread_pool - 1, num_threads_per_pool);
num_threads.push_back(options.num_main_threads -
(options.num_sub_thread_pool - 1) *
num_threads_per_pool);
options.num_threads_in_sub_thread_pool = num_threads;
options.sub_thread_request_percentage = {1.0};
options.use_adaptive_waiting_time = true;
LOG_FIRST_N(INFO, 10) << "RunHandlerThreadWorkQueue Options: " << options;
return std::make_unique<tfrt::tf::RunHandlerThreadWorkQueue>(options);
}
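// Editor's note (added; not part of the original source): a worked example of
// the distribution above. For num_main_threads = 10 and num_sub_thread_pool =
// 3, num_threads_per_pool = 10 / 3 = 3, so the first two sub pools get 3
// threads each and the last pool absorbs the remainder: 10 - 2 * 3 = 4, i.e.
// num_threads_in_sub_thread_pool = {3, 3, 4}.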
}
class TfrtSessionFactory::ThreadPoolManager {
public:
absl::StatusOr<TfrtSessionInterOpThreadPools> UpdateAndGetInterOpThreadPools(
const SessionOptions& options) {
if (options.config.inter_op_parallelism_threads() > 0) {
LOG(WARNING) << "TFRT session does not support positive "
"inter_op_parallelism_threads for now";
}
if (options.config.use_per_session_threads()) {
return errors::InvalidArgument(
"TFRT session does not yet support use_per_session_threads()");
}
auto session_inter_op_thread_pool_size =
options.config.session_inter_op_thread_pool_size();
if (session_inter_op_thread_pool_size > 0) {
      TfrtSessionInterOpThreadPools inter_op_thread_pools{
          /*size=*/session_inter_op_thread_pool_size,
          /*run_in_caller_thread=*/false};
for (const auto& it :
llvm::enumerate(options.config.session_inter_op_thread_pool())) {
const ThreadPoolOptionProto& pool_options = it.value();
auto pool_index = it.index();
auto num_threads = pool_options.num_threads();
if (num_threads != 0) {
TF_ASSIGN_OR_RETURN(
auto* thread_pool,
GetOrCreateThreadPool(options.env, pool_options, pool_index));
inter_op_thread_pools.SetThreadPool(pool_index, thread_pool);
} else {
inter_op_thread_pools.SetThreadPool(pool_index,
GlobalThreadPool(options));
}
}
return inter_op_thread_pools;
} else if (options.config.inter_op_parallelism_threads() < 0) {
      return TfrtSessionInterOpThreadPools{/*size=*/0,
                                           /*run_in_caller_thread=*/true};
} else if (session_inter_op_thread_pool_size == 0) {
      TfrtSessionInterOpThreadPools session_thread_pool_options{
          /*size=*/1, /*run_in_caller_thread=*/false};
session_thread_pool_options.SetThreadPool(0, GlobalThreadPool(options));
return session_thread_pool_options;
} else {
return errors::InvalidArgument(
"session_inter_op_thread_pool_size must be >= 0");
}
}
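// Editor's note (added; not part of the original source): the precedence
// implemented above is:
//   1. session_inter_op_thread_pool_size() > 0: one slot per
//      ThreadPoolOptionProto; entries with num_threads == 0 fall back to the
//      process-global pool.
//   2. inter_op_parallelism_threads() < 0: run ops in the caller thread.
//   3. otherwise (pool size == 0): a single slot backed by the global pool.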
private:
class ThreadPoolWithNumThreads {
public:
ThreadPoolWithNumThreads(int num_thread,
std::unique_ptr<thread::ThreadPool> thread_pool)
: num_threads_(num_thread),
thread_pool_(std::move(thread_pool)),
thread_pool_interface_wrapper_(
ABSL_DIE_IF_NULL(thread_pool_)->AsEigenThreadPool()) {}
int num_threads() const { return num_threads_; }
ThreadPoolInterfaceWrapper* thread_pool_interface_wrapper() {
return &thread_pool_interface_wrapper_;
}
private:
int num_threads_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
ThreadPoolInterfaceWrapper thread_pool_interface_wrapper_;
};
ThreadPoolInterfaceWrapper* GlobalThreadPool(const SessionOptions& options) {
static thread::ThreadPool* const thread_pool =
NewThreadPoolFromSessionOptions(options);
static auto* const wrapper =
new ThreadPoolInterfaceWrapper{thread_pool->AsEigenThreadPool()};
return wrapper;
}
absl::StatusOr<ThreadPoolInterfaceWrapper*> GetOrCreateThreadPool(
Env* env, const ThreadPoolOptionProto& pool_options, int pool_index) {
const int32_t num_threads = pool_options.num_threads();
CHECK_GT(num_threads, 0);
const std::string& name = pool_options.global_name();
if (name.empty()) {
return errors::InvalidArgument(
"TFRT session does not yet support session local thread pool");
}
absl::MutexLock lock(&mutex_);
auto it = named_thread_pools_.find(name);
if (it != named_thread_pools_.end()) {
if (it->second->num_threads() != num_threads) {
return errors::InvalidArgument(
"TfrtSession thread pool ", name,
" configured previously with num_threads=",
it->second->num_threads(),
"; cannot re-configure with num_threads=", num_threads);
}
return it->second->thread_pool_interface_wrapper();
}
    auto thread_pool = std::make_unique<thread::ThreadPool>(
        env, ThreadOptions(), absl::StrCat("TfrtSessionInter", pool_index),
        num_threads, /*low_latency_hint=*/false,
        /*allocator=*/nullptr);
auto ret = named_thread_pools_.emplace(
name, std::make_unique<ThreadPoolWithNumThreads>(
num_threads, std::move(thread_pool)));
CHECK(ret.second);
return ret.first->second->thread_pool_interface_wrapper();
}
mutable absl::Mutex mutex_;
absl::flat_hash_map<std::string, std::unique_ptr<ThreadPoolWithNumThreads>>
named_thread_pools_ ABSL_GUARDED_BY(mutex_);
};
TfrtSessionFactory::TfrtSessionFactory()
: thread_pool_manager_(std::make_unique<ThreadPoolManager>()) {}
class InitializerRegistry {
public:
static InitializerRegistry& Get() {
static auto* reg = new InitializerRegistry();
return *reg;
}
void Register(TfrtSessionFactory::RuntimeInitializer initializer) {
DCHECK(initializer_ == nullptr);
initializer_ = initializer;
}
absl::Status RunInitializer(tfrt_stub::Runtime* runtime) {
LOG(INFO) << "Running Initializer within TfrtSessionFactory.";
TF_RETURN_IF_ERROR(initializer_ ? initializer_(runtime) : absl::OkStatus());
return absl::OkStatus();
}
private:
TfrtSessionFactory::RuntimeInitializer initializer_;
};
void TfrtSessionFactory::RegisterInitializer(RuntimeInitializer initializer) {
InitializerRegistry::Get().Register(std::move(initializer));
}
Status TfrtSessionFactory::InitializeLocked(const TfrtSessionOptions& options) {
mutex_.AssertHeld();
if (options.use_tpu) {
DCHECK(!options.backend_compiler);
DCHECK(!options.use_gpu);
device_target_ = TfrtDeviceInfraTarget::kTpurt;
tpu_use_tpu_runner_ = true;
} else if (options.use_gpu) {
DCHECK(!options.backend_compiler);
device_target_ = TfrtDeviceInfraTarget::kGpu;
use_gpu_ = true;
} else if (options.backend_compiler) {
backend_compiler_ = options.backend_compiler;
}
LOG(INFO) << "Start initializing TfrtSession";
if (options.runtime != nullptr) {
runtime_ = options.runtime;
} else if (runtime_ == nullptr) {
owned_runtime_ = tensorflow::tfrt_stub::Runtime::Create(
CreateRunHandlerWorkQueue(options.threadpool_options));
runtime_ = owned_runtime_.get();
}
enable_mlrt_ = options.enable_mlrt;
return absl::OkStatus();
}
bool TfrtSessionFactory::AcceptsOptions(const SessionOptions& options) {
if (options.target == "tfrt_session") return true;
if (options.target.empty()) {
return options.config.experimental().use_tfrt() ||
GetDefaultLocalSessionImpl() == LocalSessionImpl::kTfrtSession;
}
return false;
}
Status TfrtSessionFactory::NewSession(const SessionOptions& options,
Session** out_session)
TF_LOCKS_EXCLUDED(mutex_) {
if (options.config.intra_op_parallelism_threads() != 0) {
LOG(WARNING) << "TFRT session ignores intra_op_parallelism_threads. "
"Intra-op thread "
"pool can only be configured by `Run()`";
}
*out_session = nullptr;
absl::MutexLock lock(&mutex_);
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
device_manager_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
if (!IsInitialized()) {
TF_RETURN_IF_ERROR(InitializeLocked({}));
TF_RETURN_IF_ERROR(InitializerRegistry::Get().RunInitializer(runtime_));
}
TF_ASSIGN_OR_RETURN(
auto inter_op_thread_pools,
thread_pool_manager_->UpdateAndGetInterOpThreadPools(options));
auto* backend_compiler = (options.config.experimental().enable_multi_host() ||
options.config.experimental().tfrt_use_ifrt())
? backend_compiler_
: nullptr;
*out_session =
new TfrtSession(options, runtime_, device_target_, tpu_use_tpu_runner_,
use_gpu_, std::move(inter_op_thread_pools), enable_mlrt_,
backend_compiler, std::move(device_manager_));
return absl::OkStatus();
}
namespace {
static TfrtSessionFactory* session_factory = nullptr;
}
tfrt_stub::Runtime* TfrtSessionFactory::GetRuntime() {
DCHECK(session_factory != nullptr);
absl::MutexLock lock(&session_factory->mutex_);
return session_factory->runtime_;
}
Status InitializeTfrtSession(const TfrtSessionOptions& options) {
DCHECK(session_factory != nullptr);
absl::MutexLock lock(&session_factory->mutex_);
DCHECK(!session_factory->IsInitialized());
return UpdateTfrtSessionOptionsLocked(options);
}
Status UpdateTfrtSessionOptionsLocked(const TfrtSessionOptions& options) {
DCHECK(session_factory != nullptr);
session_factory->mutex_.AssertHeld();
return session_factory->InitializeLocked(options);
}
static const bool kFactoryRegistration = [] {
session_factory = new TfrtSessionFactory();
LOG(INFO) << "Registering TfrtSession";
SessionFactory::Register("tfrt_session", session_factory);
return true;
}();
} | #include "tensorflow/core/tfrt/tfrt_session/tfrt_session.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool_options.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace {
class TfrtSessionEnvironment : public ::testing::Environment {
public:
void SetUp() override {
TfrtSessionOptions options{
.threadpool_options = tensorflow::TfrtThreadpoolOptions{
.num_main_threads = tensorflow::port::MaxParallelism(),
.init_timeout = absl::Milliseconds(100),
.max_concurrent_handler = 128,
.num_sub_thread_pool = 1}};
TF_ASSERT_OK(InitializeTfrtSession(options));
}
};
class TfrtSessionTest : public ::testing::Test {
protected:
void SetUp() override {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
TF_ASSERT_OK(session_->Run({}, {},
{"init"}, nullptr));
inputs_.push_back(std::make_pair(
"input1", test::AsTensor<int32_t>({1, 1, 1}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input2", test::AsTensor<int32_t>({2, 2, 2}, TensorShape{1, 3})));
inputs_.push_back(std::make_pair(
"input3", test::AsTensor<int32_t>({3, 3, 3}, TensorShape{1, 3})));
}
std::unique_ptr<Session> session_;
std::vector<std::pair<std::string, Tensor>> inputs_;
std::vector<std::string> output_tensor_names_{"result1", "result21",
"result31"};
std::vector<std::string> target_node_names_{"result22", "result32"};
};
TEST_F(TfrtSessionTest, NoTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
auto* model_metadata =
options.config.mutable_experimental()->mutable_session_metadata();
model_metadata->set_name("toy_v1");
model_metadata->set_version(0);
auto session = absl::WrapUnique(NewSession(options));
ASSERT_TRUE(session != nullptr);
tensorflow::GraphDef graph_def;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
node: {
name: "input"
op: "Placeholder"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
}
node: {
name: "sleep_seconds"
op: "Const"
attr: {
key: "dtype"
value: { type: DT_INT32 }
}
attr: {
key: "value"
value: {
tensor: {
tensor_shape: {}
dtype: DT_INT32
int_val: 2
}
}
}
}
node: {
name: "sleep"
op: "SleepIdentityOp"
input: "sleep_seconds:0"
input: "input:0"
attr: {
key: "T"
value: { type: DT_INT32 }
}
})pb"
,
&graph_def));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session->Run(
RunOptions{},
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
{}, &outputs, &run_metadata));
ASSERT_EQ(outputs.size(), 1);
test::ExpectEqual(outputs[0], test::AsTensor<int32_t>({1}, TensorShape{1}));
RunOptions run_options;
run_options.set_timeout_in_ms(1);
auto status = session->Run(
run_options,
{{"input", test::AsTensor<int32_t>({1}, TensorShape{1})}},
{"sleep"},
{}, &outputs, &run_metadata);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(), ::testing::HasSubstr("Deadline exceeded"));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
tfrt_stub::TfThreadPool inter_op_thread_pool(
"tf_inter",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyInter) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool inter_op_thread_pool(
"tf_inter",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = &inter_op_thread_pool,
.intra_op_threadpool = nullptr};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ThreadPoolOptions_OnlyIntra) {
std::vector<Tensor> outputs;
RunMetadata run_metadata;
tfrt_stub::TfThreadPool intra_op_thread_pool("tf_intra",
1);
thread::ThreadPoolOptions thread_pool_options{
.inter_op_threadpool = nullptr,
.intra_op_threadpool = &intra_op_thread_pool};
TF_ASSERT_OK(session_->Run(RunOptions{}, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata, thread_pool_options));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, RunInCallerThreadSessionOptions) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_inter_op_parallelism_threads(-1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(
{}, {}, {},
{"init"}, nullptr, &run_metadata));
}
TEST_F(TfrtSessionTest, RunInCallerThreadRunOptions) {
std::vector<Tensor> outputs;
RunOptions run_options;
run_options.set_inter_op_thread_pool(-1);
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(run_options, inputs_, output_tensor_names_,
{}, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, DeviceManager) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_inter_op_parallelism_threads(-1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
const DeviceMgr* device_manager;
TF_ASSERT_OK(session_->LocalDeviceManager(&device_manager));
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
RunMetadata run_metadata;
TF_ASSERT_OK(session_->Run(
{}, {}, {},
{"init"}, nullptr, &run_metadata));
const DeviceMgr* device_manager_final;
TF_ASSERT_OK(session_->LocalDeviceManager(&device_manager_final));
ASSERT_EQ(device_manager, device_manager_final);
}
TEST_F(TfrtSessionTest, IntraOpThreadPoolOptionWarning) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.set_intra_op_parallelism_threads(1);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
}
TEST_F(TfrtSessionTest, Callable) {
CallableOptions callable_options;
std::vector<Tensor> feed_tensors;
for (auto& input : inputs_) {
callable_options.add_feed(input.first);
feed_tensors.emplace_back(input.second);
}
for (auto& output : output_tensor_names_) {
callable_options.add_fetch(output);
}
for (auto& target : target_node_names_) {
callable_options.add_target(target);
}
Session::CallableHandle callable_handle;
TF_ASSERT_OK(session_->MakeCallable(callable_options, &callable_handle));
std::vector<Tensor> outputs;
RunMetadata run_metadata;
TF_ASSERT_OK(session_->RunCallable(callable_handle, feed_tensors, &outputs,
&run_metadata));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
TF_ASSERT_OK(session_->ReleaseCallable(callable_handle));
}
TEST_F(TfrtSessionTest, Finalize) { TF_ASSERT_OK(session_->Finalize()); }
TEST_F(TfrtSessionTest, WithTargetNodes) {
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_, target_node_names_,
&outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, CreateWithEmptyGraphIsNoop) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Create(GraphDef()));
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Create(meta_graph_def.graph_def()));
}
TEST_F(TfrtSessionTest, CreateAgainError) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr(
"A Graph has already been created for this session."));
}
TEST_F(TfrtSessionTest, CreateAfterCloseError) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Create(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, ExtendWhenNotCreated) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
TF_ASSERT_OK(session_->Extend(meta_graph_def.graph_def()));
TF_ASSERT_OK(session_->Run({}, {},
{"init"}, nullptr));
std::vector<Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs_, output_tensor_names_,
{}, &outputs));
ASSERT_EQ(outputs.size(), 3);
test::ExpectEqual(outputs[0],
test::AsTensor<int32_t>({6}, TensorShape{1, 1}));
test::ExpectEqual(outputs[1],
test::AsTensor<int32_t>({12}, TensorShape{1, 1}));
test::ExpectEqual(outputs[2],
test::AsTensor<int32_t>({18}, TensorShape{1, 1}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(session_->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", tensorflow::tfrt_stub::CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(session_->Run(inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(tensorflow::tfrt_stub::GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(TfrtSessionTest, ExtendAfterCreate_ErrorWithStaticGraphOptimization) {
SessionOptions options;
options.config.mutable_experimental()->set_use_tfrt(true);
options.config.mutable_experimental()->set_optimize_for_static_graph(true);
session_.reset(NewSession(options));
ASSERT_TRUE(session_ != nullptr);
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
TF_ASSERT_OK(session_->Create(graph_def));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
auto status = session_->Extend(extension);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("Extending the graph is not supported when"));
}
TEST_F(TfrtSessionTest, ExtendAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
auto status = session_->Extend(meta_graph_def.graph_def());
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, RunAfterCloseError) {
TF_ASSERT_OK(session_->Close());
std::vector<Tensor> outputs;
auto status = session_->Run(inputs_, output_tensor_names_,
{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.ToString(),
::testing::HasSubstr("Session has been closed."));
}
TEST_F(TfrtSessionTest, InitializeTwiceCrashes) {
TfrtSessionOptions options;
auto second_initialize = [](TfrtSessionOptions options) {
auto status = InitializeTfrtSession(options);
TF_ASSERT_OK(status);
};
ASSERT_DEBUG_DEATH(second_initialize(options), "");
}
TEST_F(TfrtSessionTest, GetRuntime) {
auto runtime = TfrtSessionFactory::GetRuntime();
EXPECT_NE(runtime, nullptr);
}
TEST_F(TfrtSessionTest, RegisterTwiceCrashes) {
TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); });
ASSERT_DEBUG_DEATH(TfrtSessionFactory::RegisterInitializer(
[](tfrt_stub::Runtime*) { return absl::OkStatus(); }),
"");
}
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
testing::AddGlobalTestEnvironment(new tensorflow::TfrtSessionEnvironment());
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/tfrt_session/tfrt_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/tfrt_session/tfrt_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f83f267d-434a-4484-9e69-3ad4689bb579 | cpp | tensorflow/tensorflow | stream_ops_util | tensorflow/core/tfrt/kernels/stream_ops_util.cc | tensorflow/core/tfrt/kernels/stream_ops_util_test.cc | #include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
absl::StatusOr<std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>>
UnbatchStreamResults(const tensorflow::Tensor& step_ids,
absl::Span<const tensorflow::Tensor> tensors) {
std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>> responses;
if (step_ids.dims() > 0) {
if (step_ids.dtype() != tensorflow::DT_INT64 || step_ids.dims() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected a 1-D int64 tensor for batched step ids but got dtype=",
tensorflow::DataTypeString(step_ids.dtype()),
" shape=", step_ids.shape().DebugString()));
}
const int batch_size = step_ids.dim_size(0);
for (int i = 0; i < tensors.size(); ++i) {
const tensorflow::TensorShape& shape = tensors[i].shape();
if (shape.dims() < 1 || shape.dim_size(0) != batch_size) {
return absl::InvalidArgumentError(absl::StrCat(
"All inputs to PwStreamResults inside tf.batch_function are "
"required to be batched (batch_size=",
batch_size, ") but input #", i, " has shape ",
shape.DebugString()));
}
}
std::vector<int> sizes;
absl::flat_hash_set<int64_t> unique_step_ids;
for (int i = 0; i < step_ids.NumElements(); ++i) {
      const int64_t request_id = step_ids.flat<int64_t>()(i);
      // The step id occupies the top `kStepIdBitSize` bits of the request id;
      // the remaining low bits distinguish examples within the same step.
      const int64_t step_id =
          static_cast<uint64_t>(request_id) >> (64 - kStepIdBitSize);
VLOG(1) << "PwStreamResults op is unbatching request_id=" << request_id
<< ", step_id=" << step_id;
if (step_id <= 0) {
return absl::InternalError(
absl::StrCat("Invalid step id=", step_id,
"; this usually indicates that `PwStreamResults` "
"was called from an unsupported nested context"));
}
      // Batch padding repeats the first request id, so seeing it again after
      // position 0 marks the start of the padded tail; stop unbatching here.
      if (i != 0 && request_id == step_ids.flat<int64_t>()(0)) {
        break;
      }
if (!responses.empty() && responses.back().first == step_id) {
sizes.back()++;
} else {
responses.push_back({step_id, {}});
sizes.push_back(1);
const bool inserted = unique_step_ids.insert(step_id).second;
if (!inserted) {
return absl::InternalError(absl::StrCat(
"Non-contiguous step ids found in the step id batch: ",
step_ids.DebugString(batch_size)));
}
}
}
int offset = 0;
for (int i = 0; i < responses.size(); ++i) {
auto& outputs = responses[i].second;
outputs.resize(tensors.size());
const int limit = offset + sizes[i];
for (int j = 0; j < tensors.size(); ++j) {
outputs[j] = tensors[j].Slice(offset, limit);
}
offset = limit;
}
  } else {
    // A scalar step id is already a plain step id (not a packed request id),
    // so it is used directly.
    const int64_t step_id = step_ids.flat<int64_t>()(0);
if (step_id <= 0) {
return absl::InternalError(
"Invalid step id; this usually indicates that `PwStreamResults` was "
"called from an unsupported nested context");
}
responses.push_back({step_id, std::vector<tensorflow::Tensor>(
tensors.begin(), tensors.end())});
}
return responses;
}
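// Editor's note (added; not part of the original source): a worked example of
// the batched branch above. For step_ids = [s1, s1, s2, s3] (step ids taken
// from the top bits of each request id) and a batched tensor t of shape
// [4, ...], the result is {s1: t.Slice(0, 2), s2: t.Slice(2, 3),
// s3: t.Slice(3, 4)}. A later repeat of the first request id is padding and
// is dropped.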
}
} | #include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::tensorflow::test::AsScalar;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::status::IsOkAndHolds;
int64_t RequestId(int64_t step_id, uint32_t id) {
return (step_id << kStepIdBitSize) | id;
}
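// Editor's note (added; not part of the original source): RequestId() places
// the step id above bit kStepIdBitSize and the per-example id in the low
// bits, while UnbatchStreamResults extracts the top kStepIdBitSize bits of
// the request id. The two layouts coincide only when kStepIdBitSize == 32,
// which these tests implicitly assume.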
TEST(UnbatchStreamResultsTest, ScalarStepId) {
const tensorflow::Tensor step_ids = AsScalar<int64_t>(1);
const std::vector<tensorflow::Tensor> tensors = {
AsScalar<int32_t>(1),
AsTensor<int32_t>({2, 3}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsScalar<int32_t>(1)),
TensorEq(AsTensor<int32_t>({2, 3})))))));
}
TEST(UnbatchStreamResultsTest, Batched) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({1, 2, 3, 4}),
AsTensor<int32_t>({5, 6, 7, 8}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({1, 2})),
TensorEq(AsTensor<int32_t>({5, 6})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({3})),
TensorEq(AsTensor<int32_t>({7})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({4})),
TensorEq(AsTensor<int32_t>({8})))))));
}
TEST(UnbatchStreamResultsTest, BatchedUnordered) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(2, 0), RequestId(1, 0), RequestId(1, 1), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({20, 10, 10, 30}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 10})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({20})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
TEST(UnbatchStreamResultsTest, PaddingOneExample) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 0), RequestId(1, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 10, 10, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10})))))));
}
TEST(UnbatchStreamResultsTest, PaddingMultipleExamples) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 20, 30, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 20})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/stream_ops_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/stream_ops_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af8c8af1-7b5b-4942-b777-1f91251c6e5a | cpp | tensorflow/tensorflow | ifrt_program_ops | tensorflow/core/tfrt/ops/ifrt_program_ops.cc | tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace tfrt_stub {
REGISTER_OP("IfrtCall")
.Input("args: Tin")
.Output("results: Tout")
.Attr("Tin: list(type) >= 0")
.Attr("Tout: list(type) >= 0")
.Attr("program_id: int")
.Attr("variable_arg_indices: list(int)")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
Calls an IFRT program identified by the given program id.
This op looks up a `ServingExecutable` from `ServingExecutableRegistry` using
the program id, calls the executable with the op's inputs as arguments, and
returns its results as the op's outputs.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel; they should instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
program_id: int64 id that can be used to look up compiled programs from
`ServingExecutableRegistry`.
variable_arg_indices: must be in sorted ascending order. The argument at position
variable_arg_indices[k] in the TPU program is already loaded as an IFRT array, and
the input `args[variable_arg_indices[k]]` is the key used to look up this loaded array.
)");
REGISTER_OP("IfrtLoadVariable")
.Input("variable: Tin")
.Output("array_key: Tout")
.Output("tensor: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.Attr("used_by_host: bool")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
This op loads a restored variable tensor as a tensor future. It is a replacement for `tf.ReadVariableOp`.
This op returns a scalar string tensor containing the restored variable name, which
is composed from the `container_name` and `shared_name` of a `var_handle` and can be
used as a key within the runtime, as well as a future for the tensor.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel; they should instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
variable: the variable handle of the variable tensor to be loaded.
array_key: the key used to look up the loaded array by the `IfrtCall` op.
tensor: the future of the loaded tensor. The future contains a valid tensor if `used_by_host` is true.
used_by_host: a boolean indicating whether the variable is used by the host op
or exclusively by the TPU.
)");
}
} | #include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using tensorflow::ifrt_serving::ServingExecutableRegistry;
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper;
using tensorflow::test::AsTensor;
using tensorflow::test::TensorEq;
using ::testing::Return;
class IfrtCallOpTest : public OpsTestBase {
protected:
Status Init(int64_t program_id, int num_inputs, DataType input_type,
const std::vector<int>& variable_arg_indices,
const std::vector<DataType>& output_type_list) {
TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall")
.Input(FakeInput(num_inputs, input_type))
.Attr("program_id", program_id)
.Attr("variable_arg_indices", variable_arg_indices)
.Attr("Tout", output_type_list)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IfrtCallOpTest, Basic) {
int64_t program_id = 123;
TF_ASSERT_OK(Init(
program_id,
2,
DT_INT32,
{},
{DT_INT32}));
tsl::test_util::MockServingDeviceSelector selector;
IfrtServingExecutableTestHelper helper(&selector);
EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
TF_ASSERT_OK_AND_ASSIGN(
ServingExecutableRegistry::Handle handle,
ServingExecutableRegistry::Register(program_id, std::move(executable)));
auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); });
AddInputFromArray<int32_t>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<int32_t>(TensorShape({3, 1}), {1, 2, 3});
for (int i = 0; i < helper.num_cores() + 1; ++i) {
TF_ASSERT_OK(RunOpKernel());
}
Tensor expected_out = AsTensor<int32_t>({14}, TensorShape({1, 1}));
EXPECT_THAT(*GetOutput(0), TensorEq(expected_out));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ops/ifrt_program_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
066c5d82-ae42-4a0c-8c65-e9015f33cc6c | cpp | tensorflow/tensorflow | gpu_runner | tensorflow/core/tfrt/gpu/kernel/gpu_runner.cc | tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc | #include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/compiler/jit/pjrt_compile_util.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_launch_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/framework/device_id_manager.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/gpu_variables_table.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/kernel_registry.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace gpu {
namespace {
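// Copies a host tensor to `gpu_device` through the PJRT device context. The
// copy runs on the blocking work queue; the returned async value becomes
// available (or holds an error) once the transfer finishes.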
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorToDevice(
const tfrt_stub::FallbackTensor& tensor, tfrt::HostContext* host_ctx,
Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
attr.set_use_pjrt_allocator(true);
tensorflow::Tensor dst(gpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
host_ctx, [result = result.CopyRef(), gpu_device, pjrt_device_context,
src, dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyCPUTensorToDevice(
&src, gpu_device, &dst, [&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
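// Inverse of TransferTensorToDevice: copies a device tensor back to
// `cpu_device`, again completing asynchronously on the blocking work queue.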
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> TransferTensorFromDevice(
const tfrt_stub::FallbackTensor& tensor, tfrt::HostContext* host_ctx,
Device* cpu_device, Device* gpu_device) {
const tensorflow::Tensor& src = tensor.tensor();
tensorflow::AllocatorAttributes attr;
tensorflow::Tensor dst(cpu_device->GetAllocator(attr), src.dtype(),
src.shape());
if (src.shape().num_elements() == 0) {
return tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(dst);
}
auto result =
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>();
DeviceContext* pjrt_device_context =
gpu_device->tensorflow_accelerator_device_info()->pjrt_context;
bool enqueued = tfrt::EnqueueBlockingWork(
host_ctx, [result = result.CopyRef(), gpu_device, pjrt_device_context,
src, dst = std::move(dst)]() mutable {
tensorflow::Notification n;
tensorflow::Status status;
pjrt_device_context->CopyDeviceTensorToCPU(
&src, "tensor_name", gpu_device, &dst,
[&status, &n](Status s) mutable {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
result.SetError(absl::InternalError(status.message()));
} else {
result.emplace(std::move(dst));
}
});
if (!enqueued) {
return tfrt::MakeErrorAsyncValueRef(absl::InternalError(
"Failed to enqueue blocking task to transfer tensor."));
}
return result;
}
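// Wraps each PJRT output buffer as a tensorflow::Tensor and returns the
// results as available async values. Tuple-shaped outputs are rejected.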
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
PopulateResultsFromPjRtExecutableOutputs(
const XlaCompiler::CompilationResult& compilation_result,
std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs,
Device* device, int num_outputs) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
fallback_tensor_results;
for (int i = 0; i < num_outputs; ++i) {
const DataType& dtype = compilation_result.outputs[i].type;
CHECK(!compilation_result.outputs[i].is_constant);
CHECK(dtype != DT_RESOURCE);
xla::PjRtBuffer* output_buffer = executable_outputs[i].get();
if (output_buffer->IsTuple()) {
return absl::InvalidArgumentError(
"Tuple PJRT buffer output is not supported.");
}
absl::Span<const int64_t> dims;
std::optional<std::vector<int64_t>> logical_dims_storage;
if (output_buffer->has_dynamic_dimensions()) {
TF_ASSIGN_OR_RETURN(std::vector<int64_t> logical_dims,
output_buffer->logical_dimensions());
logical_dims_storage.emplace(std::move(logical_dims));
dims = *logical_dims_storage;
} else {
dims = output_buffer->dimensions();
}
TensorShape tensor_shape;
for (int i = 0; i < dims.size(); ++i) {
TF_RETURN_IF_ERROR(tensor_shape.AddDimWithStatus(dims[i]));
}
TF_ASSIGN_OR_RETURN(
Tensor output_tensor,
MakeTensorFromPjRtBuffer(dtype, tensor_shape,
std::move(executable_outputs[i])));
auto result = tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
output_tensor);
fallback_tensor_results.emplace_back(std::move(result));
}
return fallback_tensor_results;
}
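// Transfers the outputs listed in `used_output_indices` (assumed sorted) back
// to the host; all other outputs are passed through unchanged.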
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferOutputsToHostIfNeeded(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
absl::Span<const int64_t> used_output_indices, Device* cpu_device,
Device* gpu_device, tfrt::HostContext* host_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
for (int i = 0, j = 0; i < outputs.size(); ++i) {
if (j < used_output_indices.size() && i == used_output_indices[j]) {
CHECK(outputs[i].IsAvailable());
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> output_on_cpu =
TransferTensorFromDevice(outputs[i].get(), host_ctx, cpu_device,
gpu_device);
results.push_back(std::move(output_on_cpu));
++j;
} else {
results.push_back(std::move(outputs[i]));
}
}
return results;
}
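// Transfers variables and inputs to the selected GPU. Variable copies are
// cached in `vars_table`; when `variables_are_shared` is true they are keyed
// by platform device and spread round-robin across that platform's devices.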
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
TransferVariablesAndInputs(int device_idx,
absl::Span<const tfrt_stub::FallbackTensor> args,
absl::Span<const int64_t> resource_indices,
Device* cpu_device,
const absl::flat_hash_map<int, Device*>& gpu_devices,
tfrt::gpu::GpuVariablesTable& vars_table,
bool variables_are_shared,
tfrt::HostContext* host_ctx) {
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
tsl::PlatformDeviceId platform_device_id;
DeviceType device_type(DEVICE_GPU);
TF_RETURN_IF_ERROR(tsl::DeviceIdManager::TfToPlatformDeviceId(
device_type, tsl::TfDeviceId(device_idx), &platform_device_id));
TF_ASSIGN_OR_RETURN(const std::vector<tsl::TfDeviceId> devices_on_platform,
tsl::DeviceIdManager::GetTfDevicesOnPlatform(
device_type, platform_device_id));
absl::flat_hash_set<int64_t> resource_indices_set(resource_indices.begin(),
resource_indices.end());
const int cache_copy_idx =
variables_are_shared ? platform_device_id.value() : device_idx;
for (int i = 0, resource_idx = 0; i < args.size(); ++i) {
if (resource_indices_set.contains(i)) {
VLOG(2) << "Transfer resource arg[" << i << "].";
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor;
auto cached_device_variable =
vars_table.GetDeviceVariable(args[i], cache_copy_idx);
if (cached_device_variable) {
VLOG(2) << "Cache hit for resource arg[" << i << "].";
device_tensor = cached_device_variable.CopyRef();
} else {
VLOG(2) << "Cache miss for resource arg[" << i << "].";
int gpu_device_idx;
if (variables_are_shared) {
const int idx = resource_idx % devices_on_platform.size();
gpu_device_idx = devices_on_platform[idx].value();
} else {
gpu_device_idx = device_idx;
}
VLOG(2) << "Transfer the resource arg[" << i << "] to device "
<< gpu_device_idx << ".";
device_tensor = TransferTensorToDevice(args[i], host_ctx,
gpu_devices.at(gpu_device_idx));
vars_table.AddOrUpdateDeviceVariable(args[i], cache_copy_idx,
std::move(device_tensor));
device_tensor =
vars_table.GetDeviceVariable(args[i], cache_copy_idx).CopyRef();
}
results.push_back(device_tensor);
++resource_idx;
} else {
VLOG(2) << "Transfer input arg[" << i << "].";
tfrt::AsyncValueRef<tfrt_stub::FallbackTensor> device_tensor =
TransferTensorToDevice(args[i], host_ctx, gpu_devices.at(device_idx));
results.push_back(device_tensor);
}
}
return results;
}
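// Fingerprints the function signature together with the session metadata so
// each distinct program gets a stable key for serving-device selection.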
absl::StatusOr<uint64_t> GenerateFingerprint(
const std::string& function_name,
const tfd::KernelFallbackCompatRequestState* fallback_request_state) {
const FunctionLibraryDefinition* flib_def =
fallback_request_state->cpu_function_library_runtime()
->GetFunctionLibraryDefinition();
const FunctionDef* fdef = flib_def->Find(function_name);
if (!fdef) {
return absl::InternalError(
absl::StrCat("Failed to find the function ", function_name));
}
return tsl::Fingerprint64(
absl::StrCat(fallback_request_state->session_metadata().name(),
fallback_request_state->session_metadata().version(),
tsl::LegacyUnredactedDebugString(fdef->signature())));
}
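// Converts the input tensors into XLA compiler arguments; resource-typed and
// empty tensors are not expected here.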
std::vector<XlaCompiler::Argument> BuildXlaCompilerArguments(
absl::Span<const tfrt_stub::FallbackTensor> inputs) {
std::vector<XlaCompiler::Argument> out;
out.resize(inputs.size());
for (int input_num = 0; input_num < inputs.size(); ++input_num) {
const tensorflow::Tensor& input = inputs[input_num].tensor();
CHECK_GT(input.NumElements(), 0);
CHECK(input.dtype() != DT_RESOURCE);
XlaCompiler::Argument& arg = out[input_num];
arg.kind = XlaCompiler::Argument::kParameter;
arg.type = input.dtype();
arg.shape = input.shape();
}
return out;
}
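// Compiles `run_inputs.func_name` for the chosen device into a PJRT loaded
// executable, using the global resource manager for the compilation cache.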
Status CompileProgram(const GpuRunInputs& run_inputs, int device_idx,
const XlaCompiler::CompilationResult** compilation_result,
xla::PjRtClient** pjrt_client,
xla::PjRtLoadedExecutable** pjrt_executable) {
std::vector<XlaCompiler::Argument> xla_compiler_args =
BuildXlaCompilerArguments(run_inputs.args);
DeviceBase* device = run_inputs.gpu_devices.at(device_idx);
FunctionLibraryRuntime* flr =
run_inputs.fallback_request_state->process_function_library_runtime()
.GetFLR(run_inputs.gpu_devices.at(device_idx)->name());
XlaPlatformInfo platform_info =
XlaPlatformInfoFromDevice(run_inputs.gpu_devices.at(device_idx));
NameAttrList function;
function.set_name(run_inputs.func_name);
ResourceMgr* rm = tfrt_global::GetTFGlobalResourceMgr();
return CompileToPjRtLoadedExecutable(
device, platform_info, function, xla_compiler_args,
DeviceCompileMode::kStrict,
      /*has_ref_vars=*/false,
      /*may_alias_resource_update=*/false, flr, rm, compilation_result,
pjrt_client, pjrt_executable);
}
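// Runs the compiled executable on the selected device, then transfers any
// host-consumed outputs back to the CPU.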
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
ExecuteProgram(
const GpuRunInputs& run_inputs,
const llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>&
transferred_args,
const XlaCompiler::CompilationResult* compilation_result,
xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* pjrt_executable,
int device_idx) {
std::vector<const Tensor*> inputs;
for (const auto& arg : transferred_args) {
if (arg.IsError()) {
return absl::InternalError(
absl::StrCat("Data transfer failed: ", arg.GetError().message()));
}
inputs.push_back(&arg->tensor());
}
if (compilation_result->collective_info.has_value()) {
return absl::UnimplementedError(
"Execution with collectives is not supported.");
}
TF_ASSIGN_OR_RETURN(
xla::PjRtDevice * pjrt_device,
pjrt_client->LookupAddressableDevice(xla::PjRtLocalDeviceId(device_idx)));
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<xla::PjRtBuffer>> executable_outputs,
      RunPjRtExecutable(/*num_missing_prefix_ctx_inputs=*/0, inputs,
                        /*variable_snapshots=*/{}, /*updated_variables=*/{},
                        DeviceType(DEVICE_GPU),
                        /*use_pjrt_tensor_buffer=*/true, *compilation_result,
pjrt_device, pjrt_client, pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results,
PopulateResultsFromPjRtExecutableOutputs(
*compilation_result, executable_outputs,
run_inputs.gpu_devices.at(device_idx), run_inputs.num_outputs));
return TransferOutputsToHostIfNeeded(
results, run_inputs.used_output_indices, run_inputs.cpu_device,
run_inputs.gpu_devices.at(device_idx), run_inputs.host_ctx);
}
}
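// Picks a device for the program's fingerprint, compiles (or looks up) the
// executable, transfers inputs, and executes once all transfers are ready.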
absl::StatusOr<
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>>
GpuRunner::Run(GpuRunInputs run_inputs) {
TF_ASSIGN_OR_RETURN(uint64_t fingerprint,
GenerateFingerprint(run_inputs.func_name,
run_inputs.fallback_request_state));
tsl::DeviceReservation device_reservation =
serving_device_selector_->ReserveDevice(absl::StrCat(fingerprint));
const int device_idx = device_reservation.device_index();
VLOG(1) << "GpuRunner selected device " << device_idx << ".";
const XlaCompiler::CompilationResult* compilation_result;
xla::PjRtClient* pjrt_client;
xla::PjRtLoadedExecutable* pjrt_executable;
TF_RETURN_IF_ERROR(CompileProgram(run_inputs, device_idx, &compilation_result,
&pjrt_client, &pjrt_executable));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>>
transferred_args,
TransferVariablesAndInputs(
device_idx, run_inputs.args, run_inputs.resource_indices,
run_inputs.cpu_device, run_inputs.gpu_devices, vars_table_,
          /*variables_are_shared=*/false, run_inputs.host_ctx));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4>
transferred_args_to_wait;
for (const auto& arg : transferred_args) {
if (!arg.IsAvailable()) {
transferred_args_to_wait.push_back(arg.CopyRCRef());
}
}
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> results;
results.reserve(run_inputs.num_outputs);
for (size_t i = 0; i < run_inputs.num_outputs; ++i) {
results.emplace_back(
tfrt::MakeUnconstructedAsyncValueRef<tfrt_stub::FallbackTensor>());
}
tfrt::RunWhenReady(
transferred_args_to_wait,
[run_inputs = std::move(run_inputs),
transferred_args = std::move(transferred_args), results = results,
compilation_result, pjrt_client, pjrt_executable, device_idx]() mutable {
auto execution_outputs =
ExecuteProgram(run_inputs, transferred_args, compilation_result,
pjrt_client, pjrt_executable, device_idx);
          if (!execution_outputs.ok()) {
            for (size_t i = 0; i < results.size(); ++i) {
              results[i].SetError(
                  absl::InternalError(execution_outputs.status().message()));
            }
            return;
          }
          // Dereference the StatusOr only after the ok() check; checking the
          // size first would crash on a failed execution.
          CHECK_EQ(results.size(), execution_outputs->size());
for (int i = 0; i < results.size(); ++i) {
auto& result = results[i];
auto& output_av = (*execution_outputs)[i];
output_av.AndThen([result = result, output_av = output_av] {
result.emplace(std::move(output_av.get().tensor()));
});
}
});
return results;
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/tfrt/gpu/kernel/gpu_runner.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace gpu {
namespace {
constexpr int kNumVirtualGpuDevices = 1;
constexpr char kFunctionName[] = "foo";
StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return graph;
}
StatusOr<FunctionDef> SampleFunctionAddXY(const std::string& name) {
TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY());
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef));
return fdef;
}
Status GetDevices(const tensorflow::tfd::KernelFallbackCompatRequestState*
fallback_request_state,
Device** cpu_device,
absl::flat_hash_map<int, Device*>& gpu_devices) {
*cpu_device = fallback_request_state->device_manager().HostCPU();
if (!*cpu_device) {
return absl::InternalError(
"Fallback request state must have a valid host cpu device.");
}
for (Device* device :
fallback_request_state->device_manager().ListDevices()) {
if (device->device_type() != DEVICE_GPU) continue;
if (!gpu_devices.try_emplace(device->parsed_name().id, device).second) {
return absl::InternalError(absl::StrCat(
"A device with the same device ID already exists when adding ",
device->name()));
}
}
if (gpu_devices.empty()) {
return absl::InternalError("No GPU device is found.");
}
for (const auto& [id, device] : gpu_devices) {
if (id >= gpu_devices.size()) {
return absl::InternalError("Device IDs are not consecutive.");
}
}
return OkStatus();
}
template <typename T>
Tensor CreateTensor(const TensorShape& input_shape,
gtl::ArraySlice<T> input_data,
Allocator* allocator = nullptr) {
Tensor tensor(DataTypeToEnum<T>::value, input_shape);
test::FillValues<T>(&tensor, input_data);
return tensor;
}
class GpuRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::SessionOptions session_options;
TF_ASSERT_OK_AND_ASSIGN(FunctionDef fdef,
SampleFunctionAddXY(kFunctionName));
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
session_options, fdef_lib));
std::function<void(std::function<void()>)> runner =
[](const std::function<void()>& f) { f(); };
tfrt_stub::OpKernelRunnerTable runner_table;
tfd::FallbackResourceArray resource_array;
fallback_request_state_ =
std::make_unique<tfd::KernelFallbackCompatRequestState>(
            &runner, &fallback_state_->device_manager(), /*step_id=*/0,
            &runner_table, &resource_array,
            /*user_intra_op_threadpool=*/nullptr,
            /*model_metadata=*/std::nullopt,
&fallback_state_->process_function_library_runtime());
auto host_allocator = tfrt::CreateMallocAllocator();
auto work_queue = tfrt::CreateMultiThreadedWorkQueue(
        /*num_threads=*/2, /*num_blocking_threads=*/2);
host_context_ = std::make_unique<tfrt::HostContext>(
[&](const tfrt::DecodedDiagnostic& diag) {}, std::move(host_allocator),
std::move(work_queue));
tfrt::RequestContextBuilder req_ctx_builder =
tfrt::RequestContextBuilder(host_context_.get(), nullptr);
tfrt::Expected<tfrt::RCReference<tfrt::RequestContext>> req_ctx(
std::move(req_ctx_builder).build());
ASSERT_TRUE(!!req_ctx);
exec_ctx_ = std::make_unique<tfrt::ExecutionContext>(std::move(*req_ctx));
auto policy = std::make_unique<tsl::RoundRobinPolicy>();
serving_device_selector_ = std::make_unique<GpuServingDeviceSelector>(
kNumVirtualGpuDevices, std::move(policy));
gpu_runner_ = std::make_unique<GpuRunner>(serving_device_selector_.get());
}
std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
std::unique_ptr<tfd::KernelFallbackCompatRequestState>
fallback_request_state_;
std::unique_ptr<tfrt::HostContext> host_context_;
std::unique_ptr<tfrt::ExecutionContext> exec_ctx_;
std::unique_ptr<GpuServingDeviceSelector> serving_device_selector_;
std::unique_ptr<GpuRunner> gpu_runner_;
};
TEST_F(GpuRunnerTest, Basic) {
GpuRunInputs run_inputs;
llvm::SmallVector<tfrt_stub::FallbackTensor> args;
Tensor tensor1 = CreateTensor<int32>(TensorShape({1, 2}), {1, 2});
Tensor tensor2 = CreateTensor<int32>(TensorShape({1, 2}), {3, 4});
args.push_back(tfrt_stub::FallbackTensor(tensor1));
args.push_back(tfrt_stub::FallbackTensor(tensor2));
run_inputs.args = &args;
run_inputs.num_outputs = 1;
run_inputs.resource_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.used_output_indices = tfrt::ArrayRef<int64_t>(0);
run_inputs.func_name = kFunctionName;
absl::flat_hash_map<int, Device*> gpu_devices;
ASSERT_OK(GetDevices(fallback_request_state_.get(), &run_inputs.cpu_device,
gpu_devices));
run_inputs.gpu_devices = &gpu_devices;
run_inputs.fallback_request_state = fallback_request_state_.get();
run_inputs.exec_ctx = exec_ctx_.get();
TF_ASSERT_OK_AND_ASSIGN(
llvm::SmallVector<tfrt::AsyncValueRef<tfrt_stub::FallbackTensor>> outputs,
gpu_runner_->Run(run_inputs));
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4> outputs_to_wait;
for (const auto& output : outputs) {
if (!output.IsAvailable()) {
outputs_to_wait.push_back(output.CopyRCRef());
}
}
exec_ctx_->host()->Await(outputs_to_wait);
ASSERT_EQ(outputs.size(), 1);
auto expected = CreateTensor<int32>(TensorShape({1, 2}), {4, 6});
test::ExpectTensorEqual<int32>(expected, outputs[0].get().tensor());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/gpu/kernel/gpu_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/gpu/kernel/gpu_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
402174c6-7916-42b0-b0e0-b7990900b8d6 | cpp | tensorflow/tensorflow | saved_model | tensorflow/compiler/mlir/tfrt/saved_model/saved_model.cc | tensorflow/compiler/mlir/tfrt/tests/saved_model/saved_model_test.cc | #include "tensorflow/compiler/mlir/tfrt/saved_model/saved_model.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
namespace tensorflow {
namespace {
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
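// Flattens a single-element string index path to its value; any other shape
// yields an empty name.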
llvm::StringRef ProcessIndexPath(mlir::ArrayAttr index_path) {
if (index_path.size() == 1 && mlir::isa<mlir::StringAttr>(index_path[0])) {
return mlir::cast<mlir::StringAttr>(index_path[0]).getValue();
}
return "";
}
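// Converts an MLIR tensor type to a (dtype, shape) pair; unranked tensors map
// to an unknown PartialTensorShape.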
absl::StatusOr<std::pair<tensorflow::DataType, tensorflow::PartialTensorShape>>
ProcessTensorSpec(mlir::TensorType type) {
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(
ConvertScalarTypeToDataType(type.getElementType(), &dtype));
if (!type.hasRank())
return std::make_pair(dtype, tensorflow::PartialTensorShape());
auto shape = type.getShape();
llvm::SmallVector<int64_t, 4> dims;
dims.assign(shape.begin(), shape.end());
return std::make_pair(dtype, tensorflow::PartialTensorShape(dims));
}
}
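// Walks every exported function in the module and invokes `map_fn` with its
// signature info (names, specs, devices, bound inputs), once per exported
// name.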
Status MapFunctionSignaturesFromTFSavedModelMLIR(
mlir::ModuleOp module,
llvm::function_ref<void(const TFRTSavedModelSignatureInfo&)> map_fn) {
mlir::SymbolTable symbol_table(module);
tensorflow::Status status = absl::OkStatus();
module.walk([&symbol_table, map_fn, &status](mlir::func::FuncOp func) {
auto func_names = mlir::tf_saved_model::GetExportedNames(func);
if (func_names.empty()) return mlir::WalkResult::advance();
auto func_type = func.getFunctionType();
llvm::SmallVector<llvm::StringRef, 4> input_names;
llvm::SmallVector<
std::pair<tensorflow::DataType, tensorflow::PartialTensorShape>, 4>
input_specs;
llvm::SmallVector<llvm::StringRef, 4> input_devices;
llvm::SmallVector<mlir::Operation*, 4> bound_inputs;
for (unsigned i = 0, e = func.getNumArguments(); i != e; ++i) {
if (auto input_index_path = func.getArgAttrOfType<mlir::ArrayAttr>(
i, kTfSavedModelIndexPathAttr)) {
input_names.push_back(ProcessIndexPath(input_index_path));
auto statusor_spec = ProcessTensorSpec(
mlir::cast<mlir::TensorType>(func_type.getInput(i)));
if (!statusor_spec.ok()) {
status = std::move(statusor_spec).status();
return mlir::WalkResult::interrupt();
}
input_specs.push_back(std::move(statusor_spec).value());
if (auto input_device =
func.getArgAttrOfType<mlir::StringAttr>(i, "tf.device")) {
input_devices.push_back(input_device.getValue());
} else {
input_devices.push_back("");
}
}
if (auto* bound_input =
mlir::tf_saved_model::LookupBoundInput(func, i, symbol_table)) {
bound_inputs.push_back(bound_input);
}
}
llvm::SmallVector<llvm::StringRef, 4> output_names;
llvm::SmallVector<
std::pair<tensorflow::DataType, tensorflow::PartialTensorShape>, 4>
output_specs;
for (unsigned i = 0, e = func.getNumResults(); i != e; ++i) {
if (auto output_index_path = func.getResultAttrOfType<mlir::ArrayAttr>(
i, kTfSavedModelIndexPathAttr)) {
output_names.push_back(ProcessIndexPath(output_index_path));
auto statusor_spec = ProcessTensorSpec(
mlir::cast<mlir::TensorType>(func_type.getResult(i)));
if (!statusor_spec.ok()) {
status = std::move(statusor_spec).status();
return mlir::WalkResult::interrupt();
}
output_specs.push_back(std::move(statusor_spec).value());
}
}
for (auto func_name : func_names) {
TFRTSavedModelSignatureInfo sig_info;
sig_info.func_name = func_name;
sig_info.input_names = input_names;
sig_info.input_specs = input_specs;
sig_info.input_devices = input_devices;
sig_info.output_names = output_names;
sig_info.output_specs = output_specs;
sig_info.bound_inputs = bound_inputs;
map_fn(sig_info);
}
return mlir::WalkResult::advance();
});
return status;
}
} | #include "tensorflow/compiler/mlir/tfrt/saved_model/saved_model.h"
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tsl/platform/statusor.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace {
TEST(SavedModelTest, MapSignatures) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/test.mlir");
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::vector<std::string> inputs;
std::vector<std::pair<tensorflow::DataType, tensorflow::PartialTensorShape>>
in_specs;
std::vector<std::string> outputs;
std::vector<std::pair<tensorflow::DataType, tensorflow::PartialTensorShape>>
out_specs;
std::vector<mlir::Operation*> bound_inputs;
TF_ASSERT_OK(MapFunctionSignaturesFromTFSavedModelMLIR(
module.get(), [&](const TFRTSavedModelSignatureInfo& sig_info) {
if (sig_info.func_name != "serving_default") return;
transform(sig_info.input_names, std::back_inserter(inputs),
[](llvm::StringRef x) { return x.str(); });
in_specs.assign(sig_info.input_specs.begin(),
sig_info.input_specs.end());
transform(sig_info.output_names, std::back_inserter(outputs),
[](llvm::StringRef x) { return x.str(); });
out_specs.assign(sig_info.output_specs.begin(),
sig_info.output_specs.end());
bound_inputs.assign(sig_info.bound_inputs.begin(),
sig_info.bound_inputs.end());
}));
ASSERT_EQ(inputs.size(), 1);
EXPECT_EQ(inputs[0], "x");
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs[0], "r");
ASSERT_EQ(in_specs.size(), 1);
ASSERT_EQ(in_specs[0].first, tensorflow::DT_INT32);
ASSERT_TRUE(in_specs[0].second.IsIdenticalTo(PartialTensorShape({1, 3})));
ASSERT_EQ(out_specs.size(), 1);
ASSERT_EQ(out_specs[0].first, tensorflow::DT_INT32);
ASSERT_TRUE(out_specs[0].second.IsIdenticalTo(PartialTensorShape({1, 1})));
ASSERT_EQ(bound_inputs.size(), 2);
auto global_tensor =
llvm::cast<mlir::tf_saved_model::GlobalTensorOp>(bound_inputs[0]);
auto asset = llvm::cast<mlir::tf_saved_model::AssetOp>(bound_inputs[1]);
EXPECT_EQ(global_tensor.getSymName(), "y");
EXPECT_EQ(asset.getSymName(), "z");
}
TEST(SavedModelTest, CompileToBEF) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/test.mlir");
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
tfrt::BefBuffer bef_buffer;
auto runtime =
      tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&bef_buffer, model_context));
}
TEST(SavedModelTest, ConvertTfMlirToBefWithXlaFuncExport) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"xla_launch.mlir");
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
tfrt::BefBuffer bef_buffer;
auto runtime =
      tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.compile_options.device_target = TfrtDeviceInfraTarget::kGpu;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&bef_buffer, model_context,
fallback_state.get()));
EXPECT_EQ(fallback_state->process_function_library_runtime()
.GetFunctionLibraryDefinition()
->num_functions(),
3);
}
TEST(SavedModelTest, ConvertTfMlirToBefExportingXlaReduceWindow) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"xla_launch_xla_reduce_window.mlir");
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
tfrt::BefBuffer bef_buffer;
auto runtime =
tensorflow::tfrt_stub::Runtime::Create(1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.compile_options.device_target = TfrtDeviceInfraTarget::kGpu;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&bef_buffer, model_context,
fallback_state.get()));
EXPECT_EQ(fallback_state->process_function_library_runtime()
.GetFunctionLibraryDefinition()
->num_functions(),
2);
}
TEST(SavedModelTest, AddXlaFunctionsOutputFunctionNames) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"xla_launch_xla_reduce_window.mlir");
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
tfrt::BefBuffer bef_buffer;
auto runtime =
      tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.compile_options.device_target = TfrtDeviceInfraTarget::kGpu;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
std::vector<std::string> function_names;
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&bef_buffer, model_context,
fallback_state.get(), &function_names));
EXPECT_THAT(function_names, ::testing::SizeIs(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/saved_model/saved_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/tests/saved_model/saved_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c3a7bc1-a537-48b5-8488-5855798de70c | cpp | tensorflow/tensorflow | serialize_utils | tensorflow/core/tfrt/saved_model/utils/serialize_utils.cc | tensorflow/core/tfrt/saved_model/utils/serialize_utils_test.cc | #include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/ToolOutputFile.h"
#include "mlir/Support/FileUtilities.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
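// Writes the BEF buffer verbatim to `filepath`.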
absl::Status SerializeBEF(const tfrt::BefBuffer &bef,
const std::string &filepath) {
std::string errorMessage;
  auto output = mlir::openOutputFile(filepath, &errorMessage);
  if (!output) {
    // openOutputFile returns null on failure and fills errorMessage.
    return absl::InternalError(errorMessage);
  }
  output->os().write(reinterpret_cast<const char *>(bef.data()), bef.size());
  output->keep();
LOG(INFO) << "Completed serializing BEF to: " << filepath;
return absl::OkStatus();
}
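// Reads a file written by SerializeBEF back into a BefBuffer.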
absl::StatusOr<tfrt::BefBuffer> DeserializeBEFBuffer(
const std::string &filepath) {
std::string data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &data));
tfrt::BefBuffer bef(data.begin(), data.end());
LOG(INFO) << "Successfully loaded serialized BEF from: " << filepath;
return bef;
}
absl::Status SerializeMLRTBytecode(const mlrt::bc::Buffer &bytecode,
const std::string &filepath) {
std::string errorMessage;
  auto output = mlir::openOutputFile(filepath, &errorMessage);
  if (!output) {
    // openOutputFile returns null on failure and fills errorMessage.
    return absl::InternalError(errorMessage);
  }
  output->os().write(reinterpret_cast<const char *>(bytecode.data()),
                     bytecode.size());
  output->keep();
LOG(INFO) << "Completed serializing MLRTBytecode to: " << filepath;
return absl::OkStatus();
}
absl::StatusOr<mlrt::bc::Buffer> DeserializeMlrtBytecodeBuffer(
const std::string &filepath) {
std::string bytecode_data;
TF_CHECK_OK(ReadFileToString(tsl::Env::Default(), filepath, &bytecode_data));
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
allocator.Allocate(bytecode_data.length(), alignof(char));
memcpy(buffer.data(), bytecode_data.data(), bytecode_data.length());
LOG(INFO) << "Successfully loaded serialized MLRTBytecode from: " << filepath;
return buffer;
}
}
} | #include "tensorflow/core/tfrt/saved_model/utils/serialize_utils.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_util.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/platform/env.h"
#include "tfrt/bef/bef_buffer.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(SerializeBEFTest, HandlesCompleteProcess) {
tfrt::BefBuffer old_bef;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::unique_ptr<Runtime> runtime =
      tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
tfrt::ResourceContext resource_context;
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK(ConvertTfMlirToBef(options.compile_options, module.get(),
&old_bef, model_context));
const std::string filepath =
io::JoinPath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
std::string("serialized_bef.mlir.bef"));
TF_ASSERT_OK(tensorflow::tfrt_stub::SerializeBEF(old_bef, filepath));
ASSERT_NE(old_bef.size(), 0);
TF_ASSERT_OK_AND_ASSIGN(const tfrt::BefBuffer bef,
DeserializeBEFBuffer(filepath));
ASSERT_TRUE(old_bef.size() == bef.size());
std::unique_ptr<Runtime> default_runtime =
DefaultTfrtRuntime(1);
SavedModel::Options default_options =
DefaultSavedModelOptions(default_runtime.get());
TF_EXPECT_OK(tfrt::CreateBefFileFromBefBuffer(
*default_options.graph_execution_options.runtime, bef)
.status());
}
TEST(SerializeMLRTTest, HandlesSerializeAndDeserializeProcess) {
mlrt::bc::Buffer old_bytecode;
const std::string saved_model_mlir_path =
"third_party/tensorflow/compiler/mlir/tfrt/tests/saved_model/testdata/"
"test.mlir";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
mlir::OwningOpRef<mlir::ModuleOp> module_with_op_keys;
std::unique_ptr<Runtime> runtime =
      tensorflow::tfrt_stub::Runtime::Create(/*num_inter_op_threads=*/1);
tfrt_stub::GraphExecutionOptions options(runtime.get());
options.enable_mlrt = true;
tfrt::ResourceContext resource_context;
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<tfrt_stub::FallbackState> fallback_state,
tfrt_stub::FallbackState::Create(SessionOptions(), FunctionDefLibrary()));
tfrt_stub::ModelRuntimeContext model_context(
&options, options.compile_options.saved_model_dir, &resource_context);
TF_ASSERT_OK_AND_ASSIGN(
old_bytecode, mlrt_compiler::ConvertTfMlirToBytecode(
options.compile_options, *fallback_state, module.get(),
model_context, &module_with_op_keys));
const std::string aot_package_path =
GetAotPackagePath(getenv("TEST_UNDECLARED_OUTPUTS_DIR"));
tsl::Env* env = tsl::Env::Default();
TF_ASSERT_OK(env->RecursivelyCreateDir(aot_package_path));
const std::string filepath =
io::JoinPath(aot_package_path, std::string("serialized_mlrt.mlir.mlrt"));
TF_ASSERT_OK(
tensorflow::tfrt_stub::SerializeMLRTBytecode(old_bytecode, filepath));
ASSERT_NE(old_bytecode.size(), 0);
mlrt::bc::Buffer bytecode;
TF_ASSERT_OK_AND_ASSIGN(bytecode, DeserializeMlrtBytecodeBuffer(filepath));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
TF_ASSERT_OK_AND_ASSIGN(
bytecode,
LoadMlrtAndMlir(options.compile_options, module_with_op_keys.get(),
getenv("TEST_UNDECLARED_OUTPUTS_DIR"),
fallback_state.get()));
ASSERT_TRUE(old_bytecode.size() == bytecode.size());
EXPECT_STREQ(old_bytecode.data(), bytecode.data());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/saved_model/utils/serialize_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/saved_model/utils/serialize_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a3f8588-637c-45e9-a35c-80b3a7189e17 | cpp | tensorflow/tensorflow | cost_recorder | tensorflow/core/tfrt/fallback/cost_recorder.cc | tensorflow/core/tfrt/fallback/cost_recorder_test.cc | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace tfrt_stub {
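// Accumulates total execution time and execution count per op_key; the
// average is computed lazily in GetCost().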
void CostRecorder::RecordCost(int64_t op_key, uint64_t execution_time) {
mutex_lock l(op_cost_map_mutex_);
op_cost_map_[op_key].first += execution_time;
op_cost_map_[op_key].second += 1;
}
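// Returns the average recorded cost for `op_key`, clamped to at least 1. Ops
// without measurements report uint32 max as a "not yet measured" sentinel.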
uint64_t CostRecorder::GetCost(int64_t op_key) const {
tf_shared_lock l(op_cost_map_mutex_);
const auto iter = op_cost_map_.find(op_key);
if (iter == op_cost_map_.end()) return std::numeric_limits<uint32_t>::max();
const auto total_cost = iter->second.first;
const auto num_ops = iter->second.second;
auto r =
std::max(static_cast<uint64_t>(1),
static_cast<uint64_t>(total_cost / num_ops));
VLOG(2) << "Get cost for op_key=" << op_key << ", cost=" << r;
return r;
}
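// Dumps the per-op average costs as a text OpCostMapProto to the path named
// by the environment variable from MesuredCostPathEnvVarName().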
Status CostRecorder::WriteToFile() const {
OpCostMapProto op_cost_map_proto;
{
tf_shared_lock l(op_cost_map_mutex_);
for (const auto& [op_key, op_cost] : op_cost_map_) {
const uint64_t avg_op_cost = op_cost.first / op_cost.second;
(*op_cost_map_proto.mutable_op_cost_map())[op_key] = avg_op_cost;
}
}
std::string measured_cost_path;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(MesuredCostPathEnvVarName(), "",
&measured_cost_path));
return tensorflow::WriteTextProto(tensorflow::Env::Default(),
measured_cost_path, op_cost_map_proto);
}
size_t CostRecorder::size() const {
tf_shared_lock l(op_cost_map_mutex_);
return op_cost_map_.size();
}
}
} | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr int64_t kTestOpKey = 1;
constexpr uint64_t kTestCost = 1234;
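// Average of the two costs recorded below: (1234 + 2468) / 2 = 1851.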
constexpr uint64_t kTestAvgCost = 1851;
TEST(CostRecorderTest, RecordCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, kTestCost);
EXPECT_EQ(recorder.size(), 1);
}
TEST(CostRecorderTest, GetCostTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
EXPECT_EQ(recorder.size(), 1);
EXPECT_EQ(recorder.GetCost(kTestOpKey), kTestAvgCost);
}
TEST(CostRecorderTest, GetCostDefaultValueTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
EXPECT_EQ(recorder.GetCost(kTestOpKey),
std::numeric_limits<uint32_t>::max());
}
TEST(CostRecorderTest, WriteToFileTest) {
CostRecorder recorder;
ASSERT_EQ(recorder.size(), 0);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv("TF_TFRT_MEASURED_COST_PATH", measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map_size(), 0);
}
TEST(CostRecorderTest, ProtoRecordsTest) {
CostRecorder recorder;
recorder.RecordCost(kTestOpKey, kTestCost);
recorder.RecordCost(kTestOpKey, 2 * kTestCost);
ASSERT_EQ(recorder.size(), 1);
std::string measured_cost_path;
tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
ASSERT_EQ(setenv(CostRecorder::MesuredCostPathEnvVarName(),
measured_cost_path.c_str(), 1),
0);
TF_CHECK_OK(recorder.WriteToFile());
OpCostMapProto op_cost_map_proto;
TF_CHECK_OK(tensorflow::ReadTextProto(
tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
EXPECT_EQ(op_cost_map_proto.op_cost_map().find(kTestOpKey)->second,
kTestAvgCost);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/cost_recorder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/cost_recorder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a547991a-c996-49e1-9bda-5e264c5885da | cpp | tensorflow/tensorflow | fallback_state | tensorflow/core/tfrt/fallback/fallback_state.cc | tensorflow/core/tfrt/fallback/fallback_state_test.cc | #include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_execution_state.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/tpu/virtual_device.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
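// Helpers that name and describe the mock (virtual) devices; the mock GPU
// advertises 16 GiB of device memory.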
string DeviceName(absl::string_view name_prefix, absl::string_view device_type,
int32_t task_id, size_t device_id) {
return strings::StrCat(absl::StripSuffix(name_prefix, "0"), task_id,
"/device:", device_type, ":", device_id);
}
DeviceAttributes BuildDeviceAttributes(absl::string_view name_prefix,
const char *device_type, int32_t task_id,
size_t device_id) {
const DeviceAttributes attrs = Device::BuildDeviceAttributes(
DeviceName(name_prefix, device_type, task_id, device_id),
DeviceType(device_type), Bytes(16ULL << 30), DeviceLocality(),
strings::StrCat("device: ", device_type, " device"));
return attrs;
}
}
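// Builds a FallbackState that owns real devices created by DeviceFactory for
// the local job.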
absl::StatusOr<std::unique_ptr<FallbackState>> FallbackState::Create(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithCpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithMockGpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto device_attrs =
BuildDeviceAttributes("/job:localhost/replica:0/task:0", "GPU", 0, 0);
devices.push_back(
std::make_unique<VirtualDevice>(session_options.env, device_attrs));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithDeviceMgr(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib,
absl::Nonnull<DynamicDeviceMgr *> device_mgr) {
return std::make_unique<FallbackState>(session_options, device_mgr, fdef_lib);
}
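// The constructor either takes ownership of a device vector or borrows an
// externally owned DynamicDeviceMgr; device_manager_ptr_ always points at the
// manager actually in use.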
FallbackState::FallbackState(const SessionOptions &session_options,
std::variant<std::vector<std::unique_ptr<Device>>,
absl::Nonnull<DynamicDeviceMgr *>>
device_mgr,
const tensorflow::FunctionDefLibrary &fdef_lib)
: session_options_(session_options),
device_manager_(
std::holds_alternative<std::vector<std::unique_ptr<Device>>>(
device_mgr)
? std::move(
std::get<std::vector<std::unique_ptr<Device>>>(device_mgr))
: std::vector<std::unique_ptr<Device>>()),
device_manager_ptr_(
std::holds_alternative<absl::Nonnull<DynamicDeviceMgr *>>(device_mgr)
? std::get<absl::Nonnull<DynamicDeviceMgr *>>(device_mgr)
: &device_manager_),
func_lib_def_(OpRegistry::Global(), fdef_lib),
pflr_(device_manager_ptr_, session_options.env, &session_options.config,
TF_GRAPH_DEF_VERSION, &func_lib_def_,
session_options.config.graph_options().optimizer_options(),
            /*thread_pool=*/nullptr, /*parent=*/nullptr,
            /*session_metadata=*/nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr *device_mgr,
tsl::core::RefCountPtr<Rendezvous> *r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}) {
for (auto *d : device_manager_ptr_->ListDevices()) {
device_set_.AddDevice(d);
}
device_set_.set_client_device(device_manager().HostCPU());
}
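// Creates a GraphExecutionState for `graph_def` over this state's device set,
// optionally running the placer.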
absl::StatusOr<std::unique_ptr<GraphExecutionState>>
FallbackState::CreateGraphExecutionState(GraphDef graph_def,
bool run_placer) const {
GraphExecutionStateOptions options;
options.device_set = &device_set_;
options.session_options = &session_options_;
options.session_handle = "tfrt_fallback_handle";
options.run_placer = run_placer;
std::unique_ptr<GraphExecutionState> execution_state;
TF_RETURN_IF_ERROR(GraphExecutionState::MakeForBaseGraph(
std::move(graph_def), options, &execution_state));
return execution_state;
}
absl::Status FallbackState::AddFunctionDef(const FunctionDef &func_def) {
return func_lib_def_.AddFunctionDef(func_def);
}
}
} | #include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/nullability.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Not;
TEST(FallbackStateTest, CreateWithCpuDeviceVector) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
std::variant<std::vector<std::unique_ptr<Device>>,
absl::Nonnull<DynamicDeviceMgr*>>
device_variant = std::move(devices);
auto fallback_state = std::make_unique<tfrt_stub::FallbackState>(
session_options, std::move(device_variant), fdef_lib);
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDevices(), 0);
EXPECT_EQ(device_manager.NumDeviceType("CPU"), 1);
}
TEST(FallbackStateTest, CreateWithDynamicDeviceMgr) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto static_device_mgr =
std::make_unique<DynamicDeviceMgr>(std::move(devices));
absl::Nonnull<DynamicDeviceMgr*> device_mgr_ptr(static_device_mgr.get());
auto fallback_state = std::make_unique<tfrt_stub::FallbackState>(
session_options, device_mgr_ptr, fdef_lib);
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDevices(), 0);
EXPECT_EQ(device_manager.NumDeviceType("CPU"), 1);
}
TEST(FallbackStateTest, CreateRendezvous) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
"dummy_fn",
      /*arg_def=*/{},
      /*ret_def=*/{},
      /*attr_def=*/{},
      /*node_def=*/{});
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::Create({}, flib));
const ProcessFunctionLibraryRuntime& pflr =
fallback_state->process_function_library_runtime();
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:localhost/replica:0/task:0";
opts.remote_execution = true;
auto status = pflr.RunSync(opts, pflr.GetHandle("dummy_fn"), {}, nullptr);
EXPECT_THAT(status, Not(StatusIs(error::FAILED_PRECONDITION,
HasSubstr("rendezvous"))));
}
TEST(FallbackStateTest, CreateGraphExecutionState) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tfrt_stub::FallbackState::CreateWithCpuDevice(session_options, fdef_lib));
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
fallback_state->CreateGraphExecutionState(std::move(graphdef)));
}
TEST(FallbackStateTest, CreateWithMockGpuDevice) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::CreateWithMockGpuDevice(
session_options, fdef_lib));
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDeviceType("GPU"), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/fallback_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/fallback_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55717161-4c5c-47f4-9796-477dd280f8de | cpp | tensorflow/tensorflow | op_kernel_runner | tensorflow/core/tfrt/fallback/op_kernel_runner.cc | tensorflow/core/tfrt/fallback/op_kernel_runner_test.cc | #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
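// Rejects ops with ref-typed inputs or outputs, which the TFRT kernel
// fallback does not support.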
Status CheckOpDefCompatibility(const tensorflow::OpDef& op_def) {
auto check_arg_def = [&](const auto& arg_def) {
if (arg_def.is_ref())
return tensorflow::errors::Internal(
"TFRT kernel fallback error: Unsupported ref args in ",
op_def.name());
return absl::OkStatus();
};
for (const auto& arg_def : op_def.input_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
for (const auto& arg_def : op_def.output_arg())
TF_RETURN_IF_ERROR(check_arg_def(arg_def));
return absl::OkStatus();
}
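// Builds a NodeDef with `num_args` placeholder inputs, caller-provided attrs,
// and default values for any attrs the caller left unset.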
absl::StatusOr<tensorflow::NodeDef> BuildNodeDef(
const tensorflow::OpDef& op_def, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder) {
tensorflow::NodeDef node_def;
node_def.set_name(std::string(node_name));
node_def.set_op(op_def.name());
for (int i = 0; i < num_args; ++i) {
node_def.add_input("dummy_input");
}
auto* attr_value_map = node_def.mutable_attr();
TF_RETURN_IF_ERROR(attr_builder(attr_value_map));
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value()) {
attr_value_map->insert({attr_def.name(), attr_def.default_value()});
}
}
return node_def;
}
tensorflow::Status CreateOpKernel(
tensorflow::FunctionLibraryRuntime* flr, tensorflow::NodeDef ndef,
std::unique_ptr<tensorflow::OpKernel>* result) {
std::shared_ptr<const tensorflow::NodeProperties> props;
TF_RETURN_IF_ERROR(tensorflow::NodeProperties::CreateFromNodeDef(
std::move(ndef), flr->GetFunctionLibraryDefinition(), &props));
tensorflow::OpKernel* k = nullptr;
TF_RETURN_IF_ERROR(flr->CreateKernel(props, &k));
result->reset(k);
return absl::OkStatus();
}
}
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name,
absl::string_view device_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::DeviceMgr& device_manager,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime) {
tensorflow::Device* device = nullptr;
Status s = device_manager.LookupDevice(device_name, &device);
if (!s.ok()) {
LOG_EVERY_N_SEC(WARNING, 30)
<< "Failed to find device " << device_name
<< " when creating OpKernel: " << op_name << ". Error: " << s
<< ", fallback to host device instead";
device = device_manager.HostCPU();
}
return Create(op_name, node_name, num_args, attr_builder,
process_function_library_runtime, device);
}
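// Illustrative usage (hypothetical call site, not part of this file):
//
//   auto runner = OpKernelRunner::Create(
//       "Relu", "relu_node",
//       "/job:localhost/replica:0/task:0/device:CPU:0", /*num_args=*/1,
//       [](tensorflow::AttrValueMap* attrs) {
//         (*attrs)["T"].set_type(tensorflow::DT_FLOAT);
//         return absl::OkStatus();
//       },
//       fallback_state.device_manager(),
//       fallback_state.process_function_library_runtime());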
absl::StatusOr<OpKernelRunner> OpKernelRunner::Create(
absl::string_view op_name, absl::string_view node_name, int num_args,
const std::function<Status(tensorflow::AttrValueMap*)>& attr_builder,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tensorflow::Device* device) {
const OpDef* op_def = nullptr;
TF_RETURN_IF_ERROR(tensorflow::OpRegistry::Global()->LookUpOpDef(
std::string(op_name), &op_def));
TF_RETURN_IF_ERROR(CheckOpDefCompatibility(*op_def));
VLOG(1) << "KernelFallbackExecuteCompat creating op from OpDef: "
<< op_def->DebugString();
TF_ASSIGN_OR_RETURN(auto node_def,
BuildNodeDef(*op_def, node_name, num_args, attr_builder));
VLOG(1) << "KernelFallbackExecuteCompat created NodeDef: "
<< node_def.DebugString();
tensorflow::FunctionLibraryRuntime* function_library_runtime = nullptr;
function_library_runtime =
process_function_library_runtime.GetFLR(device->name());
std::unique_ptr<OpKernel> op_kernel;
TF_RETURN_IF_ERROR(CreateOpKernel(function_library_runtime,
std::move(node_def), &op_kernel));
return OpKernelRunner(device, function_library_runtime, std::move(op_kernel));
}
OpKernelRunner::OpKernelRunner(
tensorflow::Device* device,
tensorflow::FunctionLibraryRuntime* function_library_runtime,
std::unique_ptr<tensorflow::OpKernel> op_kernel)
: op_kernel_(std::move(op_kernel)), info_(std::make_unique<Info>()) {
DCHECK(device);
DCHECK(function_library_runtime);
info_->device = device;
info_->function_library_runtime = function_library_runtime;
info_->resource_manager = device->resource_manager();
info_->is_async = (op_kernel_->AsAsync() != nullptr);
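  // Pre-compute per-argument allocator attributes from the kernel's memory
  // types so each invocation can reuse them without recomputation.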
const auto& input_memory_types = op_kernel_->input_memory_types();
auto& input_alloc_attrs = info_->input_alloc_attrs;
auto& output_alloc_attrs = info_->output_alloc_attrs;
input_alloc_attrs.resize(op_kernel_->num_inputs());
for (size_t i = 0, e = op_kernel_->num_inputs(); i < e; ++i) {
input_alloc_attrs[i].set_on_host(input_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
const auto& output_memory_types = op_kernel_->output_memory_types();
output_alloc_attrs.resize(op_kernel_->num_outputs());
for (size_t i = 0, e = output_alloc_attrs.size(); i < e; ++i) {
output_alloc_attrs[i].set_on_host(output_memory_types[i] ==
tensorflow::HOST_MEMORY);
}
input_alloc_attrs_ = input_alloc_attrs;
output_alloc_attrs_ = output_alloc_attrs;
}
void OpKernelRunner::RunAsync(OpKernelContext* context,
AsyncOpKernel::DoneCallback done_callback) const {
DVLOG(1) << "KernelFallbackExecuteCompat Running Async Op: "
<< op_kernel_->def().DebugString()
<< ", on Device: " << context->device()->name();
AsyncOpKernel* async = op_kernel_->AsAsync();
DCHECK(async);
async->ComputeAsync(context, std::move(done_callback));
}
}
} | #include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner_cache.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::IsNull;
using ::testing::SizeIs;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
constexpr const char* kDeviceType = "GPU";
#else
constexpr const char* kDeviceType = "CPU";
#endif
class TestOpKernel : public OpKernel {
public:
using OpKernel::OpKernel;
~TestOpKernel() override = default;
void Compute(OpKernelContext* context) override {
context->set_output(0, context->input(0));
}
};
REGISTER_KERNEL_BUILDER(Name("TestOp").Device(DEVICE_CPU), TestOpKernel);
REGISTER_OP("TestOp").Input("x: int32").Output("y: int32");
TEST(OpKernelRunnerTest, Create) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
TF_ASSERT_OK_AND_ASSIGN(
auto runner,
OpKernelRunner::Create(
"TestOp", "TestOp_node_name",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner.op_kernel()->name(), "TestOp_node_name");
}
TEST(OpKernelRunnerTest, OpKernelRunnerCache) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
FallbackState::Create(session_options, fdef_lib));
OpKernelRunnerCache cache;
tfrt::Location loc(nullptr, 100);
TF_ASSERT_OK_AND_ASSIGN(
auto* runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
TF_ASSERT_OK_AND_ASSIGN(
runner,
cache.GetOrCreate(
loc,
"TestOp",
"/job:localhost/replica:0/task:0/device:CPU:0",
1,
[](tensorflow::AttrValueMap*) { return absl::OkStatus(); },
fallback_state->device_manager(),
fallback_state->process_function_library_runtime()));
ASSERT_TRUE(runner);
EXPECT_EQ(runner->op_kernel()->name(), "TestOp_100_0");
}
TEST(OpKernelRunnerTest, OpKernelRunState) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({kDeviceType, 1});
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::GetFactory(kDeviceType)
->CreateDevices(options,
"/job:a/replica:0/task:0",
&devices));
ASSERT_EQ(devices.size(), 1);
OpKernelContext::Params params;
params.device = devices[0].get();
params.ensure_eigen_gpu_device();
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ASSERT_THAT(params.eigen_gpu_device, ::testing::NotNull());
#endif
Tensor a(DT_FLOAT, TensorShape({}));
Tensor b(DT_INT32, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> inputs{TensorValue(&a),
TensorValue(&b)};
params.inputs = inputs;
Tensor c(DT_UINT8, TensorShape({}));
absl::InlinedVector<TensorValue, 4UL> new_inputs{TensorValue(&c)};
OpKernelRunState run_state(new_inputs, params);
EXPECT_THAT(run_state.input_tf_tensors, SizeIs(1));
EXPECT_THAT(run_state.input_tf_tensor_values, SizeIs(1));
EXPECT_EQ(run_state.params.inputs.data(),
run_state.input_tf_tensor_values.data());
EXPECT_THAT(run_state.params.eigen_gpu_device, IsNull());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/op_kernel_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/op_kernel_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9023f77c-6d2f-4bb9-b69d-4e2e666b234d | cpp | tensorflow/tensorflow | stream | tensorflow/core/tfrt/runtime/stream.cc | tensorflow/core/tfrt/runtime/stream_test.cc | #include "tensorflow/core/tfrt/runtime/stream.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/utility/utility.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/platform/random.h"
#include "tsl/platform/threadpool_interface.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace tfrt_stub {
absl::StatusOr<std::optional<StreamCallbackId>> CreateStreamCallbackId(
absl::string_view model_name, mlir::ModuleOp module) {
mlir::Builder builder(module.getContext());
std::vector<mlir::TF::PwStreamResultsOp> ops;
module->walk([&](mlir::TF::PwStreamResultsOp op) { ops.push_back(op); });
if (ops.empty()) {
return std::nullopt;
}
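  // Tag every PwStreamResults op with the controller address, model name and
  // a freshly generated callback id so streamed results can later be routed
  // to the callback registered under that id.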
auto& stream_interface = GetGlobalStreamCallbackRegistry().stream_interface();
auto controller_address = stream_interface.controller_address();
auto controller_address_attr = builder.getStringAttr(controller_address);
auto model_name_attr = builder.getStringAttr(model_name);
const StreamCallbackId callback_id(
static_cast<int64_t>(tsl::random::New64()));
auto callback_id_attr = builder.getI64IntegerAttr(callback_id.id);
for (auto op : ops) {
op->setAttr("_controller_address", controller_address_attr);
op->setAttr("_model_name", model_name_attr);
op->setAttr("_callback_id", callback_id_attr);
}
return callback_id;
}
absl::Status StreamCallbackRegistry::CallbackState::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamedResult result) {
{
absl::MutexLock lock(&mu_);
if (closed_) {
return absl::InternalError(
"Failed to invole the callback that is closed.");
}
++num_outstanding_;
}
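  // The counter incremented above lets Close() block until every scheduled
  // callback has drained before this state is torn down.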
thread_pool->Schedule([this, result = std::move(result)]() mutable {
InvokeCallback(std::move(result));
absl::MutexLock lock(&mu_);
--num_outstanding_;
});
return absl::OkStatus();
}
void StreamCallbackRegistry::CallbackState::Close() {
{
absl::MutexLock lock(&mu_);
closed_ = true;
auto not_running = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) {
return num_outstanding_ == 0;
};
mu_.Await(absl::Condition(¬_running));
}
}
void StreamCallbackRegistry::CallbackState::InvokeCallback(
StreamedResult result) {
absl::Duration dequeue_latency = absl::Now() - result.enqueued_time;
interface().RecordDequeueLatency(model_name_, dequeue_latency);
tsl::profiler::TraceMe trace_me("StreamCallbackInvocation");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_.id},
{"step_id", step_id_.id},
});
});
absl::Time start_time = absl::Now();
callback_(std::move(result.tensors));
interface().RecordCallbackLatency(model_name_, absl::Now() - start_time);
}
absl::StatusOr<ScopedStreamCallback> StreamCallbackRegistry::Register(
absl::string_view model_name, StreamCallbackId callback_id, StepId step_id,
absl::AnyInvocable<
void(absl::flat_hash_map<std::string, tensorflow::Tensor>)>
callback) {
absl::MutexLock l(&mu_);
const auto [it, inserted] =
stream_callbacks_.insert({std::make_pair(callback_id, step_id), nullptr});
if (!inserted) {
return absl::AlreadyExistsError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id, " already exists"));
}
it->second = std::make_unique<CallbackState>(this, model_name, callback_id,
step_id, std::move(callback));
return ScopedStreamCallback(this, callback_id, step_id);
}
absl::Status StreamCallbackRegistry::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamCallbackId callback_id,
StepId step_id, StreamedResult result) {
absl::MutexLock lock(&mu_);
auto iter = stream_callbacks_.find({callback_id, step_id});
if (iter == stream_callbacks_.end()) {
return absl::NotFoundError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id,
" does not exist; this usually indicates that a streaming signature "
"was called by a non-streaming request"));
}
auto* state = iter->second.get();
DCHECK(state);
return state->Invoke(thread_pool, std::move(result));
}
std::unique_ptr<StreamCallbackRegistry::CallbackState>
StreamCallbackRegistry::Unregister(StreamCallbackId callback_id,
StepId step_id) {
absl::MutexLock l(&mu_);
const auto it = stream_callbacks_.find({callback_id, step_id});
if (it == stream_callbacks_.end()) {
return nullptr;
}
auto state = std::move(it->second);
stream_callbacks_.erase(it);
return state;
}
ScopedStreamCallback::ScopedStreamCallback(ScopedStreamCallback&& other)
: registry_(other.registry_),
callback_id_(other.callback_id_),
step_id_(other.step_id_) {
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
}
ScopedStreamCallback& ScopedStreamCallback::operator=(
ScopedStreamCallback&& other) {
Unregister();
registry_ = other.registry_;
callback_id_ = other.callback_id_;
step_id_ = other.step_id_;
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
return *this;
}
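// Typical lifecycle (illustrative sketch only):
//
//   {
//     TF_ASSIGN_OR_RETURN(auto scoped,
//                         GetGlobalStreamCallbackRegistry().Register(
//                             model_name, callback_id, step_id, callback));
//     // Results may be streamed to `callback` while `scoped` is alive.
//   }  // Destruction unregisters the callback and drains in-flight calls.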
void ScopedStreamCallback::Unregister() {
if (!callback_id_.has_value()) {
return;
}
tsl::profiler::TraceMe trace_me("ScopedStreamCallback::Unregister");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_->id},
{"step_id", step_id_.id},
});
});
DCHECK(registry_);
auto state = registry_->Unregister(*callback_id_, step_id_);
DCHECK(state);
state->Close();
callback_id_.reset();
}
StreamInterfaceFactory& GetGlobalStreamInterfaceFactory() {
static auto* stream_interface_factory = new StreamInterfaceFactory;
return *stream_interface_factory;
}
StreamCallbackRegistry& GetGlobalStreamCallbackRegistry() {
static auto* stream_callback_registry =
new StreamCallbackRegistry(GetGlobalStreamInterfaceFactory()
.CreateControllerStreamInterface()
.value());
return *stream_callback_registry;
}
}
} | #include "tensorflow/core/tfrt/runtime/stream.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::tensorflow::test::AsTensor;
using ::testing::AnyOf;
using ::testing::ElementsAreArray;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::status::StatusIs;
TEST(StreamTest, Simple) {
StreamCallbackId callback_id(1234);
StepId step_id(5678);
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> outputs;
ScopedStreamCallback scoped_stream_callback;
{
TF_ASSERT_OK_AND_ASSIGN(
scoped_stream_callback,
GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
outputs.push_back(std::move(arg));
}));
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected =
{{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}},
{{"c", AsTensor<int32_t>({300})}}};
auto thread = absl::WrapUnique(tsl::Env::Default()->StartThread(
tsl::ThreadOptions(), "fake_stream_client", [&]() {
for (const auto& map : expected) {
TfThreadPool thread_pool("test", 4);
CHECK_OK(GetGlobalStreamCallbackRegistry().Invoke(
&thread_pool, callback_id, step_id, {map, absl::Now()}));
}
}));
}
EXPECT_EQ(outputs.size(), 2);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["a"]),
ElementsAreArray({100}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["b"]),
ElementsAreArray({200}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]["c"]),
ElementsAreArray({300}));
ScopedStreamCallback scoped_stream_callback_copy;
scoped_stream_callback_copy = std::move(scoped_stream_callback);
auto status = GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
outputs.push_back(std::move(arg));
});
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(StreamTest, MultipleWriters) {
StreamCallbackId callback_id(1234);
StepId step_id(5678);
std::vector<absl::flat_hash_map<std::string, std::vector<int32_t>>> outputs;
{
TfThreadPool thread_pool("test", 4);
TF_ASSERT_OK_AND_ASSIGN(
auto scoped_stream_callback,
GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
absl::flat_hash_map<std::string, std::vector<int32_t>> out;
for (const auto& p : arg) {
out[p.first] = GetTfTensorData<int32_t>(p.second);
}
outputs.push_back(std::move(out));
}));
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected =
{{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}},
{{"c", AsTensor<int32_t>({300})}}};
for (const auto& p : expected) {
tsl::Env::Default()->SchedClosure([&, callback_id, step_id, p]() {
TfThreadPool thread_pool("test", 4);
GetGlobalStreamCallbackRegistry()
.Invoke(&thread_pool, callback_id, step_id, {p, absl::Now()})
.IgnoreError();
});
}
absl::SleepFor(absl::Microseconds(100));
}
LOG(INFO) << "StreamCallback receives " << outputs.size() << " outputs.";
for (const auto& output : outputs) {
EXPECT_THAT(
output,
AnyOf(UnorderedElementsAre(Pair("a", ElementsAreArray({100})),
Pair("b", ElementsAreArray({200}))),
UnorderedElementsAre(Pair("c", ElementsAreArray({300})))));
}
}
class TestStreamControllerInterface : public StreamControllerInterface {
public:
TestStreamControllerInterface()
: StreamControllerInterface("test_controller_address") {}
};
TEST(StreamControllerInterface, Initialize) {
GetGlobalStreamInterfaceFactory().RegisterController(
[]() { return std::make_unique<TestStreamControllerInterface>(); });
TF_ASSERT_OK_AND_ASSIGN(
auto controller_interface,
GetGlobalStreamInterfaceFactory().CreateControllerStreamInterface());
EXPECT_EQ(controller_interface->controller_address(),
"test_controller_address");
}
class TestStreamWorkerInterface : public StreamWorkerInterface {
public:
explicit TestStreamWorkerInterface(std::string worker_address)
: StreamWorkerInterface(worker_address) {}
absl::Status InvokeStreamCallback(
const StreamCallbackId& callback_id,
const std::vector<std::string>& names,
const std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>&
responses) override {
return absl::OkStatus();
}
};
TEST(StreamWorkerInterface, Initialize) {
GetGlobalStreamInterfaceFactory().RegisterWorker(
[](absl::string_view address)
-> absl::StatusOr<std::unique_ptr<TestStreamWorkerInterface>> {
return std::make_unique<TestStreamWorkerInterface>(
"test_worker_address");
});
TF_ASSERT_OK_AND_ASSIGN(
auto worker_interface,
GetGlobalStreamInterfaceFactory().CreateWorkerStreamInterface()(
"test_worker_address"));
EXPECT_EQ(worker_interface->controller_address(), "test_worker_address");
}
TEST(StepId, Generate) {
StepId step_id(1234);
EXPECT_EQ(step_id.id, 1234);
StepIdGenerator step_id_generator;
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(1));
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(2));
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(3));
}
TEST(StepId, GlobalInitial) {
EXPECT_EQ(GetGlobalInitialStepId(), 0);
TEST_ScopedInitialStepId test_id(127);
EXPECT_EQ(GetGlobalInitialStepId(), 127);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e6fafa5-5d2d-4a50-9c2d-5cb980bc6d2e | cpp | tensorflow/tensorflow | runtime | tensorflow/lite/delegates/gpu/gl/runtime.cc | tensorflow/core/tfrt/runtime/runtime_test.cc | #include "tensorflow/lite/delegates/gpu/gl/runtime.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_call.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_errors.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_texture.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
struct TextureF16Maker {
absl::Status operator()(const uint3& size) const {
return CreateReadOnlyImageTextureF16(size, data, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadOnlyImageTextureF16(size, data, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadOnlyImageTextureF16(uint2(static_cast<uint32_t>(size), 1U),
data, gl_texture);
}
absl::Span<const uint16_t> data;
GlTexture* gl_texture;
};
struct TextureF32Maker {
absl::Status operator()(const uint3& size) const {
return CreateReadOnlyImageTexture(size, data, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadOnlyImageTexture(size, data, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadOnlyImageTexture(uint2(static_cast<uint32_t>(size), 1U),
data, gl_texture);
}
absl::Span<const float> data;
GlTexture* gl_texture;
};
absl::Status MakeGlTexture(const Object& object, const ObjectData& data,
GlTexture* gl_texture) {
if (object.access == AccessType::READ_WRITE ||
object.access == AccessType::WRITE) {
return absl::InvalidArgumentError("Read-write textures are not supported");
}
if (object.data_type != DataType::FLOAT16 &&
object.data_type != DataType::FLOAT32) {
return absl::InvalidArgumentError(
"Textures support float16 or float32 only.");
}
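  // The payload arrives as raw bytes; it is reinterpreted as the element
  // type below after checking it holds a whole number of elements.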
switch (object.data_type) {
case DataType::FLOAT16: {
if (data.size() % 2 != 0) {
return absl::InvalidArgumentError("Texture size is not aligned");
}
return std::visit(
TextureF16Maker{
.data = absl::MakeConstSpan(
reinterpret_cast<const uint16_t*>(data.data()),
data.size() / 2),
.gl_texture = gl_texture,
},
object.size);
}
case DataType::FLOAT32: {
if (data.size() % sizeof(float) != 0) {
return absl::InvalidArgumentError("Texture size is not aligned");
}
return std::visit(
TextureF32Maker{
.data = absl::MakeConstSpan(
reinterpret_cast<const float*>(data.data()),
data.size() / sizeof(float)),
.gl_texture = gl_texture,
},
object.size);
}
default:
return absl::InvalidArgumentError("Unsupported textures data type.");
}
}
struct TextureRefMaker {
absl::Status operator()(const uint3& size) const {
return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadWriteRgbaImageTexture(
type, uint2(static_cast<uint32_t>(size), 1U), gl_texture);
}
DataType type;
GlTexture* gl_texture;
};
absl::Status MakeGlTextureRef(const Object& object, GlTexture* gl_texture) {
return std::visit(TextureRefMaker{object.data_type, gl_texture}, object.size);
}
absl::Status MakeGlBuffer(const Object& object, const ObjectData& data,
GlBuffer* gl_buffer) {
if (data.size() % SizeOf(object.data_type) != 0) {
return absl::InvalidArgumentError("Buffer size is not aligned");
}
return CreateReadOnlyShaderStorageBuffer(absl::MakeConstSpan(data),
gl_buffer);
}
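// Eager binding: the object must already be registered in `objects`, and the
// returned functor captures the resolved pointer directly.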
absl::Status MakeBindingFunc(const Object& object, uint32_t id,
const ObjectManager* objects,
std::function<absl::Status()>* binding_func) {
const uint32_t binding = object.binding;
switch (object.object_type) {
case ObjectType::BUFFER: {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
size_t size_in_bytes = ByteSizeOf(object);
if (ptr->bytes_size() < size_in_bytes) {
return absl::FailedPreconditionError(
absl::StrCat("Buffer ", id, " size in bytes ", ptr->bytes_size(),
" < requested size_in_bytes ", size_in_bytes));
}
*binding_func = [=]() { return ptr->BindToIndex(binding); };
break;
}
case ObjectType::TEXTURE: {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
*binding_func = [=]() { return ptr->BindAsReadWriteImage(binding); };
break;
}
case ObjectType::UNKNOWN:
return absl::InvalidArgumentError("Unknown object type");
}
return absl::OkStatus();
}
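// Late binding: external objects may be registered or replaced between
// compilation and execution, so the functor re-resolves the object by id and
// re-validates it on every call.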
absl::Status MakeLateBindingFunc(const Object& object, uint32_t id,
const ObjectManager* objects,
std::function<absl::Status()>* binding_func) {
const uint32_t binding = object.binding;
switch (object.object_type) {
case ObjectType::BUFFER: {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
*binding_func = [=]() {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
if (!ptr->is_valid()) {
return absl::InvalidArgumentError("Buffer is not initialized.");
}
size_t size_in_bytes = ByteSizeOf(object);
if (ptr->bytes_size() < size_in_bytes) {
return absl::FailedPreconditionError(
absl::StrCat("Buffer ", id, " size in bytes ", ptr->bytes_size(),
" < requested size_in_bytes ", size_in_bytes));
}
return ptr->BindToIndex(binding);
};
break;
}
case ObjectType::TEXTURE: {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
*binding_func = [=]() {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
if (!ptr->is_valid()) {
return absl::InvalidArgumentError("Texture is not initialized.");
}
return ptr->BindAsReadWriteImage(binding);
};
break;
}
case ObjectType::UNKNOWN:
return absl::InvalidArgumentError("Unknown object type");
}
return absl::OkStatus();
}
}
Runtime::Runtime(const RuntimeOptions& options, const GpuInfo& gpu_info,
CommandQueue* command_queue,
const ObjectManager* external_objects)
: options_(options),
gpu_info_(gpu_info),
external_objects_(external_objects),
command_queue_(command_queue) {
programs_.reserve(256);
if (options_.bundle_readonly_objects) {
shared_readonly_buffer_ = std::make_unique<SharedBufferData>();
}
}
absl::Status Runtime::AddProgram(const GlShader& shader,
const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& num_workgroups) {
GlProgram program;
RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
for (auto& parameter : parameters) {
RETURN_IF_ERROR(program.SetParameter(parameter));
}
programs_.emplace_back(
CompiledProgramDescriptor{std::move(program), num_workgroups, {}});
for (auto& object : objects) {
auto& program = programs_.back();
BindFunc binding_func;
if (IsRef(object)) {
absl::Status status = MakeLateBindingFunc(
object, GetRef(object), external_objects_, &binding_func);
if (!status.ok()) {
if (absl::IsNotFound(status)) {
program.refs.push_back(object);
continue;
}
return status;
}
} else {
uint32_t id;
RETURN_IF_ERROR(AllocateConstObject(object, &id));
RETURN_IF_ERROR(
MakeBindingFunc(object, id, &const_objects_, &binding_func));
}
program.bindings.push_back(std::move(binding_func));
}
return absl::OkStatus();
}
absl::Status Runtime::AllocateInternalObject(const Object& object) {
const ObjectRef ref = GetRef(object);
switch (object.object_type) {
case ObjectType::BUFFER: {
GlBuffer gl_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<uint8_t>(
ByteSizeOf(object), &gl_buffer));
RETURN_IF_ERROR(
internal_objects_.RegisterBuffer(ref, std::move(gl_buffer)));
break;
}
case ObjectType::TEXTURE: {
GlTexture gl_texture;
RETURN_IF_ERROR(MakeGlTextureRef(object, &gl_texture));
RETURN_IF_ERROR(
internal_objects_.RegisterTexture(ref, std::move(gl_texture)));
break;
}
default:
return absl::InternalError("Unexpected internal object type");
}
return absl::OkStatus();
}
absl::Status Runtime::AllocateConstObject(const Object& object, uint32_t* id) {
const ObjectData* data = GetData(object);
if (data == nullptr) {
return absl::InternalError(
"Unable to allocate reference as a const object");
}
*id = next_const_id_++;
switch (object.object_type) {
case ObjectType::BUFFER: {
GlBuffer gl_buffer;
if (!shared_readonly_buffer_ ||
!shared_readonly_buffer_->Add(*data, &gl_buffer)) {
RETURN_IF_ERROR(MakeGlBuffer(object, *data, &gl_buffer));
}
RETURN_IF_ERROR(const_objects_.RegisterBuffer(*id, std::move(gl_buffer)));
break;
}
case ObjectType::TEXTURE: {
GlTexture gl_texture;
RETURN_IF_ERROR(MakeGlTexture(object, *data, &gl_texture));
RETURN_IF_ERROR(
const_objects_.RegisterTexture(*id, std::move(gl_texture)));
break;
}
case ObjectType::UNKNOWN:
return absl::InternalError("Unknown object type");
}
return absl::OkStatus();
}
absl::Status Runtime::PrepareForExecution() {
if (shared_readonly_buffer_ && !shared_readonly_buffer_->empty()) {
GlBuffer shared_buffer;
RETURN_IF_ERROR(
shared_readonly_buffer_->CreateSharedGlBuffer(&shared_buffer));
shared_readonly_buffer_.reset(nullptr);
RETURN_IF_ERROR(const_objects_.RegisterBuffer(next_const_id_++,
std::move(shared_buffer)));
}
if (options_.reuse_internal_objects) {
std::vector<Object> shared_objects;
RETURN_IF_ERROR(AssignInternalObjects(&shared_objects));
for (const Object& object : shared_objects) {
RETURN_IF_ERROR(AllocateInternalObject(object));
}
}
for (auto& program : programs_) {
for (auto& object : program.refs) {
BindFunc binding;
ObjectRef ref = GetRef(object);
absl::Status status =
MakeBindingFunc(object, ref, &internal_objects_, &binding);
if (!status.ok()) {
if (absl::IsNotFound(status)) {
RETURN_IF_ERROR(AllocateInternalObject(object));
RETURN_IF_ERROR(
MakeBindingFunc(object, ref, &internal_objects_, &binding));
} else {
return status;
}
}
program.bindings.push_back(std::move(binding));
}
program.refs.clear();
}
return absl::OkStatus();
}
namespace {
const size_t kNotAssigned = std::numeric_limits<size_t>::max();
struct CombinedUsageRecords {
std::vector<TensorUsageRecord<size_t>> buffers;
std::vector<TensorUsageRecord<size_t>> textures_1d;
std::vector<TensorUsageRecord<uint2>> textures_2d;
std::vector<TensorUsageRecord<uint3>> textures_3d;
std::vector<size_t> usage_refs;
};
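// Each usage record tracks the first and last program that touches a tensor;
// the memory planner may share storage between objects whose task intervals
// do not overlap.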
template <typename TensorSizeT>
void UpdateUsageRecord(TensorUsageRecord<TensorSizeT>* usage_rec,
size_t task_id) {
usage_rec->first_task = std::min(usage_rec->first_task, task_id);
usage_rec->last_task = std::max(usage_rec->last_task, task_id);
}
struct AddUsageRecordForTextureFunc {
void operator()(const uint3& size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_3d.size();
usage_records->textures_3d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_3d[usage_ref], program_id);
}
}
void operator()(const uint2& size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_2d.size();
usage_records->textures_2d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_2d[usage_ref], program_id);
}
}
void operator()(size_t size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_1d.size();
usage_records->textures_1d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_1d[usage_ref], program_id);
}
}
CombinedUsageRecords* usage_records;
const ObjectRef& object_ref;
const size_t program_id;
};
absl::Status AddUsageRecord(CombinedUsageRecords* usage_records,
const Object& object, const size_t program_id) {
auto ref = GetRef(object);
if (ref >= usage_records->usage_refs.size()) {
usage_records->usage_refs.resize(ref + 1, kNotAssigned);
}
auto& usage_ref = usage_records->usage_refs[ref];
if (object.object_type == ObjectType::BUFFER) {
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->buffers.size();
usage_records->buffers.emplace_back(
NumElements(object.size),
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->buffers[usage_ref], program_id);
}
return absl::OkStatus();
}
if (object.object_type == ObjectType::TEXTURE) {
std::visit(AddUsageRecordForTextureFunc{usage_records, ref, program_id},
object.size);
return absl::OkStatus();
}
return absl::InternalError("Unexpected object type");
}
absl::Status ApplyBuffersAssignment(
const ObjectsAssignment<size_t>& assignment,
const std::vector<size_t>& global_ref_to_usage_rec,
const std::vector<Object*>& global_ref_to_object_ptr,
std::vector<ObjectRef>* global_ref_to_shared_ref,
std::vector<Object>* shared_objects) {
std::vector<ObjectRef> assigned_id_to_shared_ref(
assignment.object_sizes.size(), kInvalidObjectRef);
for (size_t global_ref = 0; global_ref < global_ref_to_usage_rec.size();
++global_ref) {
const auto& usage_rec_id = global_ref_to_usage_rec[global_ref];
Object* object = global_ref_to_object_ptr[global_ref];
if (usage_rec_id == kNotAssigned || object == nullptr ||
object->object_type != ObjectType::BUFFER) {
continue;
}
size_t assigned_id = assignment.object_ids[usage_rec_id];
ObjectRef shared_ref = assigned_id_to_shared_ref[assigned_id];
if (shared_ref == kInvalidObjectRef) {
shared_ref = shared_objects->size();
Object shared_object = *object;
shared_object.access = AccessType::READ_WRITE;
shared_object.object = shared_ref;
shared_object.size = assignment.object_sizes[assigned_id];
shared_objects->push_back(std::move(shared_object));
assigned_id_to_shared_ref[assigned_id] = shared_ref;
}
(*global_ref_to_shared_ref)[global_ref] = shared_ref;
}
return absl::OkStatus();
}
template <typename ObjectSizeT>
absl::Status ApplyTexturesAssignment(
const ObjectsAssignment<ObjectSizeT>& assignment,
const std::vector<size_t>& global_ref_to_usage_rec,
const std::vector<Object*>& global_ref_to_object_ptr,
std::vector<ObjectRef>* global_ref_to_shared_ref,
std::vector<Object>* shared_objects) {
std::vector<ObjectRef> assigned_id_to_shared_ref(
assignment.object_sizes.size(), kInvalidObjectRef);
for (size_t global_ref = 0; global_ref < global_ref_to_usage_rec.size();
++global_ref) {
const auto& usage_rec_id = global_ref_to_usage_rec[global_ref];
Object* object = global_ref_to_object_ptr[global_ref];
if (usage_rec_id == kNotAssigned || object == nullptr ||
object->object_type != ObjectType::TEXTURE ||
!std::holds_alternative<ObjectSizeT>(object->size)) {
continue;
}
size_t assigned_id = assignment.object_ids[usage_rec_id];
ObjectRef shared_ref = assigned_id_to_shared_ref[assigned_id];
if (shared_ref == kInvalidObjectRef) {
shared_ref = shared_objects->size();
Object shared_object = *object;
shared_object.access = AccessType::READ_WRITE;
shared_object.object = shared_ref;
shared_object.size = assignment.object_sizes[assigned_id];
shared_objects->push_back(std::move(shared_object));
assigned_id_to_shared_ref[assigned_id] = shared_ref;
}
(*global_ref_to_shared_ref)[global_ref] = shared_ref;
}
return absl::OkStatus();
}
}
absl::Status Runtime::AssignInternalObjects(
std::vector<Object>* shared_objects) {
std::map<DataType, CombinedUsageRecords> usage_records_by_data_type;
std::vector<Object*> global_ref_to_object_ptr;
for (size_t i = 0; i < programs_.size(); ++i) {
for (auto& object : programs_[i].refs) {
auto ref = GetRef(object);
if (ref >= global_ref_to_object_ptr.size()) {
global_ref_to_object_ptr.resize(ref + 1, nullptr);
}
if (global_ref_to_object_ptr[ref] == nullptr) {
global_ref_to_object_ptr[ref] = &object;
}
RETURN_IF_ERROR(AddUsageRecord(
&usage_records_by_data_type[object.data_type], object, i));
}
}
std::vector<ObjectRef> global_ref_to_shared_ref(
global_ref_to_object_ptr.size(), kInvalidObjectRef);
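  // Plan each data type separately: objects with different element types are
  // never aliased onto the same shared storage here.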
for (const auto& it : usage_records_by_data_type) {
const CombinedUsageRecords& usage_records = it.second;
if (!usage_records.buffers.empty()) {
ObjectsAssignment<size_t> buffer_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.buffers,
MemoryStrategy::GREEDY_BEST,
&buffer_assignment));
RETURN_IF_ERROR(ApplyBuffersAssignment(
buffer_assignment, usage_records.usage_refs, global_ref_to_object_ptr,
&global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_1d.empty()) {
ObjectsAssignment<size_t> texture_1d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_1d,
MemoryStrategy::GREEDY_BEST,
&texture_1d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_1d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_2d.empty()) {
ObjectsAssignment<uint2> texture_2d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_2d,
MemoryStrategy::GREEDY_IN_ORDER,
&texture_2d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_2d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_3d.empty()) {
ObjectsAssignment<uint3> texture_3d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_3d,
MemoryStrategy::GREEDY_IN_ORDER,
&texture_3d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_3d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
}
for (size_t i = 0; i < programs_.size(); ++i) {
for (auto& object : programs_[i].refs) {
object.object = global_ref_to_shared_ref[GetRef(object)];
}
}
return absl::OkStatus();
}
absl::Status Runtime::Execute() {
for (const auto& descriptor : programs_) {
for (auto& b : descriptor.bindings) {
RETURN_IF_ERROR(b());
}
RETURN_IF_ERROR(command_queue_->Dispatch(descriptor.program,
descriptor.num_workgroups));
}
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/tfrt/runtime/runtime.h"
#include <gtest/gtest.h>
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(RuntimeTest, GlobalRuntimeWorks) {
EXPECT_EQ(GetGlobalRuntime(), nullptr);
SetGlobalRuntime(Runtime::Create(4));
EXPECT_NE(GetGlobalRuntime(), nullptr);
EXPECT_EQ(GetGlobalRuntime(), GetGlobalRuntime());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fdff665-ed54-4a1b-8aa2-c0280a997a05 | cpp | tensorflow/tensorflow | tf_threadpool_concurrent_work_queue | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc | #include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <utility>
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
using ::tensorflow::thread::ThreadPoolInterface;
absl::StatusOr<std::unique_ptr<WorkQueueInterface>>
TfThreadPoolWorkQueue::InitializeRequest(int64_t request_id) const {
return {std::make_unique<TfThreadPoolWorkQueue>(
request_id, intra_op_threadpool_, inter_op_threadpool_)};
}
void TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) {
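  // ThreadPoolInterface::Schedule expects a copyable std::function, while
  // tfrt::TaskFunction is move-only, so the task is moved to the heap and
  // deleted once it has run.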
auto* copy = new tfrt::TaskFunction(
tensorflow::tfrt_stub::WrapWork(id(), "inter", std::move(work)));
inter_op_threadpool_->Schedule([copy] {
(*copy)();
delete copy;
});
}
std::optional<tfrt::TaskFunction> TfThreadPoolWorkQueue::AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) {
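  // Blocking work shares the inter-op pool and the `allow_queuing` hint is
  // ignored; returning std::nullopt signals the task was accepted rather
  // than handed back to the caller.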
AddTask(std::move(work));
return std::nullopt;
}
void TfThreadPoolWorkQueue::Quiesce() {
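  // Left empty in this implementation; callers that need completion
  // guarantees should Await() the async values they care about instead.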
}
void TfThreadPoolWorkQueue::Await(
tfrt::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) {
tfrt::latch values_remaining(values.size());
for (auto& value : values) {
value->AndThen([&values_remaining]() { values_remaining.count_down(); });
}
values_remaining.wait();
}
bool TfThreadPoolWorkQueue::IsInWorkerThread() const {
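  // Always reports true: this wrapper does not track which threads belong to
  // the wrapped pools.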
return true;
}
std::unique_ptr<TfThreadPoolWorkQueue> CreateDefaultTfThreadPoolWorkQueue(
int num_inter_op_threads, int num_intra_op_threads) {
struct ThreadPools {
TfThreadPool inter_op_threadpool;
TfThreadPool intra_op_threadpool;
ThreadPools(int num_inter_op_threads, int num_intra_op_threads)
: inter_op_threadpool("default_work_queue_inter", num_inter_op_threads),
intra_op_threadpool("default_work_queue_intra",
num_intra_op_threads) {}
};
class Wrapper : public TfThreadPoolWorkQueue {
public:
explicit Wrapper(std::unique_ptr<ThreadPools> thread_pools)
: TfThreadPoolWorkQueue(
&thread_pools->intra_op_threadpool,
&thread_pools->inter_op_threadpool),
thread_pools_(std::move(thread_pools)) {}
~Wrapper() override = default;
private:
std::unique_ptr<ThreadPools> thread_pools_;
};
return std::make_unique<Wrapper>(std::make_unique<ThreadPools>(
num_inter_op_threads, num_intra_op_threads));
}
}
} | #include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
const int32_t kNumThreads = 2;
class TfThreadpoolWorkQueueTest : public ::testing::Test {
protected:
TfThreadpoolWorkQueueTest()
: tf_threadpool_cwq_(CreateDefaultTfThreadPoolWorkQueue(
kNumThreads,
kNumThreads)) {}
std::unique_ptr<TfThreadPoolWorkQueue> tf_threadpool_cwq_;
};
TEST_F(TfThreadpoolWorkQueueTest, GetParallelismLevelOk) {
EXPECT_GT(tf_threadpool_cwq_->GetParallelismLevel(), 0);
}
TEST_F(TfThreadpoolWorkQueueTest, GetNameOk) {
EXPECT_EQ(tf_threadpool_cwq_->name(), "TfThreadPoolWorkQueue");
}
TEST_F(TfThreadpoolWorkQueueTest, InitializeRequestOk) {
tfrt::RequestContextBuilder ctx_builder(nullptr,
nullptr);
auto queue = tf_threadpool_cwq_->InitializeRequest(0);
TF_ASSERT_OK(queue.status());
EXPECT_NE(*queue, nullptr);
EXPECT_NE((*queue)->GetIntraOpThreadPool(), nullptr);
}
TEST_F(TfThreadpoolWorkQueueTest, IsInWorkerThreadOk) {
EXPECT_TRUE(tf_threadpool_cwq_->IsInWorkerThread());
}
TEST_F(TfThreadpoolWorkQueueTest, RunningBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningNonBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningMixedTask) {
tfrt::latch latch(20);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd13b244-2252-4f7c-8e58-0f8f18afb81d | cpp | tensorflow/tensorflow | work_queue_interface | tensorflow/core/tfrt/runtime/work_queue_interface.cc | tensorflow/core/tfrt/runtime/work_queue_interface_test.cc | #include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tfrt/host_context/execution_context.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
class DefaultWorkQueueWrapper : public WorkQueueInterface {
public:
explicit DefaultWorkQueueWrapper(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue)
: WorkQueueInterface(0),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(0, intra_thread_pool),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(int64_t request_id,
tfrt::ConcurrentWorkQueue* work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(request_id, intra_thread_pool),
work_queue_(work_queue) {}
~DefaultWorkQueueWrapper() override = default;
private:
std::string name() const override { return work_queue_->name(); }
void AddTask(tfrt::TaskFunction work) override {
work_queue_->AddTask(WrapWork(id(), "inter", std::move(work)));
}
std::optional<tfrt::TaskFunction> AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) override {
return work_queue_->AddBlockingTask(
WrapWork(id(), "blocking", std::move(work)), allow_queuing);
}
void Await(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) override {
work_queue_->Await(values);
}
void Quiesce() override { work_queue_->Quiesce(); }
int GetParallelismLevel() const override {
return work_queue_->GetParallelismLevel();
}
bool IsInWorkerThread() const override {
return work_queue_->IsInWorkerThread();
}
absl::StatusOr<std::unique_ptr<WorkQueueInterface>> InitializeRequest(
int64_t request_id) const override {
return {std::make_unique<DefaultWorkQueueWrapper>(request_id, work_queue_,
GetIntraOpThreadPool())};
}
private:
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue_owner_;
tfrt::ConcurrentWorkQueue* work_queue_ = nullptr;
};
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue));
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue),
intra_thread_pool);
}
}
} | #include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <thread>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(DefaultWorkQueueWrapperTest, Name) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->name(), work_queue_ptr->name());
}
TEST(DefaultWorkQueueWrapperTest, AddTask_OnlyTask) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
work_queue_wrapper->AddTask(
tfrt::TaskFunction([av] { av->emplace<int>(0); }));
work_queue_wrapper->Await(std::move(av));
}
TEST(DefaultWorkQueueWrapperTest, AddBlockingTask_TaskAndAllowQueueing) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
std::thread thread{[&] {
auto work = work_queue_wrapper->AddBlockingTask(
tfrt::TaskFunction([&] { av->emplace<int>(0); }),
true);
}};
work_queue_wrapper->Await(std::move(av));
thread.join();
}
TEST(DefaultWorkQueueWrapperTest, GetParallelismLevel) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->GetParallelismLevel(),
work_queue_ptr->GetParallelismLevel());
}
TEST(DefaultWorkQueueWrapperTest, IsInWorkerThread) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->IsInWorkerThread(),
work_queue_ptr->IsInWorkerThread());
}
TEST(DefaultWorkQueueWrapperTest, IntraOpThreadPool) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
TfThreadPool intra_op_thread_pool("tf_intra",
1);
auto work_queue_wrapper =
WrapDefaultWorkQueue(std::move(work_queue), &intra_op_thread_pool);
TF_ASSERT_OK_AND_ASSIGN(auto queue, work_queue_wrapper->InitializeRequest(
0));
EXPECT_NE(queue, nullptr);
EXPECT_EQ(queue->GetIntraOpThreadPool(), &intra_op_thread_pool);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/work_queue_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/work_queue_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b63e28df-3ad9-4432-842c-d1391ddc96bc | cpp | tensorflow/tensorflow | fallback_tensor | tensorflow/core/tfrt/utils/fallback_tensor.cc | tensorflow/core/tfrt/utils/fallback_tensor_test.cc | #include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include <utility>
#include "tensorflow/core/common_runtime/dma_helper.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
class ImmutableTensorBuffer final : public tensorflow::TensorBuffer {
public:
static tensorflow::core::RefCountPtr<ImmutableTensorBuffer> Create(
tensorflow::Tensor tensor);
explicit ImmutableTensorBuffer(tensorflow::Tensor tensor)
: tensorflow::TensorBuffer(tensor.data()), tensor_(std::move(tensor)) {
if (auto* buf = tensorflow::DMAHelper::buffer(&tensor_)) {
root_buffer_ = buf->root_buffer();
} else {
root_buffer_ = this;
}
}
~ImmutableTensorBuffer() override = default;
size_t size() const override {
return tensorflow::DMAHelper::buffer(&tensor_)->size();
}
bool OwnsMemory() const override { return false; }
tensorflow::TensorBuffer* root_buffer() override { return root_buffer_; }
void FillAllocationDescription(AllocationDescription* proto) const override {}
bool GetAllocatedBytes(size_t*) const override { return false; }
private:
tensorflow::Tensor tensor_;
tensorflow::TensorBuffer* root_buffer_ = nullptr;
};
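// The buffer above aliases the source tensor's memory while holding the
// tensor itself alive via `tensor_`, so an ImmutableTensor pins the original
// storage rather than copying it.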
tensorflow::core::RefCountPtr<ImmutableTensorBuffer>
ImmutableTensorBuffer::Create(tensorflow::Tensor tensor) {
return tensorflow::core::RefCountPtr<ImmutableTensorBuffer>(
new ImmutableTensorBuffer(std::move(tensor)));
}
}
ImmutableTensor ImmutableTensor::Create(tensorflow::Tensor tensor) {
auto dtype = tensor.dtype();
auto shape = tensor.shape();
auto immutable_buffer = ImmutableTensorBuffer::Create(std::move(tensor));
return ImmutableTensor(
tensorflow::Tensor(dtype, shape, std::move(immutable_buffer)));
}
}
} | #include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(FallbackTensorTest, ImmutableTensor) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1);
ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = immutable_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne());
EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes());
}
TEST(FallbackTensorTest, StringImmutableTensor) {
tensorflow::tstring scalar = "string";
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
ASSERT_EQ(immutable_tensor.tensor().NumElements(), 1);
ASSERT_EQ(immutable_tensor.tensor().dtype(), tensorflow::DT_STRING);
auto flat = immutable_tensor.tensor().flat<tensorflow::tstring>();
EXPECT_EQ(flat(0), "string");
EXPECT_FALSE(immutable_tensor.tensor().RefCountIsOne());
EXPECT_EQ(tensor.TotalBytes(), immutable_tensor.tensor().TotalBytes());
}
TEST(FallbackTensorTest, FallbackTensor) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
{
FallbackTensor fallback_tensor(tensor);
EXPECT_FALSE(fallback_tensor.is_immutable());
ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1);
ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = fallback_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
FallbackTensor copy(fallback_tensor);
FallbackTensor assign;
assign = fallback_tensor;
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
fallback_tensor = {};
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
}
auto immutable_tensor = ImmutableTensor::Create(tensor);
{
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
ASSERT_EQ(fallback_tensor.tensor().NumElements(), 1);
ASSERT_EQ(fallback_tensor.tensor().dtype(), tensorflow::DT_INT32);
auto flat = fallback_tensor.tensor().flat<int32_t>();
EXPECT_EQ(flat(0), 123);
FallbackTensor copy(fallback_tensor);
FallbackTensor assign;
assign = fallback_tensor;
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
fallback_tensor = {};
ASSERT_EQ(copy.tensor().NumElements(), 1);
ASSERT_EQ(copy.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(copy.tensor().flat<int32_t>()(0), 123);
ASSERT_EQ(assign.tensor().NumElements(), 1);
ASSERT_EQ(assign.tensor().dtype(), tensorflow::DT_INT32);
EXPECT_EQ(assign.tensor().flat<int32_t>()(0), 123);
}
}
TEST(FallbackTensorTest, FallbackTensorCopy) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
{
FallbackTensor fallback_tensor(tensor);
EXPECT_FALSE(fallback_tensor.is_immutable());
auto copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
}
auto immutable_tensor = ImmutableTensor::Create(tensor);
{
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
auto copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
}
}
TEST(FallbackTensorTest, FallbackTensorCopyRootBuffer) {
int32_t scalar = 123;
tensorflow::Tensor tensor(scalar);
auto immutable_tensor = ImmutableTensor::Create(tensor);
FallbackTensor fallback_tensor(&immutable_tensor);
EXPECT_TRUE(fallback_tensor.is_immutable());
EXPECT_EQ(fallback_tensor.buffer()->root_buffer(),
tensorflow::DMAHelper::buffer(&tensor));
FallbackTensor copy = fallback_tensor;
EXPECT_TRUE(copy.is_immutable());
EXPECT_EQ(copy.buffer()->root_buffer(),
tensorflow::DMAHelper::buffer(&tensor));
}
TEST(FallbackTensorTest, EmptyTensor) {
tensorflow::Tensor tensor(tensorflow::DT_FLOAT,
tensorflow::TensorShape({1, 0}));
FallbackTensor fallback_tensor(tensor);
auto copy = fallback_tensor;
ASSERT_FALSE(copy.buffer());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/fallback_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/fallback_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3e3da25-86c9-4535-8cb3-d5716a14c2c4 | cpp | tensorflow/tensorflow | tfrt_graph_execution_state | tensorflow/core/tfrt/utils/tfrt_graph_execution_state.cc | tensorflow/core/tfrt/utils/tfrt_graph_execution_state_test.cc | #include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/upgrade_graph.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
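// Returns the names of functions in `graph_def` that are referenced only by
// the whitelisted call ops below (PartitionedCall, StatefulPartitionedCall,
// BatchFunction), whether from the top-level graph or from other function
// bodies. Only such functions are later optimized individually.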
absl::flat_hash_set<std::string> FindFunctionsToOptimize(
const GraphDef& graph_def) {
static const auto* const kOpWhitelist = new absl::flat_hash_set<std::string>{
"PartitionedCall", "StatefulPartitionedCall", "BatchFunction"};
  absl::flat_hash_map<std::string, absl::flat_hash_set<std::string>>
      function_to_ops;
auto build_map = [&](const auto& node_defs) {
for (const auto& node_def : node_defs) {
for (const auto& p : node_def.attr()) {
const AttrValue& attr_value = p.second;
if (!attr_value.has_func()) continue;
function_to_ops[attr_value.func().name()].insert(node_def.op());
}
}
};
build_map(graph_def.node());
for (const auto& function_def : graph_def.library().function()) {
build_map(function_def.node_def());
}
absl::flat_hash_set<std::string> functions_to_optimize;
for (const auto& p : function_to_ops) {
const std::string& function_name = p.first;
const absl::flat_hash_set<std::string>& ops = p.second;
if (std::all_of(ops.begin(), ops.end(), [](const auto& op) {
return kOpWhitelist->contains(op);
})) {
functions_to_optimize.insert(function_name);
}
}
return functions_to_optimize;
}
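// Generates any missing shared names for resource ops in `graph_def`, and if
// `run_placer_grappler_on_functions` is true, returns the set of functions
// that are safe to optimize separately; otherwise returns an empty set.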
absl::StatusOr<absl::flat_hash_set<std::string>> PreprocessGraph(
tensorflow::GraphDef& graph_def, bool run_placer_grappler_on_functions) {
if (VLOG_IS_ON(1)) {
DumpGraphDefToFile("before_generate_resource_shared_name_graph_def",
graph_def);
}
TF_RETURN_IF_ERROR(tensorflow::GenerateResourceSharedNameIfEmpty(
graph_def, tensorflow::OpRegistry::Global()));
if (VLOG_IS_ON(2)) {
DumpGraphDefToFile("after_generate_resource_shared_name_graph_def",
graph_def);
}
if (run_placer_grappler_on_functions) {
return FindFunctionsToOptimize(graph_def);
}
return absl::flat_hash_set<std::string>();
}
}
absl::StatusOr<std::unique_ptr<TfrtGraphExecutionState>>
TfrtGraphExecutionState::Create(const TfrtGraphExecutionState::Options& options,
tensorflow::GraphDef graph_def,
const FallbackState& fallback_state) {
TF_ASSIGN_OR_RETURN(
auto functions_to_optimize,
PreprocessGraph(graph_def, options.run_placer_grappler_on_functions));
TF_ASSIGN_OR_RETURN(auto graph_execution_state,
fallback_state.CreateGraphExecutionState(
std::move(graph_def), options.run_placer_on_graph));
return std::make_unique<TfrtGraphExecutionState>(
options, std::move(graph_execution_state), fallback_state,
std::move(functions_to_optimize));
}
namespace {
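// Fills `callable_options` with the given feed, fetch, and target tensor
// names, and returns a copy of the populated options.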
CallableOptions PopulateCallableOptions(
CallableOptions& callable_options,
absl::Span<const std::string> feed_tensor_names,
absl::Span<const std::string> fetch_tensor_names,
absl::Span<const std::string> target_tensor_names) {
callable_options.mutable_feed()->Reserve(feed_tensor_names.size());
for (const auto& feed : feed_tensor_names) {
callable_options.add_feed(feed);
}
callable_options.mutable_fetch()->Reserve(fetch_tensor_names.size());
for (const auto& fetch : fetch_tensor_names) {
callable_options.add_fetch(fetch);
}
callable_options.mutable_target()->Reserve(target_tensor_names.size());
for (const auto& target : target_tensor_names) {
callable_options.add_target(target);
}
return callable_options;
}
tensorflow::GraphDef CreateGraphDefFromGraphAndFlibDef(
const tensorflow::Graph& graph,
const tensorflow::FunctionLibraryDefinition& flib_def) {
tensorflow::GraphDef graph_def;
graph.ToGraphDef(&graph_def);
*graph_def.mutable_library() = flib_def.ToProto();
return graph_def;
}
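// Prunes `graph_def` down to the transitive fan-in of the feeds and fetches
// in `callable_options`, removes ref variables from V1 control flow, strips
// the _input_shapes attribute from functions, and converts the result into a
// tensorflow::Graph.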
absl::StatusOr<std::unique_ptr<tensorflow::Graph>> CreatePrunedGraph(
tensorflow::GraphDef graph_def, const CallableOptions& callable_options) {
VLOG(1) << "Creating pruned graph: " << callable_options.DebugString();
TF_RETURN_IF_ERROR(PruneGraphDef(graph_def, callable_options));
if (VLOG_IS_ON(2)) {
DumpGraphDefToFile("before_eliminate_ref_variables_graph_def", graph_def);
}
TF_RETURN_IF_ERROR(EliminateRefVariablesFromV1ControlFlow(graph_def));
RemoveInputShapesInFunctions(graph_def);
auto pruned_graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
tensorflow::GraphConstructorOptions options;
options.allow_internal_ops = true;
options.add_default_attributes = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(options, std::move(graph_def),
pruned_graph.get()));
return pruned_graph;
}
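// Creates an Identity node named `identity_name` reading from `input_name`,
// inheriting the device placement and the "T" attribute from `node`.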
NodeDef CreateNewIdentityNode(const NodeDef& node,
const std::string& input_name,
const std::string& identity_name) {
NodeDef identity;
identity.set_name(identity_name);
identity.set_op("Identity");
identity.add_input(input_name);
identity.set_device(node.device());
for (const auto& name_and_attr : node.attr()) {
if (name_and_attr.first == "T") {
identity.mutable_attr()->insert(name_and_attr);
break;
}
}
return identity;
}
}
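// Produces the optimized graph for `graph_import_config`: prune to the
// requested inputs/outputs, functionalize legacy V1 control flow, then run
// Grappler. Grappler failures are logged and the unoptimized graph is kept.
// Per-stage durations are reported in the returned OptimizationResult.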
absl::StatusOr<TfrtGraphExecutionState::OptimizationResult>
TfrtGraphExecutionState::CreateOptimizedGraph(
tensorflow::GraphImportConfig& graph_import_config) {
OptimizationResult result;
tensorflow::BuildGraphOptions build_graph_options;
std::vector<std::string> inputs;
inputs.reserve(graph_import_config.inputs.size());
for (const auto& input : graph_import_config.inputs) {
inputs.push_back(input.first);
}
PopulateCallableOptions(build_graph_options.callable_options, inputs,
graph_import_config.outputs,
graph_import_config.control_outputs);
auto graph_def = CreateGraphDefFromGraphAndFlibDef(graph(), flib_def());
if (VLOG_IS_ON(1)) {
DumpGraphDefToFile("before_pruning", graph_def);
}
TF_ASSIGN_OR_RETURN(result.graph,
CreatePrunedGraph(std::move(graph_def),
build_graph_options.callable_options));
DCHECK(result.graph);
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_pruning", *result.graph);
}
const auto functionalization_start_time = absl::Now();
TF_RETURN_IF_ERROR(tensorflow::UpgradeLegacyGraph(
result.graph.get(),
const_cast<tensorflow::FunctionLibraryDefinition*>(
&result.graph->flib_def()),
false));
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_functionalization", *result.graph);
}
auto grappler_start_time = absl::Now();
result.functionalization_duration =
grappler_start_time - functionalization_start_time;
auto status_or_optimized_graph =
OptimizeGraph(*result.graph, build_graph_options);
if (status_or_optimized_graph.ok()) {
result.graph = std::move(status_or_optimized_graph.value());
} else {
LOG(WARNING) << "TFRT failed to optimize graph: "
<< status_or_optimized_graph.status();
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_grappler", *result.graph);
}
result.grappler_duration = absl::Now() - grappler_start_time;
return result;
}
Status TfrtGraphExecutionState::Extend(const GraphDef& graph) {
std::unique_ptr<GraphExecutionState> new_state;
absl::MutexLock lock(&graph_execution_state_mu_);
TF_RETURN_IF_ERROR(graph_execution_state_->Extend(graph, &new_state));
graph_execution_state_.swap(new_state);
auto* graph_def = graph_execution_state_->original_graph_def();
DCHECK_NE(graph_def, nullptr);
TF_ASSIGN_OR_RETURN(
functions_to_optimize_,
PreprocessGraph(*graph_def, options_.run_placer_grappler_on_functions));
return absl::OkStatus();
}
namespace {
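// Walks backwards from an Exit node through its Switch predecessor to locate
// the LoopCond node of the enclosing while loop.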
absl::StatusOr<const NodeDef*> FindLoopCondFromExitNode(
const NodeDef& exit_node,
const absl::flat_hash_map<std::string, NodeDef*>& name_to_node) {
const NodeDef* switch_node = nullptr;
for (const std::string& tensor_name : exit_node.input()) {
const std::string node_name = grappler::NodeName(tensor_name);
if (!name_to_node.contains(node_name)) {
return errors::InvalidArgument("Graph does not contain input ", node_name,
" of exit node ", exit_node.name());
}
const NodeDef* node = name_to_node.at(node_name);
if (node->op() == "Switch") {
switch_node = node;
break;
}
}
if (switch_node == nullptr) {
return errors::InvalidArgument("Exit node ", exit_node.name(),
" does not have a Switch node as its ",
"predecessor.");
}
for (const std::string& tensor_name : switch_node->input()) {
const std::string node_name = grappler::NodeName(tensor_name);
if (!name_to_node.contains(node_name)) {
return errors::InvalidArgument("Graph does not contain input ", node_name,
" of switch node ", switch_node->name());
}
const NodeDef* node = name_to_node.at(node_name);
if (node->op() == "LoopCond") {
return node;
}
}
return errors::InvalidArgument("Switch node ", switch_node->name(),
" does not have a LoopCond node as its ",
"predecessor.");
}
}
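// Prunes `graph_def` in place to the nodes reachable (via data and control
// inputs) from the feeds, fetches, and targets in `callable_options`. While
// loops are kept structurally complete: visiting a LoopCond re-enqueues all
// of its Exit nodes. A fetched Exit node is renamed with a "/tfrt_renamed"
// suffix and exposed again through an Identity carrying the original name.
// Example usage (a minimal sketch; the node names are hypothetical):
//   CallableOptions opts;
//   opts.add_feed("input");
//   opts.add_fetch("output");
//   TF_RETURN_IF_ERROR(PruneGraphDef(graph_def, opts));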
Status PruneGraphDef(GraphDef& graph_def,
const CallableOptions& callable_options) {
absl::flat_hash_map<std::string, NodeDef*> name_to_node;
absl::flat_hash_set<const NodeDef*> exit_nodes;
for (auto& node : *graph_def.mutable_node()) {
name_to_node[node.name()] = &node;
if (node.op() == "Exit") {
exit_nodes.insert(&node);
}
if (node.op() == "_Send" || node.op() == "_Recv") {
      return errors::InvalidArgument(
          "TFRT PruneGraphDef cannot handle graphs that contain _Send and "
          "_Recv ops.");
}
}
absl::flat_hash_map<const NodeDef*, absl::flat_hash_set<const NodeDef*>>
loop_cond_to_exit_nodes;
for (const NodeDef* exit_node : exit_nodes) {
TF_ASSIGN_OR_RETURN(const NodeDef* loop_cond_node,
FindLoopCondFromExitNode(*exit_node, name_to_node));
loop_cond_to_exit_nodes[loop_cond_node].insert(exit_node);
}
std::vector<const NodeDef*> queue;
absl::flat_hash_set<std::string> fetch_node_names;
for (const std::string& tensor_name : callable_options.fetch()) {
const NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain fetch node ",
tensor_name, ".");
}
queue.push_back(node);
fetch_node_names.insert(node->name());
}
for (const std::string& tensor_name : callable_options.target()) {
const NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain target node ",
tensor_name, ".");
}
queue.push_back(node);
fetch_node_names.insert(node->name());
}
absl::flat_hash_set<NodeDef*> feed_node_defs;
for (const std::string& tensor_name : callable_options.feed()) {
NodeDef* node = name_to_node[grappler::NodeName(tensor_name)];
if (!node) {
return errors::InvalidArgument("Graph does not contain feed node ",
tensor_name, ".");
}
if (node->op() == "Const") {
node->clear_input();
}
queue.push_back(node);
feed_node_defs.insert(node);
}
absl::flat_hash_set<const NodeDef*> visited;
std::vector<NodeDef> keep;
while (!queue.empty()) {
const NodeDef* node = queue.back();
queue.pop_back();
if (!visited.insert(node).second) {
continue;
}
keep.push_back(*node);
if (node->op() == "LoopCond") {
for (const NodeDef* exit_node : loop_cond_to_exit_nodes[node]) {
queue.push_back(exit_node);
}
}
for (const std::string& tensor_name : node->input()) {
const NodeDef* in = name_to_node[grappler::NodeName(tensor_name)];
if (!in) {
return errors::InvalidArgument("Graph does not contain input ",
grappler::NodeName(tensor_name),
" of node ", node->name(), ".");
}
queue.push_back(in);
}
}
graph_def.clear_node();
for (auto& node : keep) {
if (fetch_node_names.contains(node.name())) {
if (node.op() == "Exit") {
auto renamed_exit_node = node;
renamed_exit_node.set_name(
absl::StrCat(renamed_exit_node.name(), "/tfrt_renamed"));
node.set_op("Identity");
*node.mutable_input(0) = renamed_exit_node.name();
*graph_def.add_node() = std::move(renamed_exit_node);
}
}
*graph_def.add_node() = std::move(node);
}
return absl::OkStatus();
}
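// Rewrites RefEnter/RefSwitch nodes to their non-ref counterparts, routing
// each ref input through a freshly inserted Identity node (deduplicated per
// input). Returns Unimplemented if some consumer op still requires a ref
// input, since the rewrite would then change semantics.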
Status EliminateRefVariablesFromV1ControlFlow(tensorflow::GraphDef& graph_def) {
auto* op_factory = OpRegistry::Global();
absl::flat_hash_set<std::string> ref_nodes;
for (const auto& node : graph_def.node()) {
if (node.op() == "RefEnter" || node.op() == "RefSwitch") {
ref_nodes.insert(node.name());
}
}
tensorflow::GraphDef updated_graph_def;
absl::flat_hash_set<std::string> new_identities;
for (auto& node : *graph_def.mutable_node()) {
std::string* ref_input_name = nullptr;
if (node.op() == "RefEnter") {
node.set_op("Enter");
if (node.input_size() != 1) {
return errors::InvalidArgument("RefEnter node ", node.name(),
" does not have exactly 1 input.");
}
ref_input_name = node.mutable_input(0);
} else if (node.op() == "RefSwitch") {
node.set_op("Switch");
if (node.input_size() != 2) {
return errors::InvalidArgument("RefSwitch node", node.name(),
" does not have exactly 2 inputs.");
}
ref_input_name = node.mutable_input(0);
} else {
std::string ref_input;
for (const auto& tensor_name : node.input()) {
std::string input = grappler::NodeName(tensor_name);
if (ref_nodes.contains(input)) {
ref_input = std::move(input);
break;
}
}
if (!ref_input.empty()) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_factory->LookUpOpDef(node.op(), &op_def));
for (const auto& input_arg : op_def->input_arg()) {
if (input_arg.is_ref()) {
return errors::Unimplemented(
"Cannot in-place update ref node ", ref_input,
" to the non-ref counterpart since its user node ", node.name(),
" requires its input to be refs.");
}
}
}
}
if (ref_input_name != nullptr) {
std::string identity_name =
absl::StrCat(grappler::NodeName(*ref_input_name), "/identity");
if (!new_identities.contains(identity_name)) {
*updated_graph_def.add_node() =
CreateNewIdentityNode(node, *ref_input_name, identity_name);
new_identities.insert(identity_name);
}
*ref_input_name = std::move(identity_name);
}
*updated_graph_def.add_node() = std::move(node);
}
graph_def.mutable_node()->Swap(updated_graph_def.mutable_node());
return absl::OkStatus();
}
void RemoveInputShapesInFunctions(tensorflow::GraphDef& graph_def) {
for (tensorflow::FunctionDef& function_def :
*graph_def.mutable_library()->mutable_function()) {
function_def.mutable_attr()->erase("_input_shapes");
}
}
namespace {
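// Runs the full graph optimization pipeline on every function in
// `flib_proto` named in `functions_to_optimize`: each function is converted
// to a graph, optimized through a GraphExecutionState, and converted back.
// Optimization failures are logged and the original function is kept.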
Status OptimizeFunctions(
FunctionDefLibrary& flib_proto, const FunctionLibraryDefinition& flib,
const FallbackState& fallback_state,
const absl::flat_hash_set<std::string>& functions_to_optimize) {
for (FunctionDef& fdef : *flib_proto.mutable_function()) {
if (!functions_to_optimize.contains(fdef.signature().name())) {
continue;
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(fdef, AttrSlice(), &flib, &fbody));
tensorflow::Graph* graph = fbody->graph;
tensorflow::GraphDef graph_def;
graph->ToGraphDef(&graph_def);
*graph_def.mutable_library() = flib.ToProto();
TF_ASSIGN_OR_RETURN(
auto graph_execution_state,
fallback_state.CreateGraphExecutionState(std::move(graph_def)));
std::unique_ptr<tensorflow::Graph> optimized_graph;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> optimized_flib;
tensorflow::BuildGraphOptions build_graph_options;
std::vector<std::string> args;
args.reserve(fbody->arg_nodes.size());
for (const auto& arg : fbody->arg_nodes) args.push_back(arg->name());
std::vector<std::string> rets;
rets.reserve(fbody->ret_nodes.size());
for (const auto& ret : fbody->ret_nodes) rets.push_back(ret->name());
std::vector<std::string> control_rets;
control_rets.reserve(fbody->control_ret_nodes.size());
for (const auto& control_ret : fbody->control_ret_nodes) {
control_rets.push_back(control_ret->name());
}
PopulateCallableOptions(build_graph_options.callable_options, args, rets,
control_rets);
auto status = graph_execution_state->OptimizeGraph(
build_graph_options, *graph_execution_state->full_graph(), &flib,
&optimized_graph, &optimized_flib);
if (!status.ok()) {
LOG(ERROR) << "TFRT failed to optimize graph (converted from function: "
<< fdef.signature().name() << "): " << status;
continue;
}
TF_RETURN_IF_ERROR(
optimized_graph->AddFunctionLibrary(optimized_flib->ToProto()));
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*optimized_graph,
fdef.signature().name(), &new_fdef));
fdef = std::move(new_fdef);
}
return absl::OkStatus();
}
}
absl::StatusOr<std::unique_ptr<tensorflow::Graph>>
TfrtGraphExecutionState::OptimizeGraph(
const tensorflow::Graph& graph,
const tensorflow::BuildGraphOptions& build_graph_options) {
std::unique_ptr<tensorflow::Graph> optimized_graph;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> optimized_flib;
{
absl::MutexLock lock(&graph_execution_state_mu_);
TF_RETURN_IF_ERROR(graph_execution_state_->OptimizeGraph(
build_graph_options, graph, &graph.flib_def(), &optimized_graph,
&optimized_flib));
}
FunctionDefLibrary optimized_flib_proto = optimized_flib->ToProto();
if (options_.run_placer_grappler_on_functions) {
TF_RETURN_IF_ERROR(OptimizeFunctions(optimized_flib_proto, *optimized_flib,
fallback_state_,
functions_to_optimize_));
optimized_graph->mutable_flib_def()->Clear();
}
TF_RETURN_IF_ERROR(optimized_graph->AddFunctionLibrary(optimized_flib_proto));
return optimized_graph;
}
}
} | #include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::EqualsProto;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::proto::IgnoringFieldPaths;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
class PruneGraphDefTest : public grappler::GrapplerTest {};
TEST_F(PruneGraphDefTest, ConstFeedWithInput) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("b");
callable_options.add_fetch("c");
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareGraphs(expected, graphdef);
}
Status LessThanTenCond(const Scope& scope, const std::vector<Output>& inputs,
Output* output) {
*output = ops::Less(scope, inputs[0], 10);
return scope.status();
}
Status AddOneBody(const Scope& scope, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(scope, {inputs[0], 1}));
return scope.status();
}
TEST_F(PruneGraphDefTest, InsertIdentityForLoopExitFeed) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("input");
callable_options.add_fetch("while/Exit");
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
for (const auto& node : graphdef.node()) {
if (node.op() == "Exit") {
EXPECT_EQ(node.name(), "while/Exit/tfrt_renamed");
}
if (node.name() == "while/Exit") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "while/Exit/tfrt_renamed");
}
}
}
TEST_F(PruneGraphDefTest, EliminateRefEntersFromControlFlow) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs1;
std::vector<Output> outputs2;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs1));
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop2",
&outputs2));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Enter") {
node.set_op("RefEnter");
}
name_to_node.insert({node.name(), node});
}
}
TF_ASSERT_OK(EliminateRefVariablesFromV1ControlFlow(graphdef));
int num_identity_op = 0;
int num_enter_op = 0;
int num_ref_enter_op = 0;
for (const auto& node : graphdef.node()) {
if (node.op() == "Identity") {
num_identity_op++;
EXPECT_EQ(node.name(), "input/identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_THAT(node.attr(), ElementsAre(Pair("T", _)));
} else if (node.op() == "RefEnter") {
num_ref_enter_op++;
} else if (node.op() == "Enter") {
EXPECT_EQ(num_identity_op, 1);
num_enter_op++;
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input/identity");
EXPECT_THAT(
node, IgnoringFieldPaths({"input", "op"},
EqualsProto(name_to_node.at(node.name()))));
} else {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
}
name_to_node.erase(node.name());
}
EXPECT_EQ(num_identity_op, 1);
EXPECT_EQ(num_enter_op, 2);
EXPECT_EQ(num_ref_enter_op, 0);
EXPECT_THAT(name_to_node, IsEmpty());
}
TEST_F(PruneGraphDefTest, EliminateRefSwitchesFromControlFlow) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output cond_a = ops::Placeholder(scope.WithOpName("cond_a"), DT_BOOL);
Output cond_b = ops::Placeholder(scope.WithOpName("cond_b"), DT_BOOL);
Output input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
ops::Switch switch_a(scope.WithOpName("switch_a"), input, cond_a);
ops::Switch switch_b(scope.WithOpName("switch_b"), input, cond_b);
Output switch_a_true =
ops::Identity(scope.WithOpName("switch_a_true"), switch_a.output_true);
Output switch_b_true =
ops::Identity(scope.WithOpName("switch_b_true"), switch_b.output_true);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Switch") {
node.set_op("RefSwitch");
}
name_to_node.insert({node.name(), node});
}
}
TF_ASSERT_OK(EliminateRefVariablesFromV1ControlFlow(graphdef));
int num_identity_op = 0;
int num_switch_op = 0;
int num_ref_switch_op = 0;
for (const auto& node : graphdef.node()) {
if (node.name() == "switch_a_true" || node.name() == "switch_b_true") {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
} else if (node.op() == "Identity") {
num_identity_op++;
EXPECT_EQ(node.name(), "input/identity");
ASSERT_EQ(node.input().size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_THAT(node.attr(), ElementsAre(Pair("T", _)));
} else if (node.op() == "RefSwitch") {
num_ref_switch_op++;
} else if (node.op() == "Switch") {
EXPECT_EQ(num_identity_op, 1);
num_switch_op++;
ASSERT_EQ(node.input().size(), 2);
EXPECT_TRUE(node.input(0) == "input/identity" ||
node.input(1) == "input/identity");
EXPECT_THAT(
node, IgnoringFieldPaths({"input", "op"},
EqualsProto(name_to_node.at(node.name()))));
} else {
EXPECT_THAT(node, EqualsProto(name_to_node.at(node.name())));
}
name_to_node.erase(node.name());
}
EXPECT_EQ(num_identity_op, 1);
EXPECT_EQ(num_switch_op, 2);
EXPECT_EQ(num_ref_switch_op, 0);
EXPECT_THAT(name_to_node, IsEmpty());
}
TEST_F(PruneGraphDefTest, EliminateRefVariablesFromV1ControlFlowFailed) {
GraphDef graphdef;
absl::flat_hash_map<std::string, NodeDef> name_to_node;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output cond = ops::Placeholder(scope.WithOpName("cond"), DT_BOOL);
Output input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
ops::Switch switch_op(scope.WithOpName("switch"), input, cond);
Output var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign =
ops::Assign(scope.WithOpName("assign"), var, switch_op.output_true);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
for (auto& node : *graphdef.mutable_node()) {
if (node.op() == "Switch") {
node.set_op("RefSwitch");
}
name_to_node.insert({node.name(), node});
}
}
const auto status = EliminateRefVariablesFromV1ControlFlow(graphdef);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.ToString(), HasSubstr("requires its input to be refs"));
}
TEST_F(PruneGraphDefTest, KeepLoopStructureComplete) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
std::vector<Output> inputs;
inputs.push_back(ops::Placeholder(scope.WithOpName("input"), DT_INT32));
std::vector<Output> outputs;
TF_ASSERT_OK(ops::BuildWhileLoop(scope.NewSubScope("while"), inputs,
LessThanTenCond, AddOneBody, "test_loop",
&outputs));
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
CallableOptions callable_options;
callable_options.add_feed("input");
callable_options.add_fetch("while/LoopCond");
GraphDef original_graphdef = graphdef;
TF_ASSERT_OK(PruneGraphDef(graphdef, callable_options));
EXPECT_THAT(graphdef,
IgnoringRepeatedFieldOrdering(EqualsProto(original_graphdef)));
}
class OptimizeGraphTest : public grappler::GrapplerTest {};
TEST_F(OptimizeGraphTest, OptimizeFunctions) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y_retval: float"}, {},
{{{"ArithmeticOptimizer/ConvertPow__inner_pow3"},
"Square",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"},
{{"pow3"},
"Mul",
{"ArithmeticOptimizer/ConvertPow__inner_pow3:y:0", "x"},
{{"T", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"}},
{{"y_retval", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareGraphs(expected, optimized_graph_def);
CompareFunctions(expected.library().function(0),
optimized_graph_def.library().function(0));
}
TEST_F(OptimizeGraphTest, OptimizeFunctionsUsedByFunctionNodes) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
const Tensor kOne = test::AsScalar<float>(1.0);
auto base2pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Add1Pow3", {"x: float"}, {"y: float"}, {},
{{{"one"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}},
{{"add"}, "Add", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
{{"pcall"},
"PartitionedCall",
{"add:z:0"},
{{"Tin", DataTypeSlice({DT_FLOAT})},
{"Tout", DataTypeSlice({DT_FLOAT})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"Pow3", {{"T", DT_FLOAT}})}}}},
{{"y", "pcall:output:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = pow3_fdef;
*fdef_lib.add_function() = base2pow3_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 1.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
base2pow3_fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(base2pow3_fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y_retval: float"}, {},
{{{"ArithmeticOptimizer/ConvertPow__inner_pow3"},
"Square",
{"x"},
{{"dtype", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"},
{{"pow3"},
"Mul",
{"ArithmeticOptimizer/ConvertPow__inner_pow3:y:0", "x"},
{{"T", DT_FLOAT}},
{},
"/job:localhost/replica:0/task:0/device:CPU:0"}},
{{"y_retval", "pow3:z:0"}});
const Tensor kOne = test::AsScalar<float>(1.0);
auto base2pow3_fdef = tensorflow::FunctionDefHelper::Create(
"Add1Pow3", {"x: float"}, {"y: float"}, {},
{{{"one"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kOne}}},
{{"add"}, "Add", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
{{"pcall"},
"PartitionedCall",
{"add:z:0"},
{{"Tin", DataTypeSlice({DT_FLOAT})},
{"Tout", DataTypeSlice({DT_FLOAT})},
{"f", tensorflow::FunctionDefHelper::FunctionRef(
"Pow3", {{"T", DT_FLOAT}})}}}},
{{"y", "pcall:output:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = pow3_fdef;
*fdef_lib.add_function() = base2pow3_fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 1.0, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
base2pow3_fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(base2pow3_fdef.signature().name());
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output b = pcall.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
CompareFunctions(expected.library().function(1),
optimized_graph_def.library().function(1));
ASSERT_EQ("Pow3",
optimized_graph_def.library().function(1).signature().name());
}
TEST_F(OptimizeGraphTest, DontOptimizeUnsafeFunction) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
Output cond = ops::Const(scope.WithOpName("cond"), true, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto if_op =
ops::If(scope, cond, inputs, output_dtypes, func_attr, func_attr);
Output b = if_op.output.front();
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"c"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
CompareGraphs(graphdef, optimized_graph_def);
CompareFunctions(graphdef.library().function(0),
optimized_graph_def.library().function(0));
}
TEST_F(OptimizeGraphTest, FunctionBecomeUnsafeIfAnyOpIsUnsafe) {
GraphDef graphdef;
tensorflow::FunctionDefLibrary fdef_lib;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
const Tensor kThree = test::AsScalar<float>(3.0);
auto fdef = tensorflow::FunctionDefHelper::Create(
"Pow3", {"x: float"}, {"y: float"}, {},
{{{"three"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", kThree}}},
{{"pow3"}, "Pow", {"x", "three:output:0"}, {{"T", DT_FLOAT}}}},
{{"y", "pow3:z:0"}});
tensorflow::FunctionDefLibrary fdef_lib;
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
Output cond = ops::Const(scope.WithOpName("cond"), true, {1, 1});
std::vector<tensorflow::Output> inputs = {a};
std::vector<tensorflow::DataType> output_dtypes = {
fdef.signature().output_arg(0).type()};
tensorflow::NameAttrList func_attr;
func_attr.set_name(fdef.signature().name());
auto if_op =
ops::If(scope, cond, inputs, output_dtypes, func_attr, func_attr);
Output b = if_op.output.front();
inputs = {b};
auto pcall = ops::PartitionedCall(scope, inputs, output_dtypes, func_attr);
Output c = pcall.output.front();
Output d = ops::Identity(scope.WithOpName("d"), c);
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create({}, fdef_lib));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = DT_FLOAT;
array_info.shape.set_unknown_rank(true);
graph_import_config.inputs["a"] = array_info;
graph_import_config.outputs = {"d"};
TF_ASSERT_OK_AND_ASSIGN(
auto optimized_graph,
graph_execution_state->CreateOptimizedGraph(graph_import_config));
GraphDef optimized_graph_def;
optimized_graph.graph->ToGraphDef(&optimized_graph_def);
CompareFunctions(graphdef.library().function(0),
optimized_graph_def.library().function(0));
}
class ExtendGraphTest : public grappler::GrapplerTest {};
TEST_F(ExtendGraphTest, ExtendGraph) {
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
SessionOptions session_options;
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(session_options, {}));
TfrtGraphExecutionState::Options options;
options.run_placer_grappler_on_functions = false;
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(options, graphdef, *fallback_state));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(graph_execution_state->Extend(extension));
GraphDef expected;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithOpName("b"), 0.0f, {10, 10});
TF_ASSERT_OK(scope.ToGraphDef(&expected));
}
ASSERT_NE(graph_execution_state->original_graph_def(), nullptr);
CompareGraphs(expected, *graph_execution_state->original_graph_def());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/tfrt_graph_execution_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/tfrt_graph_execution_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c8b735fe-1061-46a4-98c4-74adb45572e5 | cpp | tensorflow/tensorflow | node_io_dump_rewriter | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.cc | tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter_test.cc | #include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
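// Returns `dump_dir` if non-empty, otherwise falls back to the
// TF_DUMP_GRAPH_PREFIX environment variable; errors if neither is set.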
absl::StatusOr<std::string> GetDumpDir(absl::string_view dump_dir) {
if (!dump_dir.empty()) return std::string(dump_dir);
const char* prefix = getenv("TF_DUMP_GRAPH_PREFIX");
if (prefix != nullptr) return std::string(prefix);
return errors::InvalidArgument("TF_DUMP_GRAPH_PREFIX not specified");
}
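// Inserts a DebugIdentityV3 dump op on every non-control input and output
// edge of `node`, recording the node name, I/O direction and index, the
// source tensor name, and a file:// debug URL rooted at `dump_dir`.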
Status InsertDumpOpsForNode(Graph& graph, Node& node,
absl::string_view dump_dir) {
auto insert = [&](bool is_input, const std::vector<const Edge*> edges) {
for (const Edge* edge : edges) {
if (edge->IsControlEdge()) continue;
Node* dump_node;
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat(edge->src()->name(), "/", edge->src_output(),
"/debug_identity"),
"DebugIdentityV3")
.Attr("io_of_node", node.name())
.Attr("is_input", is_input)
.Attr("io_index",
is_input ? edge->dst_input() : edge->src_output())
.Attr("tensor_name",
absl::StrCat(edge->src()->name(), ":", edge->src_output()))
.Attr("debug_urls", {absl::StrCat("file:
.Input(edge->src(), edge->src_output())
.Finalize(&graph, &dump_node));
TF_RETURN_IF_ERROR(
graph.UpdateEdge(dump_node, 0, edge->dst(), edge->dst_input()));
}
return absl::OkStatus();
};
  TF_RETURN_IF_ERROR(insert(
      /*is_input=*/true, {node.in_edges().begin(), node.in_edges().end()}));
  TF_RETURN_IF_ERROR(insert(
      /*is_input=*/false, {node.out_edges().begin(), node.out_edges().end()}));
return absl::OkStatus();
}
}
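// Instruments `graph`, as well as every function in its library, so that the
// inputs and outputs of each node named in `nodes_to_dump` are dumped under
// `dump_dir` at runtime. Example usage (a minimal sketch; the node name and
// directory are hypothetical):
//   TF_RETURN_IF_ERROR(
//       InsertDumpOps(graph, /*nodes_to_dump=*/{"add"}, "/tmp/dump"));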
Status InsertDumpOps(Graph& graph,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
TF_ASSIGN_OR_RETURN(auto dir, GetDumpDir(dump_dir));
auto insert = [&](Graph& graph) {
for (Node* node : graph.op_nodes()) {
if (nodes_to_dump.contains(node->name())) {
TF_RETURN_IF_ERROR(InsertDumpOpsForNode(graph, *node, dir));
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(insert(graph));
for (const auto& fname : graph.flib_def().ListFunctionNames()) {
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*graph.flib_def().Find(fname), AttrSlice(), &graph.flib_def(), &fbody));
TF_RETURN_IF_ERROR(insert(*fbody->graph));
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*fbody->graph, fname, &new_fdef));
TF_RETURN_IF_ERROR(
graph.mutable_flib_def()->ReplaceFunction(fname, new_fdef));
}
return absl::OkStatus();
}
Status InsertDumpOps(MetaGraphDef& meta_graph_def,
const absl::flat_hash_set<std::string>& nodes_to_dump,
absl::string_view dump_dir) {
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph({}, meta_graph_def.graph_def(), &graph));
TF_RETURN_IF_ERROR(InsertDumpOps(graph, nodes_to_dump, dump_dir));
graph.ToGraphDef(meta_graph_def.mutable_graph_def());
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.h"
#include <dirent.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/tfrt/saved_model/saved_model.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
const Node* FindNode(const Graph* graph, absl::string_view node_name) {
for (Node* node : graph->nodes()) {
if (node->name() == node_name) return node;
}
return nullptr;
}
const Node* GetInputNode(const Node* node, size_t index) {
const Node* input_node;
CHECK_OK(node->input_node(index, &input_node));
return input_node;
}
const Node* GetOutputNode(const Node* node, size_t index) {
for (const Edge* edge : node->out_edges()) {
if (edge->src_output() == index) return edge->dst();
}
return nullptr;
}
absl::StatusOr<std::vector<std::string>> GetFilenames(
absl::string_view dump_dir) {
auto dump_sub_dir = absl::StrCat(dump_dir, "/", kDumpSubDirName);
DIR* dir = opendir(dump_sub_dir.data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", dump_sub_dir));
}
std::vector<std::string> step_dirs;
struct dirent* entry;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type != DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found non-directory entry under dump_sub_dir: ", entry->d_name));
}
step_dirs.push_back(absl::StrCat(dump_sub_dir, "/", entry->d_name));
}
closedir(dir);
CHECK_EQ(step_dirs.size(), 1);
dir = opendir(step_dirs[0].data());
if (dir == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("can't open directory: ", step_dirs[0]));
}
std::vector<std::string> filenames;
while ((entry = readdir(dir)) != nullptr) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
if (entry->d_type == DT_DIR) {
return absl::InternalError(absl::StrCat(
"Found directory entry under step_dir: ", entry->d_name));
}
filenames.push_back(entry->d_name);
}
closedir(dir);
return filenames;
}
TEST(NodeIoDumpRewriterTest, OnGraph) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input_a = ops::Placeholder(scope.WithOpName("input_a"), DT_INT32);
auto input_b = ops::Placeholder(scope.WithOpName("input_b"), DT_INT32);
auto add = ops::Add(scope.WithOpName("add"), input_a, input_b);
auto output = ops::Identity(scope.WithOpName("output"), add);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnGraph");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(*graph, {"add"}, dump_dir));
auto* node = FindNode(graph.get(), "add");
EXPECT_EQ(node->num_inputs(), 2);
EXPECT_EQ(GetInputNode(node, 0)->name(), "input_a/0/debug_identity");
EXPECT_EQ(GetInputNode(node, 1)->name(), "input_b/0/debug_identity");
EXPECT_EQ(node->num_outputs(), 1);
EXPECT_EQ(GetOutputNode(node, 0)->name(), "add/0/debug_identity");
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV1) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v1/1");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV1");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"Add"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
  inputs.push_back(CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "another_toy", inputs, &outputs));
ASSERT_EQ(outputs.size(), 2);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]),
::testing::ElementsAreArray({12}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "Add:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "Add:in:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "Add:in:1_"));
}
TEST(NodeIoDumpRewriterTest, OnSavedModelV2) {
std::string saved_model_dir = GetDataDependencyFilepath(
"tensorflow/core/tfrt/saved_model/tests/toy_v2");
MetaGraphDef meta_graph_def;
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(saved_model_dir, {"serve"},
&meta_graph_def));
Env* env = Env::Default();
const string dump_dir =
::tsl::io::JoinPath(::tsl::testing::TmpDir(), "OnSavedModelV2");
if (!env->FileExists(dump_dir).ok()) {
ASSERT_TRUE(env->RecursivelyCreateDir(dump_dir).ok());
}
TF_ASSERT_OK(InsertDumpOps(meta_graph_def, {"result"}, dump_dir));
auto runtime = DefaultTfrtRuntime(1);
SavedModel::Options options(runtime.get());
options.graph_execution_options.compile_options.enable_grappler = false;
TF_ASSERT_OK_AND_ASSIGN(
auto saved_model,
SavedModelImpl::LoadSavedModel(options, meta_graph_def, saved_model_dir));
std::vector<tensorflow::Tensor> inputs;
  inputs.push_back(CreateTfTensor<int32_t>({1, 3}, {1, 1, 1}));
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(saved_model->Run({}, "serving_default", inputs, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({6}));
ASSERT_OK_AND_ASSIGN(auto filenames, GetFilenames(dump_dir));
ASSERT_EQ(filenames.size(), 3);
EXPECT_TRUE(absl::StartsWith(filenames[0], "result:out:0_"));
EXPECT_TRUE(absl::StartsWith(filenames[1], "result:in:1_"));
EXPECT_TRUE(absl::StartsWith(filenames[2], "result:in:0_"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/debug/node_io_dump_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f94b5e9-140b-4097-af52-b4d19366be44 | cpp | tensorflow/tensorflow | create_pjrt_client_util | tensorflow/core/tfrt/common/create_pjrt_client_util.cc | tensorflow/core/tfrt/common/create_pjrt_client_util_test.cc | #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include <optional>
#include <set>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
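// Looks up the process-wide PjRtState in the global ResourceMgr, creating it
// on first use, and returns the PJRT client for `device_type` (creating one
// through the registered factory if needed). The returned client is owned by
// PjRtState. Note that `allowed_devices` is currently unused here.
// Example usage (a minimal sketch; device availability depends on the build):
//   TF_ASSIGN_OR_RETURN(xla::PjRtClient * client,
//                       GetOrCreatePjRtClient(DeviceType(DEVICE_XLA_GPU)));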
absl::StatusOr<xla::PjRtClient*> GetOrCreatePjRtClient(
const DeviceType& device_type,
std::optional<std::set<int>> allowed_devices) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetOrCreatePjRtClient(device_type);
}
} | #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/types.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(CreatePjRtClientTest, GetNotExistPjRtClientNotImplemented) {
EXPECT_THAT(
GetOrCreatePjRtClient(DEVICE_CPU),
StatusIs(error::NOT_FOUND,
HasSubstr(absl::StrCat("The PJRT client factory of `",
DEVICE_CPU, "` is not registered"))));
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST(CreatePjRtClientTest, GetNotExistGpuPjRtClient) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
GetOrCreatePjRtClient(DEVICE_XLA_GPU));
EXPECT_THAT(pjrt_client, ::testing::NotNull());
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/create_pjrt_client_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/create_pjrt_client_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e49f4a9-c87d-4ea6-9c68-73d8c7379601 | cpp | tensorflow/tensorflow | pjrt_state | tensorflow/core/tfrt/common/pjrt_state.cc | tensorflow/core/tfrt/common/pjrt_state_test.cc | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/tf_pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
PjRtState* PjRtState::Create() { return new PjRtState(); }
absl::StatusOr<xla::PjRtClient*> PjRtState::GetPjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
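// Returns the cached client for `device_type` if one exists; otherwise builds
// one through the registered PjrtClientFactoryRegistry and wraps it in a
// TfPjRtClient before caching it.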
absl::StatusOr<xla::PjRtClient*> PjRtState::GetOrCreatePjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
std::unique_ptr<xla::PjRtClient> pjrt_client;
xla::PjrtClientFactoryOptions options = xla::PjrtClientFactoryOptions();
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtClient> client,
xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
device_type, options));
pjrt_client = xla::TfPjRtClient::CreateTfPjRtClient(std::move(client));
clients_[device_type] = std::move(pjrt_client);
return clients_[device_type].get();
}
Status PjRtState::SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
}
clients_[device_type] = std::move(client);
return absl::OkStatus();
}
Status PjRtState::MovePjRtClientToUnused(const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
clients_.erase(it);
return absl::OkStatus();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
Status PjRtState::SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
absl::MutexLock lock(&mu_);
pjrt_gpu_client_creation_info_ = std::move(info);
return absl::OkStatus();
}
PjRtGpuClientCreationInfo* PjRtState::GetPjRtGpuClientCreationInfo() {
absl::MutexLock lock(&mu_);
return pjrt_gpu_client_creation_info_.get();
}
string PjRtState::DebugString() const { return "PjRtState"; }
} | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace {
using tensorflow::PjRtState;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class PjRtStateTestFixture : public testing::Test {
protected:
PjRtStateTestFixture() { pjrt_state_ = PjRtState::Create(); }
~PjRtStateTestFixture() override {
tensorflow::core::ScopedUnref pjrt_state_ref(pjrt_state_);
}
PjRtState* pjrt_state_;
};
TEST_F(PjRtStateTestFixture, SetAndGetPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
          .value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
TEST_F(PjRtStateTestFixture, AddAlreadyExistsPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
          .value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_1,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
          .value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_2,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_NE(pjrt_client_1, pjrt_client_2);
}
TEST_F(PjRtStateTestFixture, GetNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, DeletePjRtClient) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1));
xla::PjRtClient* pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
EXPECT_EQ(pjrt_client_ptr->platform_name(), "cpu");
}
TEST_F(PjRtStateTestFixture, DeleteNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientExist) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1));
auto pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
pjrt_state_->GetOrCreatePjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client_get, pjrt_client_ptr);
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientNotExist) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, pjrt_state_->GetOrCreatePjRtClient(
tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7569af85-4644-4ab1-890f-3d81139b5f07 | cpp | tensorflow/tensorflow | pjrt_gpu_client_registration | tensorflow/core/tfrt/common/pjrt_gpu_client_registration.cc | tensorflow/core/tfrt/common/pjrt_gpu_client_registration_test.cc | #include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<std::unique_ptr<xla::PjRtClient>> GetGpuClient(
const PjrtClientFactoryOptions& option) {
xla::GpuClientOptions gpu_client_options;
gpu_client_options.node_id = option.gpu_options.node_id;
gpu_client_options.num_nodes = 1;
gpu_client_options.allowed_devices = option.gpu_options.allowed_devices;
gpu_client_options.platform_name = option.gpu_options.platform_name;
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtClient> client,
xla::GetStreamExecutorGpuClient(gpu_client_options));
return std::move(client);
}
REGISTER_PJRT_CLIENT_FACTORY(gpu_client, tensorflow::DEVICE_GPU, GetGpuClient);
REGISTER_PJRT_CLIENT_FACTORY(xla_gpu_client, tensorflow::DEVICE_XLA_GPU,
GetGpuClient);
} | #include <gtest/gtest.h>
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(PjrtGpuClientCreateTest, TestGpuCreateOption) {
PjrtClientFactoryOptions options = PjrtClientFactoryOptions();
TF_ASSERT_OK_AND_ASSIGN(
auto client, xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
tsl::DeviceType(tensorflow::DEVICE_GPU), options));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_gpu_client_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_gpu_client_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e75402be-4462-469a-b7e4-f4278767815f | cpp | tensorflow/tensorflow | pjrt_cpu_client_registration | tensorflow/core/tfrt/common/pjrt_cpu_client_registration.cc | tensorflow/core/tfrt/common/pjrt_cpu_client_registration_test.cc | #include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<std::unique_ptr<xla::PjRtClient>> GetCpuClient(
const PjrtClientFactoryOptions& option) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtClient> client,
xla::GetTfrtCpuClient(option.cpu_options.asynchronous));
return std::move(client);
}
REGISTER_PJRT_CLIENT_FACTORY(cpu_client, tensorflow::DEVICE_CPU, GetCpuClient);
} | #include <gtest/gtest.h>
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(PjrtCpuClientCreateTest, TestCpuCreateOption) {
PjrtClientFactoryOptions options = PjrtClientFactoryOptions();
options.cpu_options.asynchronous = true;
TF_ASSERT_OK_AND_ASSIGN(
auto client, xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
tsl::DeviceType(tensorflow::DEVICE_CPU), options));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_cpu_client_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_cpu_client_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31a835cc-caae-4afb-b0a3-525ca43c83f7 | cpp | tensorflow/tensorflow | pjrt_util | tensorflow/core/tfrt/common/pjrt_util.cc | tensorflow/core/tfrt/common/pjrt_util_test.cc | #include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
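// Stores `client` in the process-wide ResourceMgr-backed PjRtState so it can
// be shared across sessions. A previously registered client for the same
// device type is moved to PjRtState's unused list instead of being destroyed.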
Status SetPjRtClientInTFGlobalResourceManager(
const DeviceType& device_type, std::unique_ptr<xla::PjRtClient> client) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (client == nullptr) {
return errors::InvalidArgument("PJRT client is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtClient(device_type, std::move(client)));
return absl::OkStatus();
}
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtClient(device_type);
}
absl::Status SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
if (info == nullptr) {
return absl::InvalidArgumentError("PJRT client creation info is nullptr.");
}
TF_RETURN_IF_ERROR(pjrt_state->SetPjRtGpuClientCreationInfo(std::move(info)));
return absl::OkStatus();
}
absl::StatusOr<PjRtGpuClientCreationInfo*> GetPjRtGpuClientCreationInfo() {
ResourceMgr* rmgr = tfrt_global::GetTFGlobalResourceMgr();
PjRtState* pjrt_state;
TF_RETURN_IF_ERROR(rmgr->LookupOrCreate<PjRtState>(
rmgr->default_container(), kPjRtStateResourceName, &pjrt_state,
[&](PjRtState** ret) {
*ret = PjRtState::Create();
return absl::OkStatus();
}));
core::ScopedUnref pjrt_state_ref(pjrt_state);
return pjrt_state->GetPjRtGpuClientCreationInfo();
}
} | #include "tensorflow/core/tfrt/common/pjrt_util.h"
#include <memory>
#include <utility>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(PjRtUtilTest, SetGetAndDeletePjRtClient) {
TF_ASSERT_OK(SetPjRtClientInTFGlobalResourceManager(
DEVICE_CPU,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
          .value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetPjRtClient(DEVICE_CPU));
EXPECT_THAT(pjrt_client, ::testing::NotNull());
}
TEST(PjRtStateResourceManagerTest, SetNullPjRtClient) {
EXPECT_THAT(
SetPjRtClientInTFGlobalResourceManager(DEVICE_CPU, nullptr),
StatusIs(error::INVALID_ARGUMENT, HasSubstr("PJRT client is nullptr")));
}
TEST(PjRtGpuClientCreationInfoTest, SetAndGet) {
auto info = std::make_unique<PjRtGpuClientCreationInfo>();
info->allowed_devices.insert(123);
TF_ASSERT_OK(
SetPjRtGpuClientCreationInfoInTFGlobalResourceManager(std::move(info)));
TF_ASSERT_OK_AND_ASSIGN(PjRtGpuClientCreationInfo * retrieved_info,
GetPjRtGpuClientCreationInfo());
EXPECT_THAT(retrieved_info->allowed_devices, ElementsAre(123));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c45b2d03-f0b3-41f2-a12e-10706399826b | cpp | tensorflow/tensorflow | async_value_tensor | tensorflow/core/tfrt/common/async_value_tensor.cc | tensorflow/core/tfrt/common/async_value_tensor_test.cc | #include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/check.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/tensor.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/support/forward_decls.h"
namespace tensorflow {
namespace {
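// The low bit of the opaque pointer is used as a tag so FromOpaquePointer can
// tell AsyncValueTensor metadata apart from ordinary tensor buffers.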
constexpr uintptr_t kTag = 0x1ULL;
}
AsyncValueTensor* AsyncValueTensor::FromTensor(const Tensor* tensor) {
AsyncValueTensor* av_tensor =
FromOpaquePointer(const_cast<char*>(tensor->tensor_data().data()));
return av_tensor;
}
const tfrt::RCReference<tfrt::AsyncValue>& AsyncValueTensor::GetAsyncRef() {
return av_ref_;
}
void AsyncValueTensor::SetAsyncRef(tfrt::RCReference<tfrt::AsyncValue> av_ref) {
av_ref_ = std::move(av_ref);
}
std::shared_ptr<xla::PjRtBuffer> AsyncValueTensor::GetBuffer() {
return buffer_;
}
void AsyncValueTensor::SetBuffer(std::shared_ptr<xla::PjRtBuffer> buffer) {
buffer_ = std::move(buffer);
}
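// Decodes a pointer produced by ToOpaquePointer; returns nullptr when the tag
// bit is absent, i.e. the pointer refers to a plain tensor buffer.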
AsyncValueTensor* AsyncValueTensor::FromOpaquePointer(void* ptr) {
uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
if (value & kTag) {
return reinterpret_cast<AsyncValueTensor*>(value & ~kTag);
} else {
return nullptr;
}
}
void* AsyncValueTensor::ToOpaquePointer(AsyncValueTensor* tensor) {
uintptr_t value = reinterpret_cast<uintptr_t>(tensor);
CHECK_EQ(value & kTag, 0);
value |= kTag;
return reinterpret_cast<AsyncValueTensor*>(value);
}
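// Note: `alignment` and `num_bytes` are ignored; every allocation is a fresh
// AsyncValueTensor handed out as a tagged opaque pointer, and DeallocateRaw
// deletes it again.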
void* AsyncValueAllocator::AllocateRaw(size_t alignment, size_t num_bytes) {
return AsyncValueTensor::ToOpaquePointer(new AsyncValueTensor);
}
void AsyncValueAllocator::DeallocateRaw(void* ptr) {
delete AsyncValueTensor::FromOpaquePointer(ptr);
}
} | #include "tensorflow/core/tfrt/common/async_value_tensor.h"
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
TEST(AsyncValueTensorTest, InvalidTensor) {
tensorflow::Tensor tensor(tensorflow::DT_INT64, tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_EQ(avt, nullptr);
}
TEST(AsyncValueTensorTest, SetAndGetAsyncValue) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
tsl::AsyncValueRef<int32_t> value =
tsl::MakeConstructedAsyncValueRef<int32_t>(123);
avt->SetAsyncRef(value.CopyRCRef());
auto ret_value = avt->GetAsyncRef();
ASSERT_EQ(ret_value, value.CopyRCRef());
}
TEST(AsyncValueTensorTest, SetAndGetBuffer) {
AsyncValueAllocator allocator;
tensorflow::Tensor tensor(&allocator, tensorflow::DT_INT64,
tensorflow::TensorShape({1}));
AsyncValueTensor* avt = AsyncValueTensor::FromTensor(&tensor);
ASSERT_NE(avt, nullptr);
std::shared_ptr<xla::PjRtBuffer> buffer;
avt->SetBuffer(buffer);
auto ret_buffer = avt->GetBuffer();
ASSERT_EQ(ret_buffer, buffer);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/async_value_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/async_value_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed9a3fba-817e-4f60-bc20-9c5d849e2c26 | cpp | tensorflow/tensorflow | ifrt_serving_executable | tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/extract_callback.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include "tensorflow/compiler/tf2xla/host_compute_metadata.pb.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/pjrt_host_callback.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_tensor_utils.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
bool IsSingleDevice(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return compile_metadata.num_replicas() == 1 &&
compile_metadata.num_cores_per_replica() == 1;
}
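// Computes the dtype and shape of every argument; for variable arguments the
// input tensor only carries the variable's key, so the real dtype and shape
// are looked up in the restore tensor registry.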
absl::StatusOr<std::vector<DtypeAndShape>> BuildDtypeAndShape(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry) {
std::vector<DtypeAndShape> dtypes_and_shapes;
dtypes_and_shapes.reserve(inputs.size());
int variable_index = 0;
for (int i = 0; i < inputs.size(); i++) {
if (variable_index < variable_arg_indices.size() &&
i == variable_arg_indices[variable_index]) {
TF_ASSIGN_OR_RETURN(auto dtype_and_shape,
ifrt_restore_tensor_registry.GetDtypeAndShape(
inputs[i].scalar<tsl::tstring>()()));
dtypes_and_shapes.push_back(std::move(dtype_and_shape));
variable_index++;
} else {
dtypes_and_shapes.push_back(DtypeAndShape{.dtype = inputs[i].dtype(),
.shape = inputs[i].shape()});
}
}
return dtypes_and_shapes;
}
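// Maps the runtime IFRT device list onto an XLA device assignment of shape
// (num_replicas, num_cores_per_replica), filled in replica-major order.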
absl::StatusOr<xla::DeviceAssignment> GetRuntimeXlaDeviceAssignment(
const tsl::RCReference<xla::ifrt::DeviceList>& device_list,
int num_replicas, int num_cores_per_replica) {
const int num_devices = num_replicas * num_cores_per_replica;
const absl::Span<xla::ifrt::Device* const> devices = device_list->devices();
if (devices.size() != num_devices) {
return absl::InternalError(
absl::StrCat("Device assignment has ", devices.size(),
" devices, but expected ", num_devices));
}
xla::DeviceAssignment da(num_replicas, num_cores_per_replica);
int device_index = 0;
for (int replica_idx = 0; replica_idx < num_replicas; replica_idx++) {
for (int core_idx = 0; core_idx < num_cores_per_replica;
core_idx++, device_index++) {
da(replica_idx, core_idx) = devices[device_index]->Id().value();
VLOG(3) << "Added IFRT device id: " << da(replica_idx, core_idx);
}
}
return da;
}
static constexpr absl::string_view kDeviceAssignmentAttr = "device_assignment";
static constexpr absl::string_view kEntryFuncName = "main";
absl::StatusOr<std::vector<xla::ifrt::Device*>> GetAssignedDevices(
mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client,
int num_replicas, int num_cores_per_replica) {
auto op = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!op) {
return absl::InternalError("Could not find entry function in MLIR Module.");
}
auto device_assignment_attr =
op->getAttrOfType<mlir::ArrayAttr>(kDeviceAssignmentAttr);
std::optional<std::vector<int>> device_assignment_attr_val;
if (device_assignment_attr && !device_assignment_attr.getValue().empty()) {
std::vector<int> coords;
coords.reserve(num_replicas * num_cores_per_replica);
for (auto coord_attr : device_assignment_attr.getValue()) {
auto coord_attr_val = mlir::dyn_cast<mlir::IntegerAttr>(coord_attr);
if (!coord_attr_val) {
return absl::InternalError(
llvm::formatv("Device assignment attribute is not an integer: {0}",
device_assignment_attr)
.str());
}
coords.push_back(coord_attr_val.getInt());
}
device_assignment_attr_val = std::move(coords);
}
return GetAssignedIfrtDevices(ifrt_client, num_replicas,
num_cores_per_replica,
device_assignment_attr_val);
}
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
IfrtServingExecutable::Create(
int64_t program_id, absl::string_view model_name,
absl::string_view signature_name, mlir::OwningOpRef<mlir::ModuleOp> module,
std::shared_ptr<xla::ifrt::Client> client,
tsl::thread::ThreadPool* thread_pool,
IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry,
const IfrtRestoreTensorRegistry* ifrt_restore,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
tensorflow::DeviceMgr* device_mgr,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn,
IfrtServingCoreSelector* ifrt_serving_core_selector,
    tsl::protobuf::Message* compilation_environment_proto) {
TF_ASSIGN_OR_RETURN(
tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata,
GetCompileMetadata(*module, *client));
TF_ASSIGN_OR_RETURN(
std::vector<xla::ifrt::Device*> assigned_devices,
GetAssignedDevices(*module, *client,
original_compile_metadata.num_replicas(),
original_compile_metadata.num_cores_per_replica()));
auto executable = absl::WrapUnique(new IfrtServingExecutable(
program_id, model_name, signature_name, std::move(module),
std::move(client), thread_pool, ifrt_loaded_variable_registry,
ifrt_restore, checkpoint_loader_queue, device_mgr,
std::move(shape_representation_fn), ifrt_serving_core_selector,
std::move(original_compile_metadata),
xla::ifrt::BasicDeviceList::Create(xla::ifrt::BasicDeviceList::Devices(
assigned_devices.begin(), assigned_devices.end())),
      compilation_environment_proto));
return executable;
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
IfrtServingExecutable::ConvertTensorToArray(
const tensorflow::Tensor& tensor,
const tsl::RCReference<xla::ifrt::DeviceList>& device_list,
const xla::OpSharding& sharding) {
xla::ifrt::Shape input_shape = ToIfrtShape(tensor.shape());
VLOG(2) << "Converting tensor of shape " << input_shape;
TF_ASSIGN_OR_RETURN(auto hlo_sharding, xla::HloSharding::FromProto(sharding));
return MakeArrayFromTensor(*ifrt_client_, tensor, device_list,
std::move(hlo_sharding), thread_pool_);
}
absl::StatusOr<std::vector<tensorflow::FunctionDef>> BuildFunctionDef(
mlir::ModuleOp module) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_RETURN_IF_ERROR(ExportFunctionDefs(
module,
[&](tensorflow::FunctionDef function_def) {
function_defs.push_back(std::move(function_def));
return absl::OkStatus();
},
false));
return function_defs;
}
struct HostCallbackBuilderInfo {
tensorflow::tf2xla::HostTransferMetadata device_to_host;
tensorflow::tf2xla::HostTransferMetadata host_to_device;
};
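// Pairs up the device-to-host and host-to-device transfer metadata that share
// a callback key, so each host callback is built with both directions of the
// transfer.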
absl::StatusOr<absl::flat_hash_map<std::string, HostCallbackBuilderInfo>>
GroupHostCallbackByKey(const Tf2HloResult& tf2hlo_result) {
absl::flat_hash_map<std::string, HostCallbackBuilderInfo> host_callbacks;
for (const auto& device_to_host :
tf2hlo_result.host_compute_metadata.device_to_host()) {
auto& host_callback = host_callbacks[device_to_host.key()];
host_callback.device_to_host = device_to_host;
}
for (const auto& host_to_device :
tf2hlo_result.host_compute_metadata.host_to_device()) {
auto& host_callback = host_callbacks[host_to_device.key()];
host_callback.host_to_device = host_to_device;
}
return host_callbacks;
}
absl::StatusOr<xla::HostCallback> BuildHostCallback(
absl::string_view key, const HostCallbackBuilderInfo& builder_info,
mlir::ModuleOp module, tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
VLOG(2) << "BuildHostCallback for key: " << key;
DCHECK(device_mgr);
xla::HostCallback host_callback;
std::vector<DtypeAndShape> operand_type_and_shapes;
std::vector<DtypeAndShape> result_type_and_shapes;
auto to_xla_shape = [](tensorflow::DataType data_type,
const tensorflow::TensorShapeProto& shape)
-> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_ASSIGN_OR_RETURN(tensorflow::TensorShape tensor_shape,
tensorflow::TensorShape::BuildTensorShape(shape));
if (absl::Status status = tensorflow::TensorShapeToXLAShape(
data_type, tensor_shape, &xla_shape);
status.ok()) {
return xla_shape;
} else {
return status;
}
};
operand_type_and_shapes.reserve(builder_info.device_to_host.metadata_size());
result_type_and_shapes.reserve(builder_info.host_to_device.metadata_size());
for (const auto& metadata : builder_info.device_to_host.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.operands.push_back(
{.channel_id = channel_id, .shape = shape});
operand_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
for (const auto& metadata : builder_info.host_to_device.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.results.push_back(
{.channel_id = channel_id, .shape = std::move(shape)});
result_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> callback_module,
ExtractCallbackModule(module, key));
TF_ASSIGN_OR_RETURN(std::vector<tensorflow::FunctionDef> function_defs,
BuildFunctionDef(*callback_module));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TfHostCallback> tf_host_callback,
TfHostCallback::Create(function_defs, key, operand_type_and_shapes,
result_type_and_shapes, device_mgr));
host_callback.callback = [tf_host_callback = tf_host_callback.get()](
void** output, void** input) {
return tf_host_callback->Call(input, output);
};
tf_host_callbacks.push_back(std::move(tf_host_callback));
return host_callback;
}
absl::StatusOr<std::vector<xla::HostCallback>> BuildHostCallbacks(
const Tf2HloResult& tf2hlo_result, mlir::ModuleOp module,
tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
TF_ASSIGN_OR_RETURN(auto host_callback_maps,
GroupHostCallbackByKey(tf2hlo_result));
std::vector<xla::HostCallback> host_callbacks;
host_callbacks.reserve(host_callback_maps.size());
for (const auto& [entry_function, builder_info] : host_callback_maps) {
TF_ASSIGN_OR_RETURN(auto host_callback,
BuildHostCallback(entry_function, builder_info, module,
device_mgr, tf_host_callbacks));
host_callbacks.push_back(std::move(host_callback));
}
return host_callbacks;
}
absl::StatusOr<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::CreateExecutableSynchronously(
mlir::OwningOpRef<mlir::ModuleOp> module_copy,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
TF_ASSIGN_OR_RETURN(
Tf2HloResult tf2hlo_result,
CompileTfToHlo(*module_copy, dtypes_and_shapes, signature_name(),
*ifrt_client_, compile_metadata,
shape_representation_fn_));
const int num_replicas = tf2hlo_result.compile_metadata.num_replicas();
const int num_partitions =
tf2hlo_result.compile_metadata.num_cores_per_replica();
VLOG(2) << " Number of replcas is " << num_replicas
<< " and num_partitions is " << num_partitions;
if (num_replicas > 1) {
return absl::UnimplementedError(
absl::StrCat("Only support single replica, but replica number is ",
num_replicas, " and num_partitions is ", num_partitions));
}
xla::CompileOptions xla_compile_options;
if (compilation_environment_proto_) {
tsl::protobuf::Message* comp_env_copy =
compilation_environment_proto_->New();
comp_env_copy->CopyFrom(*compilation_environment_proto_);
TF_RETURN_IF_ERROR(
xla_compile_options.executable_build_options.mutable_comp_envs()
->AddEnv(absl::WrapUnique<tsl::protobuf::Message>(comp_env_copy)));
}
xla_compile_options.executable_build_options.set_num_replicas(num_replicas);
xla_compile_options.executable_build_options.set_num_partitions(
num_partitions);
xla_compile_options.executable_build_options.set_use_spmd_partitioning(
original_compile_metadata_.use_spmd_for_xla_partitioning());
xla_compile_options.parameter_is_tupled_arguments = false;
if (UsePortableExecution(compile_metadata)) {
xla_compile_options.compile_portable_executable = true;
} else {
TF_ASSIGN_OR_RETURN(
xla::DeviceAssignment da,
GetRuntimeXlaDeviceAssignment(assigned_device_list_, num_replicas,
num_partitions));
VLOG(2) << "Device assignment :" << da.ToString();
xla_compile_options.executable_build_options.set_device_assignment(da);
}
std::vector<std::unique_ptr<TfHostCallback>> tf_host_callbacks;
TF_ASSIGN_OR_RETURN(auto host_callbacks,
BuildHostCallbacks(tf2hlo_result, *module_copy,
device_mgr_, tf_host_callbacks));
std::vector<tsl::RCReference<xla::ifrt::LoadedHostCallback>>
loaded_host_callbacks;
loaded_host_callbacks.reserve(host_callbacks.size());
for (const auto& host_callback : host_callbacks) {
loaded_host_callbacks.push_back(
tsl::MakeRef<xla::ifrt::PjRtHostSendAndRecvLoadedHostCallback>(
ifrt_client_.get(),
std::make_unique<xla::HostCallback>(host_callback)));
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::ifrt::LoadedExecutable> ifrt_executable,
ifrt_client_->GetDefaultCompiler()->Compile(
std::make_unique<xla::ifrt::HloProgram>(
tf2hlo_result.mlir_hlo_module.get()),
std::make_unique<xla::ifrt::XlaCompileOptions>(
xla_compile_options, loaded_host_callbacks)));
SharedCachedExecutableBundle executable_bundle =
std::make_shared<CachedExecutableBundle>();
executable_bundle->ifrt_executable = std::move(ifrt_executable);
executable_bundle->compile_metadata =
std::move(tf2hlo_result.compile_metadata);
executable_bundle->host_callbacks = std::move(tf_host_callbacks);
return executable_bundle;
}
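// Executables are cached keyed on the input shapes; compilation is started at
// most once per key, and the resulting future is shared so concurrent callers
// wait on the same compilation.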
xla::ifrt::Future<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::LookUpOrCreateExecutable(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
std::vector<tensorflow::TensorShape> input_shapes;
for (const auto& dtype_and_shape : dtypes_and_shapes) {
input_shapes.push_back(dtype_and_shape.shape);
}
Key key = {.input_shapes = std::move(input_shapes)};
xla::ifrt::Promise<SharedCachedExecutableBundle> promise;
xla::ifrt::Future<SharedCachedExecutableBundle> future;
mlir::OwningOpRef<mlir::ModuleOp> module_copy;
{
absl::MutexLock lock(&mutex_);
const auto it = executable_bundles_.find(key);
if (it != executable_bundles_.end()) {
return it->second;
}
if (is_frozen_) {
xla::ifrt::Future<SharedCachedExecutableBundle> frozen_future(
absl::FailedPreconditionError(
"Cannot compile for new input shapes after the executable is "
"already frozen."));
return frozen_future;
}
promise = xla::ifrt::Future<SharedCachedExecutableBundle>::CreatePromise();
future = xla::ifrt::Future<SharedCachedExecutableBundle>(promise);
executable_bundles_.emplace(key, future);
module_copy = mlir::OwningOpRef<mlir::ModuleOp>(module_->clone());
}
LOG(INFO) << "Cache missed. Building executable";
absl::StatusOr<SharedCachedExecutableBundle> executable_bundle =
CreateExecutableSynchronously(std::move(module_copy), compile_metadata,
dtypes_and_shapes);
promise.Set(std::move(executable_bundle));
return future;
}
void IfrtServingExecutable::Freeze() {
LOG(INFO) << "Freezing executable. Program id: " << program_id_;
absl::MutexLock lock(&mutex_);
is_frozen_ = true;
module_ = nullptr;
}
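// Portable execution compiles a device-agnostic executable and selects the
// core at run time; it is only used for single-replica, single-core programs
// when a core selector is available.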
bool IfrtServingExecutable::UsePortableExecution(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return IsSingleDevice(compile_metadata) && ifrt_serving_core_selector_;
}
absl::StatusOr<std::vector<tensorflow::Tensor>> IfrtServingExecutable::Execute(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices) {
for (int i = 1; i < variable_arg_indices.size(); i++) {
if (variable_arg_indices[i] <= variable_arg_indices[i - 1]) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected variable_arg_indices in ascending order. But subsequence "
"starting at ",
i - 1, ": (", variable_arg_indices[i - 1], ", ",
variable_arg_indices[i], ")", " is not in ascending order"));
}
}
if (!variable_arg_indices.empty() &&
inputs.size() <= variable_arg_indices.back()) {
    return absl::FailedPreconditionError(absl::StrCat(
        "Variable argument index ", variable_arg_indices.back(),
        " is out of range for ", inputs.size(), " inputs."));
}
for (const int i : variable_arg_indices) {
if (inputs[i].dtype() != tensorflow::DT_STRING ||
!tensorflow::TensorShapeUtils::IsScalar(inputs[i].shape())) {
return absl::FailedPreconditionError(
absl::StrCat("Expected a scalar tensor as loaded variable array key, "
"but got type ",
inputs[i].dtype(), " and shape ",
inputs[i].shape().DebugString(), " at index ", i));
}
}
TF_ASSIGN_OR_RETURN(std::vector<DtypeAndShape> dtypes_and_shapes,
BuildDtypeAndShape(inputs, variable_arg_indices,
ifrt_restore_tensor_registry_));
tensorflow::tpu::TPUCompileMetadataProto compile_metadata =
original_compile_metadata_;
TF_RETURN_IF_ERROR(
UpdateCompileMetadata(compile_metadata, dtypes_and_shapes));
tsl::DeviceReservation device_reservation(kNoCoreSelectedIndex, nullptr);
tsl::RCReference<xla::ifrt::DeviceList> device_list;
if (UsePortableExecution(compile_metadata)) {
device_reservation =
ifrt_serving_core_selector_->ReserveDevice(program_id_);
compile_metadata.clear_device_assignment();
TF_ASSIGN_OR_RETURN(xla::ifrt::Device * device,
ifrt_client_->LookupDevice(xla::ifrt::DeviceId(
device_reservation.device_index())));
device_list = xla::ifrt::BasicDeviceList::Create(
xla::ifrt::BasicDeviceList::Devices({device}));
} else {
device_list = assigned_device_list_;
}
TF_ASSIGN_OR_RETURN(SharedCachedExecutableBundle executable_bundle,
LookUpOrCreateExecutable(
compile_metadata, absl::MakeSpan(dtypes_and_shapes))
.Await());
if (executable_bundle->compile_metadata.args().size() !=
dtypes_and_shapes.size()) {
return absl::InternalError(absl::StrCat(
"Expected ", executable_bundle->compile_metadata.args().size(),
" but got ", dtypes_and_shapes.size(), " arguments"));
}
TF_RETURN_IF_ERROR(AsyncLoadIfrtArray(inputs, variable_arg_indices,
*executable_bundle, device_list));
VLOG(2) << "Completed AsyncLoadIfrtArray";
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
args.reserve(inputs.size());
int variable_index = 0;
for (int i = 0; i < inputs.size(); i++) {
if (variable_index < variable_arg_indices.size() &&
i == variable_arg_indices[variable_index]) {
std::vector<int> device_ids;
device_ids.reserve(device_list->size());
for (xla::ifrt::Device* device : device_list->devices()) {
device_ids.push_back(device->Id().value());
}
TF_ASSIGN_OR_RETURN(
xla::HloSharding hlo_sharding,
xla::HloSharding::FromProto(
executable_bundle->compile_metadata.args()[i].sharding()));
IfrtLoadedVariableRegistry::Key key{
.device_ids = std::move(device_ids),
.input_name = inputs[i].scalar<tsl::tstring>()(),
.hlo_sharding = std::move(hlo_sharding),
};
TF_ASSIGN_OR_RETURN(
auto loaded_variable,
ifrt_loaded_variable_registry_.GetLoadedVariable(key));
TF_ASSIGN_OR_RETURN(tsl::RCReference<xla::ifrt::Array> single_array,
loaded_variable.array.Await());
args.push_back(std::move(single_array));
variable_index++;
} else {
TF_ASSIGN_OR_RETURN(
auto single_array,
ConvertTensorToArray(
inputs[i], device_list,
executable_bundle->compile_metadata.args()[i].sharding()));
args.push_back(single_array);
}
}
DCHECK_EQ(args.size(), dtypes_and_shapes.size());
VLOG(2) << "Start Execution";
std::optional<tsl::RCReference<xla::ifrt::DeviceList>> execution_device_list;
if (UsePortableExecution(compile_metadata)) {
execution_device_list = device_list;
}
TF_ASSIGN_OR_RETURN(
auto execution_result,
executable_bundle->ifrt_executable->Execute(
absl::MakeSpan(args), {.fill_status = true},
std::move(execution_device_list)));
auto status = execution_result.status.Await();
TF_RETURN_IF_ERROR(status);
if (executable_bundle->compile_metadata.retvals().size() !=
execution_result.outputs.size()) {
return absl::InternalError(absl::StrCat(
"Expect ", executable_bundle->compile_metadata.retvals().size(),
" but got ", execution_result.outputs.size(), " outputs"));
}
std::vector<xla::ifrt::Future<tensorflow::Tensor>> output_futures;
output_futures.reserve(execution_result.outputs.size());
for (int i = 0; i < execution_result.outputs.size(); ++i) {
tensorflow::TensorShape tensor_shape;
const tsl::RCReference<xla::ifrt::Array>& array_for_copy =
execution_result.outputs[i];
const tpu::TPUCompileMetadataProto::Retval& metadata_retval =
executable_bundle->compile_metadata.retvals()[i];
VLOG(2) << "Output sharding: " << array_for_copy->sharding().DebugString();
TF_ASSIGN_OR_RETURN(auto hlo_sharding, xla::HloSharding::FromProto(
metadata_retval.sharding()));
output_futures.push_back(MakeTensorFromArray(*ifrt_client_, *array_for_copy,
hlo_sharding, device_list,
thread_pool_));
}
std::vector<tensorflow::Tensor> outputs;
outputs.reserve(output_futures.size());
for (auto& output_future : output_futures) {
TF_ASSIGN_OR_RETURN(auto tensor, output_future.Await());
outputs.push_back(std::move(tensor));
}
return outputs;
}
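// Starts asynchronous loading of every restored variable referenced by
// `variable_arg_indices` into IFRT arrays sharded according to the compiled
// program's argument shardings.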
absl::Status IfrtServingExecutable::AsyncLoadIfrtArray(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const CachedExecutableBundle& executable_bundle,
const tsl::RCReference<xla::ifrt::DeviceList>& devices) {
for (const int i : variable_arg_indices) {
if (inputs[i].dtype() != tensorflow::DT_STRING ||
!tensorflow::TensorShapeUtils::IsScalar(inputs[i].shape())) {
return absl::FailedPreconditionError(
absl::StrCat("Expected a scalar tensor as loaded variable array key, "
"but got type ",
inputs[i].dtype(), " and shape ",
inputs[i].shape().DebugString(), " at index ", i));
}
std::string runtime_name = inputs[i].scalar<tsl::tstring>()();
TF_ASSIGN_OR_RETURN(
xla::HloSharding hlo_sharding,
xla::HloSharding::FromProto(
executable_bundle.compile_metadata.args()[i].sharding()));
VariableDeviceShardingConfig sharding_config{
.hlo_sharding = std::move(hlo_sharding),
};
for (xla::ifrt::Device* device : devices->devices()) {
sharding_config.device_ids.push_back(device->Id().value());
}
TF_RETURN_IF_ERROR(
ifrt_serving::AsyncLoadRestoredTensorAsIfrtLoadedVariable(
runtime_name, ifrt_client_, thread_pool_,
ifrt_restore_tensor_registry_, ifrt_loaded_variable_registry_,
checkpoint_loader_queue_, sharding_config));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Return;
using ::tsl::testing::StatusIs;
struct VariableInputTestParam {
std::vector<tensorflow::Tensor> in_tensors;
  std::vector<bool> is_variable;
std::vector<tensorflow::Tensor> expected_out_tensors;
};
using VariableInputTest = ::testing::TestWithParam<VariableInputTestParam>;
class IfrtServingExecutableTest : public ::testing::Test {
protected:
explicit IfrtServingExecutableTest() {
helper_ = std::make_unique<test_utils::IfrtServingExecutableTestHelper>(
&selector_);
}
tsl::test_util::MockServingDeviceSelector selector_;
std::unique_ptr<test_utils::IfrtServingExecutableTestHelper> helper_;
};
TEST_F(IfrtServingExecutableTest, Basic) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
const auto expected_out =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, MultipleShapes) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(6)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
const auto expected_out2 =
AsTensor<int32_t>({30}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs1, outputs2;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
TF_ASSERT_OK_AND_ASSIGN(outputs2,
executable->Execute(absl::MakeSpan(inputs2), {}));
}
ASSERT_EQ(executable->num_executables(), 2);
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
EXPECT_THAT(outputs2, ElementsAre(TensorEq(expected_out2)));
}
TEST_F(IfrtServingExecutableTest, ReturnFailOnUncompiledShapeAfterFrozen) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(3)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
std::vector<tensorflow::Tensor> outputs1;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
executable->Freeze();
outputs1.clear();
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs2;
auto status = executable->Execute(absl::MakeSpan(inputs2), {});
EXPECT_THAT(status, StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_F(IfrtServingExecutableTest, Spmd) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, SpmdTwoReturns) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable_two_returns.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out0 = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
const auto expected_out1 = AsTensor<int32_t>({20, 20, 20, 20, 20, 20, 20, 20},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result,
ElementsAre(TensorEq(expected_out0), TensorEq(expected_out1)));
}
TEST_F(IfrtServingExecutableTest, NoReturn) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("executable_no_return.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
ASSERT_EQ(result.size(), 0);
}
TEST_P(VariableInputTest, InterleaveVariable) {
tsl::test_util::MockServingDeviceSelector device_selector;
test_utils::IfrtServingExecutableTestHelper helper(&device_selector);
int64_t program_id = 111111;
EXPECT_CALL(device_selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper.MakeExecutable(
program_id, GetMlirModulePath("executable_long_inputs.mlir"));
IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry =
helper.ifrt_restore_tensor_registry();
std::vector<tensorflow::Tensor> inputs;
std::vector<int> loaded_variable_indices;
for (int i = 0; i < GetParam().in_tensors.size(); i++) {
if (GetParam().is_variable[i]) {
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restore_tensor_info = {
.dtype_and_shape{.dtype = GetParam().in_tensors[i].dtype(),
.shape = GetParam().in_tensors[i].shape()},
.tensor_future = input_tensor_future};
std::string variable_name = absl::StrCat("variable_", i);
      TF_ASSERT_OK(ifrt_restore_tensor_registry->TryRegister(
          variable_name, restore_tensor_info));
loaded_variable_indices.push_back(i);
input_tensor_promise.Set(GetParam().in_tensors[i]);
tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
key_tensor.scalar<tsl::tstring>()() = variable_name;
inputs.push_back(key_tensor);
} else {
inputs.push_back(GetParam().in_tensors[i]);
}
}
ASSERT_EQ(inputs.size(), GetParam().is_variable.size());
for (int i = 0; i < helper.num_cores(); i++) {
TF_ASSERT_OK(executable
->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices))
.status());
}
TF_ASSERT_OK_AND_ASSIGN(
auto result,
executable->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices)));
EXPECT_THAT(result,
ElementsAre(TensorEq(GetParam().expected_out_tensors[0]),
TensorEq(GetParam().expected_out_tensors[1]),
TensorEq(GetParam().expected_out_tensors[2])));
}
INSTANTIATE_TEST_SUITE_P(
VariableInputTests, VariableInputTest,
::testing::ValuesIn<VariableInputTestParam>(
{
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, true, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, false, false, true, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, true, true, false, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8323374f-8b1c-43c0-bf25-98a044b762e4 | cpp | tensorflow/tensorflow | ifrt_restore_tensor_registry | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace ifrt_serving {
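// Registers restore metadata under `name`. The lookup uses operator[], so a
// slot is created on first touch; re-registration is rejected only when the
// existing slot already holds a valid tensor future.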
absl::Status IfrtRestoreTensorRegistry::TryRegister(
absl::string_view name, RestoredTensorInfo restored_tensor_info) {
absl::MutexLock lock(&mutex_);
auto& info = restored_tensors_[name];
if (info.tensor_future.IsValid()) {
return absl::AlreadyExistsError(
absl::StrCat("Variable '", name, "' already registered."));
}
info = std::move(restored_tensor_info);
return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor>
IfrtRestoreTensorRegistry::GetRestoredTensor(absl::string_view name) const {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return xla::ifrt::Future<tensorflow::Tensor>(
absl::NotFoundError(absl::StrCat("Variable '", name, "' not found.")));
}
return it->second.tensor_future;
}
absl::Status IfrtRestoreTensorRegistry::SetUsedByHost(absl::string_view name) {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return absl::NotFoundError(
absl::StrCat("Variable '", name, "' not found."));
}
it->second.used_by_host = true;
return absl::OkStatus();
}
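// Invalidates the tensor futures of every entry that was never marked as
// used by the host, replacing them with an UnavailableError so the restored
// host tensors can be released once the device arrays are materialized.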
void IfrtRestoreTensorRegistry::Freeze() {
absl::MutexLock lock(&mutex_);
xla::ifrt::Future<tensorflow::Tensor> release_tensor_future(
absl::UnavailableError("Tensor is already release."));
for (auto& [name, info] : restored_tensors_) {
if (!info.used_by_host) {
info.tensor_future = release_tensor_future;
}
}
}
absl::StatusOr<DtypeAndShape> IfrtRestoreTensorRegistry::GetDtypeAndShape(
absl::string_view name) const {
absl::MutexLock lock(&mutex_);
auto it = restored_tensors_.find(name);
if (it == restored_tensors_.end()) {
return absl::NotFoundError(
absl::StrCat("Variable '", name, "' not found."));
}
return it->second.dtype_and_shape;
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
using tsl::testing::IsOk;
using tsl::testing::StatusIs;
namespace tensorflow {
namespace ifrt_serving {
namespace {
TEST(IfrtRestoreTensorRegistryTest, RetrieveNonRegisteredTensorFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest,
RetrieveNonRegisteredTensorDTypeAndShapeFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetDtypeAndShape("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, SetNonExistedTensorAsUsedByHostFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredExistedTensorFails) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtRestoreTensorRegistryTest, SetTensorAsUsedByHost) {
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"), IsOk());
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredTensorCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_1").Await());
test::ExpectEqual(retrieved, input_tensor);
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest,
RegisteredTensorDTypeAndShapeCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest, FreezeTensorRegistry) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise1 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future1 = xla::ifrt::Future<tensorflow::Tensor>(promise1);
auto promise2 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future2 = xla::ifrt::Future<tensorflow::Tensor>(promise2);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info1 = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future1};
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info2 = {
.used_by_host = true,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future2};
IfrtRestoreTensorRegistry registry;
TF_ASSERT_OK(registry.TryRegister("input_tensor_1", restored_tensor_info1));
TF_ASSERT_OK(registry.TryRegister("input_tensor_2", restored_tensor_info2));
promise1.Set(input_tensor);
promise2.Set(input_tensor);
registry.Freeze();
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kUnavailable));
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_2").Await());
test::ExpectEqual(retrieved, input_tensor);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae917258-0a9b-4bd7-b025-c4d9b3ac9c98 | cpp | tensorflow/tensorflow | ifrt_serving_core_selector | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/framework/serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
IfrtServingCoreSelector::IfrtServingCoreSelector(
tsl::ServingDeviceSelector* device_selector, int num_cores)
: device_selector_(device_selector), num_cores_(num_cores) {}
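// The first `num_cores_` runs of each program are pinned deterministically to
// cores 0..num_cores_-1, which in practice gives one warm-up execution per
// core; only after that is the underlying serving device selector consulted.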
tsl::DeviceReservation IfrtServingCoreSelector::ReserveDevice(
int64_t program_id) {
absl::MutexLock lock(&mu_);
int64_t run_count = run_counter_[program_id]++;
if (run_count < num_cores_) {
return tsl::DeviceReservation(run_count, nullptr);
}
return device_selector_->ReserveDevice(absl::StrCat(program_id));
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
class IfrtServingCoreSelectorTest : public ::testing::Test {
protected:
explicit IfrtServingCoreSelectorTest() {
core_selector_ = std::make_unique<IfrtServingCoreSelector>(
&serving_device_selector_, num_cores_);
}
tsl::test_util::MockServingDeviceSelector serving_device_selector_;
std::unique_ptr<IfrtServingCoreSelector> core_selector_;
int num_cores_ = 2;
};
TEST_F(IfrtServingCoreSelectorTest, ReservedDevicesReturns) {
int64_t program_id1 = 111111;
EXPECT_CALL(serving_device_selector_,
ReserveDevice(absl::StrCat(program_id1)))
.WillOnce([this](::testing::Unused) {
return tsl::DeviceReservation(0, &serving_device_selector_);
});
for (int i = 0; i < num_cores_; ++i) {
EXPECT_THAT(core_selector_->ReserveDevice(program_id1).device_index(), i);
}
tsl::DeviceReservation reservation =
core_selector_->ReserveDevice(program_id1);
EXPECT_THAT(reservation.device_index(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
233cc73f-0e99-48c3-84bf-b56c721034a9 | cpp | tensorflow/tensorflow | sharding_utils | tensorflow/core/tpu/kernels/sharding_utils.cc | tensorflow/core/tpu/kernels/sharding_utils_test.cc | #include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tensorflow {
namespace sharding_internal {
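// Slicing supports ranks 1 through 8, matching the GetSliceIndices
// specializations below; each dimension, after padding, must divide evenly
// into its split count.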
absl::Status ValidateShapesForSlice(absl::string_view input_name,
const Tensor* input,
const std::vector<int32_t>& num_splits,
const std::vector<int32_t>& paddings) {
const auto& ishape = input->shape();
Status s;
const int rank = ishape.dims();
const auto& input_shape = ishape.dim_sizes();
if (rank <= 0 || rank > 8) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " must have rank in range (0, 8], but got ", rank, "."));
} else if (rank != num_splits.size()) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " rank must be the same as 'num_splits' length ",
num_splits.size(), ", but got rank ", rank, "."));
} else {
for (int dim = 0; dim < rank; ++dim) {
const auto input_shape_dim = input_shape[dim];
const auto paddings_dim = paddings[dim];
const auto num_splits_dim = num_splits[dim];
if ((input_shape_dim + paddings_dim) % num_splits_dim != 0) {
s = absl::InvalidArgumentError(absl::StrCat(
input_name, " shape dimension ", dim, " (", input_shape_dim,
") with padding ", paddings_dim,
" must be evenly divisible by 'num_splits' ", num_splits_dim, "."));
break;
}
}
}
return s;
}
}
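// GetSliceIndices maps a flat slice index to the per-dimension start offsets
// of that slice within the full tensor. The flat index is decoded in
// row-major order (last dimension varies fastest):
//   offset[d] = ((index / stride_d) % num_partitions[d]) * slice_shape[d],
// where stride_d is the product of num_partitions over dimensions after d.
// For example, with num_partitions = {2, 2} and slice_shape = {2, 2},
// index 3 decodes to offsets {2, 2}, i.e. the bottom-right slice.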
template <>
Eigen::DSizes<Eigen::DenseIndex, 1> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 1>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 1> subscript;
subscript[0] = index * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 2> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 2>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 2> subscript;
subscript[1] = (index % num_partitions[1]) * slice_shape[1];
subscript[0] = (index / num_partitions[1]) * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 3> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 3> subscript;
subscript[2] = (index % num_partitions[2]) * slice_shape[2];
subscript[1] =
((index / num_partitions[2]) % num_partitions[1]) * slice_shape[1];
subscript[0] =
(index / (num_partitions[2] * num_partitions[1])) * slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 4> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 4>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 4> subscript;
subscript[3] = (index % num_partitions[3]) * slice_shape[3];
subscript[2] =
((index / num_partitions[3]) % num_partitions[2]) * slice_shape[2];
subscript[1] =
((index / (num_partitions[3] * num_partitions[2])) % num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[3] * num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 5> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 5>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 5> subscript;
subscript[4] = (index % num_partitions[4]) * slice_shape[4];
subscript[3] =
((index / num_partitions[4]) % num_partitions[3]) * slice_shape[3];
subscript[2] =
((index / (num_partitions[4] * num_partitions[3])) % num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[4] * num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] = (index / (num_partitions[4] * num_partitions[3] *
num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 6> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 6>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 6> subscript;
subscript[5] = (index % num_partitions[5]) * slice_shape[5];
subscript[4] =
((index / num_partitions[5]) % num_partitions[4]) * slice_shape[4];
subscript[3] =
((index / (num_partitions[5] * num_partitions[4])) % num_partitions[3]) *
slice_shape[3];
subscript[2] =
((index / (num_partitions[5] * num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] = ((index / (num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[5] * num_partitions[4] * num_partitions[3] *
num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 7> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 7>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 7> subscript;
subscript[6] = (index % num_partitions[6]) * slice_shape[6];
subscript[5] =
((index / num_partitions[6]) % num_partitions[5]) * slice_shape[5];
subscript[4] =
((index / (num_partitions[6] * num_partitions[5])) % num_partitions[4]) *
slice_shape[4];
subscript[3] =
((index / (num_partitions[6] * num_partitions[5] * num_partitions[4])) %
num_partitions[3]) *
slice_shape[3];
subscript[2] = ((index / (num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[6] * num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[6] * num_partitions[5] * num_partitions[4] *
num_partitions[3] * num_partitions[2] * num_partitions[1])) *
slice_shape[0];
return subscript;
}
template <>
Eigen::DSizes<Eigen::DenseIndex, 8> GetSliceIndices(
absl::Span<const int32_t> num_partitions,
const Eigen::DSizes<Eigen::DenseIndex, 8>& slice_shape, const int index) {
Eigen::DSizes<Eigen::DenseIndex, 8> subscript;
subscript[7] = (index % num_partitions[7]) * slice_shape[7];
subscript[6] =
((index / num_partitions[7]) % num_partitions[6]) * slice_shape[6];
subscript[5] =
((index / (num_partitions[7] * num_partitions[6])) % num_partitions[5]) *
slice_shape[5];
subscript[4] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5])) %
num_partitions[4]) *
slice_shape[4];
subscript[3] = ((index / (num_partitions[7] * num_partitions[6] *
num_partitions[5] * num_partitions[4])) %
num_partitions[3]) *
slice_shape[3];
subscript[2] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3])) %
num_partitions[2]) *
slice_shape[2];
subscript[1] =
((index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3] * num_partitions[2])) %
num_partitions[1]) *
slice_shape[1];
subscript[0] =
(index / (num_partitions[7] * num_partitions[6] * num_partitions[5] *
num_partitions[4] * num_partitions[3] * num_partitions[2] *
num_partitions[1])) *
slice_shape[0];
return subscript;
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace {
Eigen::ThreadPoolDevice CreateThreadPoolDevice() {
constexpr int kMaxParallelism = 16;
  // Keep the pool alive for the lifetime of the process: the returned
  // ThreadPoolDevice stores a raw pointer into it, so a function-local pool
  // would dangle as soon as this helper returns.
  static tsl::thread::ThreadPool* thread_pool = new tsl::thread::ThreadPool(
      tsl::Env::Default(), tsl::ThreadOptions(), "Resharding", kMaxParallelism);
  Eigen::ThreadPoolDevice device(thread_pool->AsEigenThreadPool(),
                                 kMaxParallelism);
return device;
}
TEST(XlaNDSplitterTest, NoSplits) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2, 2});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings(num_splits.size(), 0);
const int num_outputs = 1;
auto input_tensor =
test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
false)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST(XlaNDSplitterTest, NoSplitsWithPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 1, 1});
const std::vector<int32_t> num_splits = {1, 1, 1};
const std::vector<int> paddings = {0, 1, 1};
const int num_outputs = 1;
auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 0, 0, 0, 1, 0, 0, 0},
TensorShape({2, 2, 2})));
}
TEST(XlaNDSplitterTest, SplitNoPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({4, 4});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings(num_splits.size(), 0);
const int num_outputs = 4;
auto input_tensor = test::AsTensor<int32_t>(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
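  // Output slices are ordered row-major over the split grid:
  // (0,0), (0,1), (1,0), (1,1).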
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 4, 5}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 3, 6, 7}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({8, 9, 12, 13}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({10, 11, 14, 15}, TensorShape({2, 2})));
}
TEST(XlaNDSplitterTest, SplitPartialPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({3, 3});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {1, 1};
const int num_outputs = 4;
auto input_tensor =
test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 3, 4}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({2, 0, 5, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({6, 7, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({8, 0, 0, 0}, TensorShape({2, 2})));
}
TEST(XlaNDSplitterTest, SplitCompletePadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 1});
const std::vector<int32_t> num_splits = {2, 2};
const std::vector<int32_t> paddings = {2, 3};
const int num_outputs = 4;
auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
std::vector<Tensor> output_tensors;
output_tensors.resize(num_outputs);
auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
Tensor** tensor) {
if (i < 0 || i >= output_tensors.size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Index ", i, " out of range [0, ", output_tensors.size(), "]"));
}
output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
*tensor = &output_tensors[i];
return absl::OkStatus();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors[0] = input;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
num_splits, num_outputs, paddings,
true)));
TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
allocate_output_fn, device));
ASSERT_EQ(output_tensors.size(), num_outputs);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 0, 1, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[1],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[2],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
test::ExpectTensorEqual<int32_t>(
output_tensors[3],
test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
}
TEST(XlaNDConcatenatorTest, NoConcats) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2, 2});
const TensorShape output_shape({2, 2, 2});
const std::vector<int32_t> num_concats = {1, 1, 1};
const std::vector<int> paddings(num_concats.size(), 0);
int num_slices = 1;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
TensorShape({2, 2, 2})));
}
TEST(XlaNDConcatenatorTest, ConcatNoPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({4, 4});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings(num_concats.size(), 0);
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 5, 2, 3, 6, 7, 8, 9,
12, 13, 10, 11, 14, 15},
TensorShape({4, 4})));
}
TEST(XlaNDConcatenatorTest, ConcatPartialPadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({3, 3});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings = {1, 1};
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 2, 3, 6, 8, 9, 12},
TensorShape({3, 3})));
}
TEST(XlaNDConcatenatorTest, ConcatCompletePadding) {
auto device = CreateThreadPoolDevice();
const TensorShape input_shape({2, 2});
const TensorShape output_shape({2, 2});
const std::vector<int32_t> num_concats = {2, 2};
const std::vector<int> paddings = {2, 2};
int num_slices = 4;
auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
std::vector<Tensor> input_tensors;
input_tensors.push_back(tensor0);
input_tensors.push_back(tensor1);
input_tensors.push_back(tensor2);
input_tensors.push_back(tensor3);
std::vector<Tensor> output_tensors;
output_tensors.reserve(1);
auto get_output_fn = [&]() {
output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
return &output_tensors.back();
};
auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
output_tensors.push_back(input);
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(
auto concatenator,
(XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
num_concats, num_slices, paddings,
true)));
TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
assign_or_copy_value_fn,
get_output_fn, device));
ASSERT_EQ(output_tensors.size(), 1);
test::ExpectTensorEqual<int32_t>(
output_tensors[0],
test::AsTensor<int32_t>({0, 1, 2, 3}, TensorShape({2, 2})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8e4857a-743f-4562-bda6-e2d2ac928b31 | cpp | tensorflow/tensorflow | ifrt_loaded_variable_utils | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.cc | tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> LoadIfrtVariable(
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const tensorflow::Tensor& variable,
const VariableDeviceShardingConfig& sharding_config) {
return tensorflow::ifrt_serving::MakeArrayFromTensor(
*ifrt_client, variable, sharding_config.device_ids,
sharding_config.hlo_sharding, thread_pool);
}
}
absl::StatusOr<ifrt_serving::DtypeAndShape> GetDtypeAndShape(
const ResourceHandle& resource_handle) {
const std::vector<DtypeAndPartialTensorShape>& dtype_and_partial_shapes =
resource_handle.dtypes_and_shapes();
if (dtype_and_partial_shapes.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1 dtype and shape, got ", dtype_and_partial_shapes.size()));
}
ifrt_serving::DtypeAndShape dtype_and_shape;
if (!dtype_and_partial_shapes.front().shape.AsTensorShape(
&dtype_and_shape.shape)) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to convert partial shape to full tensor shape: ",
dtype_and_partial_shapes.front().shape.DebugString()));
}
dtype_and_shape.dtype = dtype_and_partial_shapes.front().dtype;
return dtype_and_shape;
}
std::string GetRuntimeNameFromVarHandle(const ResourceHandle& handle) {
return absl::StrCat(handle.container(), "__", handle.name());
}
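// Asynchronously turns a restored host tensor into a device-loaded IFRT array.
// A future is registered in the loaded-variable registry up front so that
// concurrent callers deduplicate; the dtype/shape lookup below also serves as
// a check that the tensor was registered during restore. Once the restored
// tensor resolves, the sharding and transfer work runs on
// `checkpoint_loader_queue`, fulfilling the registered future with the
// resulting array (or the error).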
absl::Status AsyncLoadRestoredTensorAsIfrtLoadedVariable(
absl::string_view runtime_name,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
const tsl::thread::ThreadPool& thread_pool,
const ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry,
ifrt_serving::IfrtLoadedVariableRegistry& ifrt_loaded_variable_registry,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
const VariableDeviceShardingConfig& sharding_config) {
IfrtLoadedVariableRegistry::Key loaded_variable_key{
.device_ids = sharding_config.device_ids,
.input_name = std::string(runtime_name),
.hlo_sharding = sharding_config.hlo_sharding,
};
if (ifrt_loaded_variable_registry.GetLoadedVariable(loaded_variable_key)
.ok()) {
VLOG(1) << "Found alread registered variable for " << runtime_name;
return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
if (!restored_tensor_future.IsValid()) {
return absl::InternalError(absl::StrCat(
"LoadVariableOp: failed to fetch variable tensor: ", runtime_name));
}
auto loaded_variable_promise =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>::CreatePromise();
auto loaded_variable_future =
xla::ifrt::Future<tsl::RCReference<xla::ifrt::Array>>(
loaded_variable_promise);
TF_ASSIGN_OR_RETURN(
absl::StatusOr<ifrt_serving::DtypeAndShape> dtype_and_shape,
ifrt_restore_tensor_registry.GetDtypeAndShape(runtime_name));
TF_RETURN_IF_ERROR(ifrt_loaded_variable_registry.TryRegisterLoadedVariable(
loaded_variable_key,
[&]() -> absl::StatusOr<
ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable> {
return ifrt_serving::IfrtLoadedVariableRegistry::LoadedVariable(
{.array = loaded_variable_future});
}));
restored_tensor_future.OnReady(
[ifrt_client = std::move(ifrt_client), &thread_pool = thread_pool,
checkpoint_loader_queue = checkpoint_loader_queue,
sharding_config = sharding_config,
loaded_variable_promise = std::move(loaded_variable_promise)](
absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
if (!restored_tensor.ok()) {
loaded_variable_promise.Set(restored_tensor.status());
return;
}
checkpoint_loader_queue->AddTask(
[ifrt_client = ifrt_client, &thread_pool = thread_pool,
sharding_config = std::move(sharding_config),
restored_tensor = std::move(*restored_tensor),
loaded_variable_promise =
std::move(loaded_variable_promise)]() mutable {
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
variable_array =
LoadIfrtVariable(ifrt_client, thread_pool,
restored_tensor, sharding_config);
loaded_variable_promise.Set(std::move(variable_array));
});
});
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::test::TensorEq;
using tsl::testing::StatusIs;
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableNotFoundWrongName) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
VariableDeviceShardingConfig sharding_config = {
.device_ids = {0},
.hlo_sharding = xla::HloSharding::Replicate(),
};
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(restored_tensor_registry.TryRegister("var_x_wrong",
restored_tensor_info));
promise.Set(input_tensor);
EXPECT_THAT(
AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(ShardingUtilsTest, ShardTensorToIfrtLoadedVariableSucceed) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, TensorShape({2, 2}));
Tensor variable_handle(DT_RESOURCE, TensorShape({}));
ResourceHandle resource_handle;
resource_handle.set_name("var_x");
resource_handle.set_dtypes_and_shapes({{
DT_INT32,
TensorShape({2, 2}),
}});
variable_handle.flat<ResourceHandle>()(0) = std::move(resource_handle);
IfrtRestoreTensorRegistry restored_tensor_registry;
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
constexpr int kMaxParallelism = 16;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), tsl::ThreadOptions(),
"Resharding", kMaxParallelism);
IfrtLoadedVariableRegistry loaded_variable_registry;
auto restore_work_queue = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
VariableDeviceShardingConfig sharding_config{
.device_ids = {0},
.hlo_sharding = xla::HloSharding::Replicate(),
};
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
false,
GetDtypeAndShape(variable_handle.scalar<ResourceHandle>()()).value(),
future};
TF_ASSERT_OK(
restored_tensor_registry.TryRegister("var_x", restored_tensor_info));
TF_ASSERT_OK(AsyncLoadRestoredTensorAsIfrtLoadedVariable(
"var_x", client, thread_pool, restored_tensor_registry,
loaded_variable_registry, restore_work_queue.get(), sharding_config));
promise.Set(input_tensor);
IfrtLoadedVariableRegistry::Key key{
.device_ids = {0},
.input_name = "var_x",
.hlo_sharding = sharding_config.hlo_sharding,
};
TF_ASSERT_OK_AND_ASSIGN(auto v,
loaded_variable_registry.GetLoadedVariable(key));
TF_ASSERT_OK_AND_ASSIGN(auto assembled_array, v.array.Await());
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
assembled_array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 1);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
tensorflow::Tensor host_tensor(input_tensor.dtype(), input_tensor.shape());
TF_ASSERT_OK(
disassembled_arrays[i]
->CopyToHostBuffer(host_tensor.data(), {},
xla::ifrt::ArrayCopySemantics::kAlwaysCopy)
.Await());
EXPECT_THAT(host_tensor, TensorEq(input_tensor));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca34a38a-e8e5-4fb3-b1d4-e583a80562a1 | cpp | tensorflow/tensorflow | ifrt_executable_registry | tensorflow/core/tfrt/ifrt/ifrt_executable_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
ServingExecutableRegistry::Handle::Handle(Handle&& other) {
*this = std::move(other);
}
ServingExecutableRegistry::Handle& ServingExecutableRegistry::Handle::operator=(
Handle&& other) {
if (this != &other) {
program_id_ = std::move(other.program_id_);
other.program_id_ = std::nullopt;
}
return *this;
}
ServingExecutableRegistry::Handle::~Handle() { Release(); }
absl::Status ServingExecutableRegistry::Handle::Freeze() {
if (!program_id_.has_value()) {
return absl::FailedPreconditionError("Program is not registered");
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
return absl::NotFoundError(
absl::StrCat("Program ", *program_id_, " not found in the registry"));
}
VLOG(1) << "Freeze the program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
it->second->Freeze();
return absl::OkStatus();
}
void ServingExecutableRegistry::Handle::Release() {
if (!program_id_.has_value()) {
return;
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
LOG(ERROR) << "Program " << *program_id_ << " not found in the registry";
return;
}
VLOG(1) << "Unregistering program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
ServingExecutableRegistry::executables_->erase(it);
program_id_ = std::nullopt;
}
ServingExecutableRegistry::Handle::Handle(int64_t program_id)
: program_id_(program_id) {}
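// Registration hands ownership of the executable to the process-wide map; the
// returned Handle unregisters the program when it is released or destroyed.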
absl::StatusOr<ServingExecutableRegistry::Handle>
ServingExecutableRegistry::Register(
int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable) {
absl::MutexLock l(&mu_);
VLOG(1) << "Registering program " << program_id << " from signature '"
<< executable->signature_name() << "' of model '"
<< executable->model_name() << "'"
<< ", address is " << executable.get();
if (!executables_->insert({program_id, std::move(executable)}).second) {
return absl::AlreadyExistsError(absl::StrCat(
"Program ", program_id, " already exists in the program registry"));
}
return Handle(program_id);
}
IfrtServingExecutable* ServingExecutableRegistry::Lookup(int64_t program_id) {
absl::ReaderMutexLock l(&mu_);
VLOG(1) << "Looking up program " << program_id;
const auto it = executables_->find(program_id);
return it != executables_->end() ? it->second.get() : nullptr;
}
ABSL_CONST_INIT absl::Mutex ServingExecutableRegistry::mu_(absl::kConstInit);
absl::flat_hash_map<int64_t, std::unique_ptr<IfrtServingExecutable>>* const
ServingExecutableRegistry::executables_ =
new absl::flat_hash_map<int64_t,
std::unique_ptr<IfrtServingExecutable>>();
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static auto* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
CreateIfrtServingExecutable(mlir::MLIRContext& context, int64_t program_id) {
constexpr absl::string_view kDataDirectory =
"tensorflow/core/tfrt/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/executable.mlir"));
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
if (!mlir_module) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to parse MLIR file: ", mlir_module_path));
}
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
IfrtLoadedVariableRegistry ifrt_loaded_variable_registry;
IfrtRestoreTensorRegistry ifrt_restore_tensor_registry;
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
tfrt::CreateMultiThreadedWorkQueue(
4, 4);
TF_ASSIGN_OR_RETURN(std::unique_ptr<tensorflow::DynamicDeviceMgr> device_mgr,
CreateTfDynamicDeviceMgr());
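  // Note: the registries, work queue, and device manager above are locals, so
  // the raw pointers held by the returned executable dangle once this helper
  // returns. That is tolerable only because these tests never execute the
  // program.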
return IfrtServingExecutable::Create(
program_id, "test", "main", std::move(mlir_module), client,
&GetThreadPool(), &ifrt_loaded_variable_registry,
&ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(),
tensorflow::IdentityShapeRepresentationFn(),
nullptr,
nullptr);
}
TEST(IfrtExecutableRegistry, Basic) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, DuplicateRegistrationFails) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
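  // `executable` was moved from above and now holds nullptr; the duplicate
  // registration below still fails on the repeated program_id before the null
  // pointer could matter.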
EXPECT_THAT(
ServingExecutableRegistry::Register(program_id, std::move(executable)),
testing::StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtExecutableRegistry, ReleaseOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_EQ(ServingExecutableRegistry::Lookup(program_id), nullptr);
}
TEST(IfrtExecutableRegistry, FreezeOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
ASSERT_OK(handle.Freeze());
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, FreezeFailedProgramNotRegistered) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_THAT(handle.Freeze(),
testing::StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(IfrtExecutableRegistry, InvalidProgramIdShallReturnNull) {
int64_t program_id = 1234;
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, nullptr);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5a8fa47-b710-49c7-b98f-355ccd37a6f2 | cpp | tensorflow/tensorflow | ifrt_device_utils | tensorflow/core/tfrt/ifrt/ifrt_device_utils.cc | tensorflow/core/tfrt/ifrt/ifrt_device_utils_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/host_compute_metadata.pb.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/service/computation_placer.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/grid.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
static constexpr int kTpuTopologyRank = 4;
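// When provided, `device_assignment` is a flat list of 4-int TPU coordinates
// (x, y, z, core_on_chip), one group per logical device in replica-major
// order. If it is absent, empty, or any device lacks coordinate attributes,
// the client's default device assignment is used instead.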
absl::StatusOr<std::vector<xla::ifrt::Device*>> GetAssignedIfrtDevices(
const xla::ifrt::Client& ifrt_client, int num_replicas,
int num_cores_per_replica,
std::optional<std::vector<int>> device_assignment) {
const int num_devices = num_replicas * num_cores_per_replica;
bool no_device_coordinates = false;
for (auto* device : ifrt_client.devices()) {
if (!device->Attributes().map().contains("coords") ||
!device->Attributes().map().contains("core_on_chip")) {
no_device_coordinates = true;
break;
}
}
if (!device_assignment || device_assignment->empty() ||
no_device_coordinates) {
TF_ASSIGN_OR_RETURN(xla::DeviceAssignment xla_device_assignment,
ifrt_client.GetDefaultDeviceAssignment(
num_replicas, num_cores_per_replica));
VLOG(3) << "Getting default device lists";
std::vector<xla::ifrt::Device*> devices;
devices.reserve(num_devices);
for (int replica_idx = 0; replica_idx < num_replicas; replica_idx++) {
for (int core_idx = 0; core_idx < num_cores_per_replica; core_idx++) {
auto device_id = xla_device_assignment(replica_idx, core_idx);
TF_ASSIGN_OR_RETURN(
xla::ifrt::Device * device,
ifrt_client.LookupDevice(xla::ifrt::DeviceId(device_id)));
devices.push_back(device);
}
}
return devices;
}
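// Otherwise, parse the flattened attribute into a map from grid coordinates
// to each device's position in the attribute list.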
absl::flat_hash_map<GridCoords, int> devices_from_attribute;
std::vector<int> coord;
coord.reserve(kTpuTopologyRank);
int device_index = 0;
for (auto coord_attr : *device_assignment) {
coord.push_back(coord_attr);
if (coord.size() == kTpuTopologyRank) {
devices_from_attribute.insert(
{GridCoords(coord[0], coord[1], coord[2], coord[3]), device_index});
device_index++;
coord.clear();
}
}
if (!coord.empty()) {
return absl::FailedPreconditionError(
absl::StrCat("Device assignment attribute is expected to be a multiple "
"of 4, but got ",
device_assignment->size()));
}
if (devices_from_attribute.size() != num_devices) {
return absl::FailedPreconditionError(
absl::StrCat("Device assignment has ", devices_from_attribute.size(),
" devices, but expected ", num_devices));
}
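// Match every IFRT device's "coords"/"core_on_chip" attributes against the
// requested grid, then order the matches by their position in the attribute
// list so the result follows the caller's device order.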
struct IfrtDeviceGrid {
xla::ifrt::Device* device;
GridCoords grid;
int index_at_attribute;
};
std::vector<IfrtDeviceGrid> ifrt_devices;
ifrt_devices.reserve(num_devices);
for (auto* device : ifrt_client.devices()) {
GridCoords grid;
auto coords_it = device->Attributes().map().find("coords");
auto core_on_chip_it = device->Attributes().map().find("core_on_chip");
if (coords_it != device->Attributes().map().end() &&
core_on_chip_it != device->Attributes().map().end()) {
VLOG(3) << "Adding coords and core_on_chip attributes:"
<< device->DebugString();
auto coords_list =
std::get<xla::ifrt::AttributeMap::Int64ListValue>(coords_it->second);
auto core_on_chip = std::get<xla::ifrt::AttributeMap::Int64Value>(
core_on_chip_it->second);
if (coords_list.value.size() != 3) {
return absl::InternalError(absl::StrCat(
"Expected coords to be of size 3, but got ",
coords_list.value.size(), " for device ", device->DebugString()));
}
grid = GridCoords(coords_list.value[0], coords_list.value[1],
coords_list.value[2], core_on_chip.value);
} else {
return absl::InternalError(
absl::StrCat("Device ", device->DebugString(),
" does not have coords or core_on_chip attribute."));
}
auto device_it_from_attribute = devices_from_attribute.find(grid);
if (device_it_from_attribute == devices_from_attribute.end()) {
VLOG(1) << "Device coordinates " << grid.ToString()
<< " does not match any TPU device assigned "
<< absl::StrJoin(*device_assignment, " ");
continue;
}
ifrt_devices.push_back(
{.device = device,
.grid = grid,
.index_at_attribute = device_it_from_attribute->second});
}
if (ifrt_devices.size() != num_devices) {
return absl::FailedPreconditionError(absl::StrCat(
"Match ", ifrt_devices.size(), " devices, but expected ", num_devices));
}
absl::c_sort(ifrt_devices, [&](const auto& lhs, const auto& rhs) {
return lhs.index_at_attribute < rhs.index_at_attribute;
});
std::vector<xla::ifrt::Device*> result;
result.reserve(ifrt_devices.size());
for (auto& device_grid : ifrt_devices) {
result.push_back(device_grid.device);
VLOG(3) << "Device: " << device_grid.device->DebugString()
<< " is assigned";
}
return result;
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include <memory>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/mock.h"
#include "xla/service/computation_placer.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::testing::ElementsAre;
using ::testing::Return;
using ::testing::ReturnRef;
using ::tsl::testing::StatusIs;
static constexpr int kNumReplicas = 1;
static constexpr int kNumCoresPerReplica = 2;
static constexpr int kNumDevices = 4;
static constexpr int kDeviceIdOffset = 8;
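// The fixture mocks four devices (ids 8..11) covering two chips (x=1 and
// x=2) with two cores each; the default assignment points at the last two
// devices (ids 10 and 11).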
class IfrtDeviceUtilsTest : public ::testing::Test {
protected:
void SetUp() override {
mocked_devices_.reserve(kNumDevices);
devices_.reserve(kNumDevices);
for (int i = 0; i < kNumDevices; ++i) {
mocked_devices_.push_back(std::make_unique<xla::ifrt::MockDevice>());
ON_CALL(*mocked_devices_[i], Attributes())
.WillByDefault(ReturnRef(device_attributes_maps_[i]));
ON_CALL(*mocked_devices_[i], Id())
.WillByDefault(Return(xla::ifrt::DeviceId(kDeviceIdOffset + i)));
ON_CALL(client_, LookupDevice(xla::ifrt::DeviceId(kDeviceIdOffset + i)))
.WillByDefault(Return(mocked_devices_[i].get()));
devices_.push_back(mocked_devices_[i].get());
};
ON_CALL(client_, devices()).WillByDefault(Return(devices_));
xla::DeviceAssignment assignment(kNumReplicas, kNumCoresPerReplica);
assignment(0, 0) = kDeviceIdOffset + 2;
assignment(0, 1) = kDeviceIdOffset + 3;
ON_CALL(client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumCoresPerReplica))
.WillByDefault(Return(assignment));
}
xla::ifrt::MockClient client_;
std::vector<std::unique_ptr<xla::ifrt::MockDevice>> mocked_devices_;
std::vector<xla::ifrt::Device*> devices_;
std::vector<xla::ifrt::AttributeMap> device_attributes_maps_ = {
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({1, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(0)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({1, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(1)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({2, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(0)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({2, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(1)}}),
};
};
TEST_F(IfrtDeviceUtilsTest, Basic) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 1, 0, 0, 0};
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
device_assignment_attr));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[1], devices_[0]));
}
TEST_F(IfrtDeviceUtilsTest, SeparateXCoordinates) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 2, 0, 0, 0};
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
device_assignment_attr));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[1], devices_[2]));
}
TEST_F(IfrtDeviceUtilsTest, EmptyDeviceAssignmentShallReturnDefault) {
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
std::nullopt));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[2], devices_[3]));
}
TEST_F(IfrtDeviceUtilsTest, MismatchCoordinatesShallFail) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 3, 0, 0, 0};
auto status = GetAssignedIfrtDevices(client_, 1, 2, device_assignment_attr);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_device_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_device_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4226e9b-7fbf-4289-b06e-d2775cb74417 | cpp | tensorflow/tensorflow | tf_host_callback | tensorflow/core/tfrt/ifrt/tf_host_callback.cc | tensorflow/core/tfrt/ifrt/tf_host_callback_test.cc | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_operation.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using RefCountHandle = ::tsl::core::RefCountPtr<tensorflow::TensorHandle>;
size_t GetSizeInBytes(const tensorflow::Tensor& tensor) {
return tensor.shape().num_elements() * DataTypeSize(tensor.dtype());
}
tensorflow::Tensor GetTensor(const DtypeAndShape& dtype_and_shape, void* src) {
DCHECK(DataTypeCanUseMemcpy(dtype_and_shape.dtype));
tensorflow::Tensor t(dtype_and_shape.dtype, dtype_and_shape.shape);
std::memcpy(t.data(), src, GetSizeInBytes(t));
return t;
}
void CopyToBuffer(void* dst, const tensorflow::Tensor& tensor) {
DCHECK(DataTypeCanUseMemcpy(tensor.dtype()));
std::memcpy(dst, tensor.data(), GetSizeInBytes(tensor));
}
}
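// Runs the entry function eagerly: wraps the caller's raw input buffers as
// TF tensors, executes the registered FunctionDef, and copies the resulting
// tensors back into the caller-provided output buffers.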
absl::Status TfHostCallback::Call(void** inputs, void** outputs) {
tsl::profiler::TraceMe trace_me("TfHostCallback::Call");
tensorflow::ImmediateOpPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(
op->Reset(entry_function_name_.c_str(), nullptr));
ctx_->StartStep();
absl::Cleanup cleanup_step = [this]() { ctx_->EndStep(); };
for (int i = 0; i < operand_type_and_shapes_.size(); ++i) {
tensorflow::Tensor t = GetTensor(operand_type_and_shapes_[i], inputs[i]);
RefCountHandle handle(tensorflow::down_cast<tensorflow::TensorHandle*>(
ctx_->CreateLocalHandleFromTFTensor(t, nullptr)));
TF_RETURN_IF_ERROR(op->AddInput(handle.get()));
}
int num_outputs = result_type_and_shapes_.size();
absl::FixedArray<tensorflow::AbstractTensorHandle*> output_raw_handles(
num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::MakeSpan(output_raw_handles), &num_outputs));
std::vector<RefCountHandle> output_handles;
output_handles.reserve(num_outputs);
for (auto* output_raw_handle : output_raw_handles) {
output_handles.emplace_back(
tensorflow::down_cast<tensorflow::TensorHandle*>(output_raw_handle));
}
if (result_type_and_shapes_.size() != num_outputs) {
return absl::InternalError(absl::StrCat(
"TF host callback invocation expected ", result_type_and_shapes_.size(),
" results, instead got ", num_outputs));
}
for (int i = 0; i < num_outputs; ++i) {
const tensorflow::Tensor* tensor;
TF_RETURN_IF_ERROR(output_handles[i]->Tensor(&tensor));
CopyToBuffer(outputs[i], *tensor);
}
return absl::OkStatus();
}
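// Builds a host callback with a private EagerContext over the shared
// DeviceMgr, registering every FunctionDef so the entry function can be
// invoked by name.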
absl::StatusOr<std::unique_ptr<TfHostCallback>> TfHostCallback::Create(
absl::Span<const tensorflow::FunctionDef> functions,
absl::string_view entry_function_name,
absl::Span<const DtypeAndShape> operand_type_and_shapes,
absl::Span<const DtypeAndShape> result_type_and_shapes,
tensorflow::DeviceMgr* device_mgr) {
tensorflow::SessionOptions options;
options.config.add_device_filters("/device:CPU:*");
DCHECK(device_mgr != nullptr);
tensorflow::EagerContextPtr ctx(new tensorflow::EagerContext(
options,
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr,
false,
nullptr,
nullptr,
nullptr,
true));
for (const tensorflow::FunctionDef& function : functions) {
TF_RETURN_IF_ERROR(ctx->AddFunctionDef(function));
}
return absl::WrapUnique(
new TfHostCallback(entry_function_name, operand_type_and_shapes,
result_type_and_shapes, std::move(ctx)));
}
absl::StatusOr<std::unique_ptr<tensorflow::DynamicDeviceMgr>>
CreateTfDynamicDeviceMgr() {
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddCpuDevices(
tensorflow::SessionOptions(), "/job:localhost/replica:0/task:0",
&devices));
return std::make_unique<tensorflow::DynamicDeviceMgr>(std::move(devices));
}
}
} | #include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
absl::StatusOr<tensorflow::FunctionDef> ToFunctionDef(
tensorflow::Scope scope, const std::string& function_name) {
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
tensorflow::FunctionDef function_def;
TF_RETURN_IF_ERROR(
tensorflow::GraphToFunctionDef(*graph, function_name, &function_def));
return function_def;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddOneFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
auto const0_value = tensorflow::test::AsScalar<float>(1);
auto const0 =
tensorflow::ops::Const(scope.WithOpName("const0"),
tensorflow::Input::Initializer(const0_value));
auto add0 = tensorflow::ops::Add(scope.WithOpName("add0"), arg0, const0);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add0, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<std::vector<tensorflow::FunctionDef>>
MakeAddOneWithCallFunctionDef(const std::string& function_name) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
MakeAddOneFunctionDef("add"));
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_FLOAT, 0);
tensorflow::NameAttrList f;
f.set_name("add");
auto call = tensorflow::ops::StatefulPartitionedCall(
scope.WithOpName("call"), {arg0.output}, {tensorflow::DT_FLOAT}, f);
auto retval0 = tensorflow::ops::_Retval(scope.WithOpName("retval0"),
call.output[0], 0);
}
TF_ASSIGN_OR_RETURN(function_defs.emplace_back(),
ToFunctionDef(std::move(scope), function_name));
return function_defs;
}
absl::StatusOr<tensorflow::FunctionDef> MakeAssignVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
arg0);
}
return ToFunctionDef(std::move(scope), function_name);
}
absl::StatusOr<tensorflow::FunctionDef> MakeAddVarFunctionDef(
const std::string& function_name) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
{
auto arg0 = tensorflow::ops::_Arg(scope.WithOpName("arg0"),
tensorflow::DT_INT32, 0);
auto var = tensorflow::ops::VarHandleOp(
scope.WithOpName("var"), tensorflow::DT_INT32,
tensorflow::TensorShape(),
tensorflow::ops::VarHandleOp::Attrs().SharedName("var"));
auto read = tensorflow::ops::ReadVariableOp(scope.WithOpName("read"), var,
tensorflow::DT_INT32);
auto add = tensorflow::ops::Add(scope.WithOpName("add"), read, arg0);
tensorflow::ops::AssignVariableOp assign_op(scope.WithOpName("assign"), var,
add);
auto retval0 =
tensorflow::ops::_Retval(scope.WithOpName("retval0"), add, 0);
}
return ToFunctionDef(std::move(scope), function_name);
}
TEST(TfHostCallbackTest, Simple) {
ASSERT_OK_AND_ASSIGN(auto function_defs,
MakeAddOneWithCallFunctionDef("main"));
auto in = AsTensor<float>({2.5f}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back({.dtype = in.dtype(), .shape = in.shape()});
auto out = AsTensor<float>({0.0f}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back({.dtype = out.dtype(), .shape = out.shape()});
ASSERT_OK_AND_ASSIGN(auto device_mgr, CreateTfDynamicDeviceMgr());
ASSERT_OK_AND_ASSIGN(auto tf_host_callback,
tensorflow::ifrt_serving::TfHostCallback::Create(
function_defs, "main", in_dtype_shapes,
out_dtype_shapes, device_mgr.get()));
ASSERT_OK(tf_host_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out,
TensorEq(AsTensor<float>({3.5f}, tensorflow::TensorShape({1}))));
}
TEST(TfHostCallbackTest, SharedState) {
tensorflow::ConfigProto session_config;
ASSERT_OK_AND_ASSIGN(auto state, CreateTfDynamicDeviceMgr());
std::unique_ptr<TfHostCallback> assign_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAssignVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
ASSERT_OK_AND_ASSIGN(
assign_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
std::unique_ptr<TfHostCallback> incr_callback;
{
ASSERT_OK_AND_ASSIGN(auto functions, MakeAddVarFunctionDef("main"));
std::vector<DtypeAndShape> in_dtype_shapes;
in_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
std::vector<DtypeAndShape> out_dtype_shapes;
out_dtype_shapes.push_back(
{.dtype = DT_INT32, .shape = tensorflow::TensorShape({1})});
ASSERT_OK_AND_ASSIGN(
incr_callback,
TfHostCallback::Create({functions}, "main", in_dtype_shapes,
out_dtype_shapes, state.get()));
}
constexpr int32_t kInit = 2;
{
auto in = AsTensor<int32_t>({kInit}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
    void** out_ptrs = nullptr;  // No outputs; a zero-length array is non-standard C++.
ASSERT_OK(assign_callback->Call(in_ptrs, out_ptrs));
}
for (int i = 0; i < 3; ++i) {
auto in = AsTensor<int32_t>({1}, tensorflow::TensorShape({1}));
void* in_ptrs[1] = {in.data()};
auto out = AsTensor<int32_t>({0}, tensorflow::TensorShape({1}));
void* out_ptrs[1] = {out.data()};
ASSERT_OK(incr_callback->Call(in_ptrs, out_ptrs));
EXPECT_THAT(out, TensorEq(AsTensor<int32_t>({kInit + i + 1},
tensorflow::TensorShape({1}))));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/tf_host_callback.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/tf_host_callback_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c775d3d1-0469-48e5-a02a-d44ccc159d13 | cpp | tensorflow/tensorflow | attribute | tensorflow/core/tfrt/mlrt/attribute/attribute.cc | tensorflow/core/tfrt/mlrt/attribute/attribute_test.cc | #include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tf_mlrt {
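// Serializes an MLIR attribute into its MLRT bytecode representation.
// Simple attributes are tried first; dense tensors, type attributes, shape
// attributes, and arrays of dtypes each get a dedicated encoding below.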
absl::StatusOr<std::string> EncodeTensorflowAttribute(
const mlrt::ModuleEmitterContext& module_context, mlir::Attribute attr) {
if (auto result = mlrt::EncodeSimpleAttribute(module_context, attr)) {
return std::move(*result);
}
if (auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr)) {
auto element_type = dense_attr.getElementType();
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(tensorflow::ConvertToDataType(element_type, &dtype));
if (dtype == tensorflow::DT_STRING) {
return absl::InvalidArgumentError(
"String tensor attribute is not yet supported");
}
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto tensor_ctor = mlrt::bc::New<TensorAttr>(&allocator, dtype);
auto shaped_type = dense_attr.getType();
size_t num_elements = shaped_type.getNumElements();
tensor_ctor.set_num_elements(num_elements);
std::vector<int64_t> shape(shaped_type.getShape().begin(),
shaped_type.getShape().end());
tensor_ctor.construct_shape(shape);
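    // DenseElementsAttr bit-packs i1 values, so booleans are expanded to one
    // byte per element instead of copying the raw data.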
if (dtype == tensorflow::DT_BOOL) {
std::vector<uint8_t> data(num_elements);
int i = 0;
for (auto v : dense_attr.getValues<bool>()) {
data[i++] = static_cast<uint8_t>(v);
}
tensor_ctor.construct_data(data.size())
.Place(reinterpret_cast<const char*>(data.data()), data.size());
} else {
auto raw_data = dense_attr.getRawData();
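      // A splat stores a single element's bytes; replicate them
      // `num_elements` times to materialize the full tensor payload.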
if (dense_attr.isSplat()) {
std::vector<char> data(raw_data.size() * num_elements);
char* p = data.data();
for (int i = 0; i < num_elements; ++i, p += raw_data.size()) {
std::memcpy(p, raw_data.data(), raw_data.size());
}
tensor_ctor.construct_data(data.size()).Place(data.data(), data.size());
} else {
tensor_ctor.construct_data(raw_data.size())
.Place(raw_data.data(), raw_data.size());
}
}
return std::string(buffer.data(), buffer.size());
}
if (auto type_attr = mlir::dyn_cast<mlir::TypeAttr>(attr)) {
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(
tensorflow::ConvertToDataType(type_attr.getValue(), &dtype));
std::string data(sizeof(dtype), '\0');
std::memcpy(data.data(), &dtype, sizeof(dtype));
return data;
}
if (auto shape_attr = mlir::dyn_cast<mlir::TF::ShapeAttr>(attr)) {
llvm::ArrayRef<int64_t> shape;
if (!shape_attr.getUnranked()) {
auto shape_or = shape_attr.getValue();
if (!shape_or.has_value()) {
std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Failed to get shape from shape attr: ", attr_str));
}
shape = *shape_or;
}
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto shape_attr_ctor = mlrt::bc::New<ShapeAttr>(&allocator);
shape_attr_ctor.set_unranked(shape_attr.getUnranked());
std::vector<int64_t> shape_vec(shape.begin(), shape.end());
shape_attr_ctor.construct_shape(shape_vec);
return std::string(buffer.data(), buffer.size());
}
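// Arrays are encodable only when every element is a TypeAttr; a partial
// match falls through to the unsupported-attribute error below.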
if (auto array_attr = mlir::dyn_cast<mlir::ArrayAttr>(attr)) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>(
&allocator, array_attr.size());
int i;
for (i = 0; i < array_attr.size(); ++i) {
if (auto type_attr = mlir::dyn_cast<mlir::TypeAttr>(array_attr[i])) {
tensorflow::DataType dtype;
TF_RETURN_IF_ERROR(
tensorflow::ConvertToDataType(type_attr.getValue(), &dtype));
ctor.ConstructAt(i, dtype);
} else {
break;
}
}
if (i == array_attr.size()) {
return std::string(buffer.data(), buffer.size());
}
}
std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
}
} | #include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include <array>
#include <cstring>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
TEST(AttributeTest, TensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
std::array<int64_t, 4> data = {0, 1, 2, 3};
auto dense_i64_attr = builder.getI64VectorAttr(data);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_i64_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_INT64);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
EXPECT_EQ(
absl::string_view(tensor_attr.data().data(), tensor_attr.data().size()),
absl::string_view(reinterpret_cast<const char*>(data.data()),
data.size() * sizeof(int64_t)));
}
TEST(AttributeTest, BoolTensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
auto dense_bool_attr = builder.getBoolVectorAttr({true, false, true, false});
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_bool_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_BOOL);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
std::array<uint8_t, 4> expected_data = {1, 0, 1, 0};
EXPECT_EQ(
absl::string_view(tensor_attr.data().data(), tensor_attr.data().size()),
absl::string_view(reinterpret_cast<const char*>(expected_data.data()),
expected_data.size() * sizeof(uint8_t)));
}
TEST(AttributeTest, SplatTensorAttr) {
mlir::MLIRContext mlir_context;
mlir::Builder builder(&mlir_context);
auto dense_splat_i64_attr = mlir::DenseElementsAttr::get<int64_t>(
mlir::RankedTensorType::get(4, builder.getI64Type()), 100);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, dense_splat_i64_attr));
TensorAttr tensor_attr(attr_buffer.data());
EXPECT_EQ(tensor_attr.dtype(), tensorflow::DT_INT64);
EXPECT_THAT(tensor_attr.shape(), ::testing::ElementsAreArray({4}));
EXPECT_EQ(tensor_attr.data().size(), 4 * sizeof(int64_t));
const char* p = tensor_attr.data().data();
for (int i = 0; i < 4; ++i, p += sizeof(int64_t)) {
int64_t v;
std::memcpy(&v, p, sizeof(int64_t));
EXPECT_EQ(v, 100);
}
}
TEST(AttributeTest, TypedAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
auto type_attr = mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(32));
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer, EncodeTensorflowAttribute(emitter_context, type_attr));
tensorflow::DataType dtype;
std::memcpy(&dtype, attr_buffer.data(), sizeof(dtype));
EXPECT_EQ(dtype, DT_INT32);
}
TEST(AttributeTest, ShapeAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
std::array<int64_t, 4> data = {1, 2, 3, 4};
auto shape_attr = mlir::TF::ShapeAttr::get(
&mlir_context, llvm::ArrayRef<int64_t>(data.begin(), data.end()),
false);
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(
auto attr_buffer, EncodeTensorflowAttribute(emitter_context, shape_attr));
ShapeAttr shape_attr_decoded(attr_buffer.data());
EXPECT_EQ(shape_attr_decoded.unranked(), false);
EXPECT_THAT(shape_attr_decoded.dims(),
::testing::ElementsAreArray({1, 2, 3, 4}));
}
TEST(AttributeTest, DtypeArrayAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
std::array<mlir::Attribute, 4> arr = {
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(32)),
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(64)),
mlir::TypeAttr::get(builder.getType<mlir::Float32Type>()),
mlir::TypeAttr::get(builder.getType<mlir::IntegerType>(1))};
auto arr_attr = mlir::ArrayAttr::get(
&mlir_context, llvm::ArrayRef<mlir::Attribute>(arr.begin(), arr.end()));
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
TF_ASSERT_OK_AND_ASSIGN(auto attr_buffer,
EncodeTensorflowAttribute(emitter_context, arr_attr));
mlrt::bc::Vector<tensorflow::DataType> dtype_arr(attr_buffer.data());
EXPECT_THAT(dtype_arr, ::testing::ElementsAreArray(
{DT_INT32, DT_INT64, DT_FLOAT, DT_BOOL}));
}
TEST(AttributeTest, UnsupportedAttr) {
mlir::MLIRContext mlir_context;
mlir_context.loadDialect<mlir::TF::TensorFlowDialect>();
mlir::Builder builder(&mlir_context);
auto dense_string_attr = mlir::DenseStringElementsAttr::get(
mlir::RankedTensorType::get({2}, builder.getType<mlir::TF::StringType>()),
{"a", "b"});
mlrt::AttributeEncoderRegistry attribute_encoder_registry;
mlrt::ModuleEmitterContext emitter_context(&attribute_encoder_registry);
EXPECT_THAT(
EncodeTensorflowAttribute(emitter_context, dense_string_attr),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument,
"String tensor attribute is not yet supported"));
EXPECT_THAT(
EncodeTensorflowAttribute(emitter_context, builder.getUnitAttr()),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument,
"Try to encode unsupported attribute: unit"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/attribute/attribute.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/attribute/attribute_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1764824f-014b-48c7-86d7-e5d951be561b | cpp | tensorflow/tensorflow | shard_restore_util | tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.cc | tensorflow/core/tfrt/mlrt/kernel/shard_restore_util_test.cc | #include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <queue>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
namespace tensorflow {
namespace tf_mlrt {
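// Partitions variables into at most `num_shards` groups of roughly balanced
// total byte size: sort descending by size, then greedily assign each
// variable to the currently lightest shard via a min-heap keyed on
// cumulative size. Empty shards are dropped from the result.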
std::vector<std::vector<int>> ShardVariables(
int num_shards, absl::Span<int64_t> variable_sizes) {
DCHECK_GT(num_shards, 0);
struct IndexSize {
int index;
int64_t size;
};
std::vector<IndexSize> variable_index_sizes;
variable_index_sizes.reserve(variable_sizes.size());
for (int i = 0; i < variable_sizes.size(); ++i) {
variable_index_sizes.push_back({.index = i, .size = variable_sizes[i]});
}
std::sort(
variable_index_sizes.begin(), variable_index_sizes.end(),
[&](const IndexSize& a, const IndexSize& b) { return a.size > b.size; });
struct RestoreVariableCluster {
std::vector<int> indices;
size_t total_size = 0;
};
auto cmp = [](const RestoreVariableCluster& a,
const RestoreVariableCluster& b) {
return a.total_size > b.total_size;
};
std::priority_queue<RestoreVariableCluster,
std::vector<RestoreVariableCluster>, decltype(cmp)>
min_heap(cmp);
for (int i = 0; i < num_shards; ++i) {
min_heap.push(RestoreVariableCluster());
}
for (int i = 0; i < variable_index_sizes.size(); ++i) {
RestoreVariableCluster min_cluster = min_heap.top();
min_heap.pop();
min_cluster.total_size += variable_index_sizes[i].size;
min_cluster.indices.push_back(variable_index_sizes[i].index);
min_heap.push(std::move(min_cluster));
}
std::vector<std::vector<int>> shards;
shards.reserve(min_heap.size());
while (!min_heap.empty()) {
auto& min_cluster = min_heap.top();
if (min_cluster.total_size > 0) {
shards.push_back(min_cluster.indices);
}
min_heap.pop();
}
return shards;
}
}
} | #include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
TEST(ShardRestoreUtilTest, Basic) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {8, 10, 3};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 2);
EXPECT_THAT(shards[0], ElementsAre(1));
EXPECT_THAT(shards[1], ElementsAre(0, 2));
}
TEST(ShardRestoreUtilTest, Imbalance) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {3, 3, 10, 3};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 2);
EXPECT_THAT(shards[0], UnorderedElementsAre(0, 1, 3));
EXPECT_THAT(shards[1], ElementsAre(2));
}
TEST(ShardRestoreUtilTest, SingleShard) {
int num_shards = 1;
std::vector<int64_t> shard_sizes = {10, 2};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 1);
EXPECT_THAT(shards[0], ElementsAre(0, 1));
}
TEST(ShardRestoreUtilTest, NumVariablesLessThanShard) {
int num_shards = 2;
std::vector<int64_t> shard_sizes = {1};
std::vector<std::vector<int>> shards =
ShardVariables(num_shards, absl::MakeSpan(shard_sizes));
EXPECT_EQ(shards.size(), 1);
EXPECT_THAT(shards[0], ElementsAre(0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/shard_restore_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
908bf121-508e-4c4a-9d0d-8904854bb57f | cpp | tensorflow/tensorflow | ifrt_ops_kernel | tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc | tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc | #include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/future.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/future.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/tstring.h"
using tensorflow::ifrt_serving::IfrtModelContext;
namespace tensorflow {
namespace tf_mlrt {
namespace {
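// Kernel frame for tf_mlrt.ifrt_restore_variable. Arguments 0-2 carry the
// checkpoint prefix, tensor names, and shape-and-slices tensors; every
// remaining argument is a variable handle to restore into. Attribute 0
// lists the restored dtypes and attribute 1 the truncate-in-cast flags.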
struct MlrtIfrtRestoreVariableKernel : mlrt::KernelFrame {
using KernelFrame::KernelFrame;
static constexpr char kName[] = "tf_mlrt.ifrt_restore_variable";
tensorflow::tfrt_stub::FallbackTensor prefix() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
tensorflow::tfrt_stub::FallbackTensor tensor_names() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[1].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
tensorflow::tfrt_stub::FallbackTensor shape_and_slices() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[2].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
mlrt::bc::Vector<tensorflow::DataType> restored_dtypes() const {
return attributes().GetAs<mlrt::bc::Vector<tensorflow::DataType>>(0);
}
mlrt::bc::Vector<bool> truncate_in_cast() const {
return attributes().GetAs<mlrt::bc::Vector<bool>>(1);
}
std::vector<tensorflow::tfrt_stub::FallbackTensor> var_handles() const {
DCHECK_GT(arguments().size(), 3);
std::vector<tensorflow::tfrt_stub::FallbackTensor> result;
result.reserve(arguments().size() - 3);
for (int i = 3; i < arguments().size(); ++i) {
result.push_back(
arguments()[i].Get<tensorflow::tfrt_stub::FallbackTensor>());
}
return result;
}
Context& context() { return execution_context().GetUserContext<Context>(); }
void Invoke();
private:
static constexpr int kNumRestoreClusters = 4;
absl::Status InvokeHelper();
absl::Status ValidateInput();
};
void MlrtIfrtRestoreVariableKernel::Invoke() {
absl::Status status = InvokeHelper();
if (!status.ok()) {
execution_context().Fail(std::move(status));
return;
}
}
absl::Status MlrtIfrtRestoreVariableKernel::ValidateInput() {
if (prefix().tensor().NumElements() != 1) {
return absl::InvalidArgumentError(
"The prefix tensor must be a scalar tensor.");
}
if (!TensorShapeUtils::IsVector(tensor_names().tensor().shape()) ||
!TensorShapeUtils::IsVector(shape_and_slices().tensor().shape())) {
return absl::InvalidArgumentError(
absl::StrCat("Input tensor_names and shape_and_slices "
"should be an 1-D tensors, got ",
tensor_names().tensor().shape().DebugString(), " and ",
shape_and_slices().tensor().shape().DebugString()));
}
if (tensor_names().tensor().NumElements() !=
shape_and_slices().tensor().NumElements()) {
return absl::InvalidArgumentError(
"The tensor_names and shape_and_slices tensors must have the same "
"number of elements.");
}
if (tensor_names().tensor().NumElements() != var_handles().size()) {
return absl::InvalidArgumentError(
"The tensor_names and var_handles must have the same number of "
"elements.");
}
if (tensor_names().tensor().NumElements() != restored_dtypes().size()) {
return absl::InvalidArgumentError(
"The tensor_names and restored_dtypes must have the same number of "
"elements.");
}
if (tensor_names().tensor().NumElements() != truncate_in_cast().size()) {
return absl::InvalidArgumentError(
"The tensor_names and truncate_in_cast must have the same number of "
"elements.");
}
return absl::OkStatus();
}
absl::Status MlrtIfrtRestoreVariableKernel::InvokeHelper() {
std::optional<ifrt_serving::IfrtModelRestoreContext*> model_restore_context =
context()
.resource_context()
.GetResource<ifrt_serving::IfrtModelRestoreContext>(
ifrt_serving::kIfrtModelRestoreContextName);
if (!model_restore_context.has_value()) {
return absl::InternalError(
"Did not find IfrtModelRestoreContext resource.");
}
if (*model_restore_context == nullptr) {
return absl::InternalError("IfrtModelRestoreContext must not be null.");
}
ifrt_serving::CheckpointLoader* checkpoint_loader =
(*model_restore_context)->checkpoint_loader();
if (!checkpoint_loader) {
return absl::InternalError("CheckpointLoader must not be null.");
}
TF_RETURN_IF_ERROR(ValidateInput());
std::vector<tensorflow::DataType> restored_dtypes_vec(
restored_dtypes().begin(), restored_dtypes().end());
std::vector<bool> truncate_in_cast_vec(truncate_in_cast().begin(),
truncate_in_cast().end());
return checkpoint_loader->Load(prefix(), var_handles(), tensor_names(),
shape_and_slices(), restored_dtypes_vec,
truncate_in_cast_vec, context());
}
class MlrtIfrtLoadVariableKernel : public mlrt::KernelFrame {
public:
using KernelFrame::KernelFrame;
static constexpr char kName[] = "tf_mlrt.ifrt_load_variable";
const tensorflow::Tensor& variable_handler_tensor() const {
DCHECK_GE(arguments().size(), 1);
const tensorflow::Tensor& ret =
arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
DCHECK_EQ(ret.NumElements(), 1);
return ret;
}
bool used_by_host() const {
DCHECK_EQ(attributes().size(), 1);
return attributes().GetAs<bool>(0);
}
Context& context() { return execution_context().GetUserContext<Context>(); }
void Invoke();
private:
absl::Status InvokeHelper();
};
void MlrtIfrtLoadVariableKernel::Invoke() {
absl::Status status = InvokeHelper();
if (!status.ok()) {
execution_context().Fail(std::move(status));
return;
}
}
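// Produces two results: a scalar string tensor holding the variable's
// runtime name and a future for its value. When used by the host, the value
// is resolved from the IfrtRestoreTensorRegistry if possible, otherwise
// looked up in the host ResourceManager.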
absl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() {
DCHECK_EQ(2, results().size());
std::optional<IfrtModelContext*> ifrt_model_context =
context().resource_context().GetResource<IfrtModelContext>(
"IfrtModelContext");
if (!ifrt_model_context.has_value()) {
return absl::FailedPreconditionError(
"LoadVariableOp: failed to fetch IfrtModelContext: ");
}
auto tensor_promise =
mlrt::Promise::Allocate<tensorflow::tfrt_stub::FallbackTensor>();
auto tensor_future = tensor_promise.GetFuture();
ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =
(*ifrt_model_context)->GetRestoreTensorRegistry();
auto& resource_handle = variable_handler_tensor().scalar<ResourceHandle>()();
std::string runtime_name =
ifrt_serving::GetRuntimeNameFromVarHandle(resource_handle);
if (used_by_host()) {
if (ifrt_restore_tensor_registry.SetUsedByHost(runtime_name).ok()) {
xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
restored_tensor_future.OnReady(
[tensor_promise = std::move(tensor_promise)](
absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
if (!restored_tensor.ok()) {
std::move(tensor_promise).SetError(restored_tensor.status());
return;
}
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor(*restored_tensor));
});
} else {
auto resource_manager = context()
.fallback_request_state()
.device_manager()
.HostCPU()
->resource_manager();
DCHECK(resource_manager);
Var* variable;
TF_RETURN_IF_ERROR(resource_manager->Lookup(
resource_handle.container(), resource_handle.name(), &variable));
if (tensorflow::Tensor* t = variable->tensor(); t != nullptr) {
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor(*t));
} else {
std::move(tensor_promise)
.SetError(absl::InternalError(
absl::StrCat("Variable ", resource_handle.name(),
" is not found in either "
"IfrtRestoreTensorRegistry or ResourceManager")));
}
}
} else {
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor());
}
tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
key_tensor.scalar<tsl::tstring>()() = runtime_name;
results()[0].Set(tensorflow::tfrt_stub::FallbackTensor(key_tensor));
results()[1].Set(std::move(tensor_future));
return absl::OkStatus();
}
void RegisterTfMlrtIfrtKernels(mlrt::KernelRegistry& registry) {
registry.Register<MlrtIfrtLoadVariableKernel>();
registry.Register<MlrtIfrtRestoreVariableKernel>();
}
}
const bool kUnused = [] {
RegisterTfMlrtIfrtKernels(GetTfMlrtOptionalKernelRegistry());
return true;
}();
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
using tensorflow::test::AsScalar;
using tensorflow::test::AsTensor;
using tensorflow::test::ExpectEqual;
using tensorflow::test::TensorEq;
constexpr absl::string_view kContainer = "test";
constexpr absl::string_view kSharedName = "y";
constexpr absl::string_view kVariableRuntimeName = "test__y";
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static tsl::thread::ThreadPool* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
std::string EncodeRestoreDtypesInt32(int num_outputs) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>(
&allocator, num_outputs);
for (int i = 0; i < num_outputs; ++i) {
ctor.ConstructAt(i, tensorflow::DT_INT32);
}
return std::string(buffer.data(), buffer.size());
}
std::string EncodeTruncateInCast(int num_outputs) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<bool>>(&allocator, num_outputs);
for (int i = 0; i < num_outputs; ++i) {
ctor.ConstructAt(i, false);
}
return std::string(buffer.data(), buffer.size());
}
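// Hand-assembles an MLRT executable whose main function materializes one
// VarHandleOp per variable through tf_mlrt.createop/executeop pairs and
// then feeds all handles into a single tf_mlrt.ifrt_restore_variable
// kernel.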
mlrt::bc::Buffer CreateExecutableForIfrtRestoreVariableOp(
int num_variables = 1) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
mlrt::testing::SymbolTable kernels;
std::vector<std::string> kernel_names = {
"tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_restore_variable",
"return"};
executable_ctor.construct_kernel_names(kernel_names.size())
.Assign(kernel_names);
kernels.Def(kernel_names);
  static constexpr int kNumAttributes = 5;
mlrt::testing::AttributeTable attributes(executable_ctor.construct_attributes(
kNumAttributes + 2 * (num_variables - 1)));
std::string restore_dtypes = EncodeRestoreDtypesInt32(num_variables);
attributes.Add("restore_dtypes", restore_dtypes);
std::vector<bool> truncate_in_cast(num_variables, false);
attributes.Add("truncate_in_cast", EncodeTruncateInCast(num_variables));
for (int i = 0; i < num_variables; ++i) {
attributes.Add(
absl::StrCat("var_handle_op_node_def", i),
absl::Substitute(
R"pb(name: "$0"
op: "VarHandleOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "container"
value { s: "$1" }
}
attr {
key: "shared_name"
value { s: "$2" }
}
attr {
key: "dtype"
value { type: DT_INT16 }
}
attr {
key: "shape"
value { shape { dim { size: 3 } } }
}
)pb",
absl::StrCat("VarHandleOp", i), kContainer,
absl::StrCat(kSharedName, i)));
attributes.Add(absl::StrCat("var_handle_op_key", i), i);
}
auto functions_ctor = executable_ctor.construct_functions(1);
{
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
mlrt::testing::SymbolTable regs;
function_ctor.construct_input_regs(3).Assign(
regs.Def({"prefix_tensor", "name_tensor", "slice_tensor"}));
const int kNumKernels = 4;
auto kernels_ctor =
function_ctor.construct_kernels(kNumKernels + 2 * (num_variables - 1));
int kernel_index = 0;
std::vector<std::string> variable_handle_names;
variable_handle_names.reserve(num_variables);
for (int i = 0; i < num_variables; ++i) {
variable_handle_names.push_back(absl::StrCat("variable_handle", i));
std::string variable_handle_op_node_def =
absl::StrCat("var_handle_op_node_def", i);
std::string variable_handle_op_key = absl::StrCat("var_handle_op_key", i);
{
auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
createop_ctor.construct_arguments(0);
createop_ctor.construct_results(0);
createop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle(variable_handle_op_node_def),
attributes.GetHandle(variable_handle_op_key)});
kernel_index++;
}
{
auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
executeop_ctor.construct_arguments(0);
executeop_ctor.construct_results(1).Assign(
{regs.Def(variable_handle_names.back())});
executeop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle(variable_handle_op_node_def),
attributes.GetHandle(variable_handle_op_key)});
executeop_ctor.construct_last_uses(1).Assign({0});
kernel_index++;
}
}
{
std::vector<std::string> args;
args.reserve(3 + num_variables);
args.push_back("prefix_tensor");
args.push_back("name_tensor");
args.push_back("slice_tensor");
for (int i = 0; i < num_variables; ++i) {
args.push_back(variable_handle_names[i]);
}
auto restore_ctor = kernels_ctor.ConstructAt(kernel_index);
restore_ctor.set_code(kernels.Use("tf_mlrt.ifrt_restore_variable"));
restore_ctor.construct_arguments(args.size()).Assign(regs.Use(args));
restore_ctor.construct_results(0);
restore_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("restore_dtypes"),
attributes.GetHandle("truncate_in_cast")});
kernel_index++;
}
{
auto return_ctor = kernels_ctor.ConstructAt(kernel_index);
return_ctor.set_code(kernels.Use("return"));
return_ctor.construct_arguments(0);
kernel_index++;
}
function_ctor.set_num_regs(regs.size());
}
return buffer;
}
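// Same idea for tf_mlrt.ifrt_load_variable: one VarHandleOp feeds the load
// kernel, optionally duplicated to exercise non-last-use handling of the
// variable handle.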
mlrt::bc::Buffer CreateExecutableForIfrtLoadVariableOp(
bool redundant_ifrt_load_variable_op = false, bool used_by_host = false) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
mlrt::testing::SymbolTable kernels;
std::vector<std::string> kernel_names = {
"tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_load_variable",
"return"};
executable_ctor.construct_kernel_names(kernel_names.size())
.Assign(kernel_names);
kernels.Def(kernel_names);
mlrt::testing::AttributeTable attributes(
executable_ctor.construct_attributes(3));
attributes.Add("var_handle_op_node_def",
absl::Substitute(
R"pb(name: "VarHandleOp"
op: "VarHandleOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "container"
value { s: "$0" }
}
attr {
key: "shared_name"
value { s: "$1" }
}
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "shape"
value { shape { dim { size: 1 } } }
}
)pb",
kContainer, kSharedName));
attributes.Add("var_handle_op_key", 0);
attributes.Add("used_by_host", used_by_host);
auto functions_ctor = executable_ctor.construct_functions(1);
{
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
mlrt::testing::SymbolTable regs;
function_ctor.construct_output_regs(2).Assign(
{regs.Def("output_tensor"), regs.Def("output_future")});
const int kNumKernels = 4 + (redundant_ifrt_load_variable_op ? 1 : 0);
auto kernels_ctor = function_ctor.construct_kernels(kNumKernels);
int kernel_index = 0;
{
auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
createop_ctor.construct_arguments(0);
createop_ctor.construct_results(0);
createop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("var_handle_op_node_def"),
attributes.GetHandle("var_handle_op_key")});
kernel_index++;
}
{
auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
executeop_ctor.construct_arguments(0);
executeop_ctor.construct_results(1).Assign({regs.Def("variable_handle")});
executeop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("var_handle_op_node_def"),
attributes.GetHandle("var_handle_op_key")});
kernel_index++;
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
kernel_ctor.construct_results(2).Assign(
{regs.Use("output_tensor"), regs.Use("output_future")});
kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("used_by_host")});
kernel_ctor.construct_last_uses(1).Assign(
{redundant_ifrt_load_variable_op ? 0 : 1});
kernel_index++;
}
if (redundant_ifrt_load_variable_op) {
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
kernel_ctor.construct_results(2).Assign(
{regs.Def("dummy"), regs.Def("dummy_future2")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("used_by_host")});
kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
kernel_ctor.construct_last_uses(1).Assign({1});
kernel_index++;
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(2).Assign(
{regs.Use("output_tensor"), regs.Use("output_future")});
kernel_index++;
}
DCHECK_EQ(kernel_index, kNumKernels);
function_ctor.set_num_regs(regs.size());
}
return buffer;
}
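// The fixture wires up fallback state, an IfrtModelContext backed by the
// IFRT test client, and the checkpoint-loader restore context that the
// kernels above look up in the resource context.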
class KernelTest : public ::testing::Test {
protected:
void SetUp() override {
mlrt::RegisterBuiltinKernels(registry_);
RegisterTfMlrtKernels(registry_);
execution_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
restore_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
4, 4);
TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
session_options_, fdef_lib_));
runner_ = [](const std::function<void()>& f) { f(); };
fallback_request_state_ =
std::make_unique<tfd::KernelFallbackCompatRequestState>(
&runner_, &fallback_state_->device_manager(), 0,
&runner_table_, &resource_array_,
nullptr,
std::nullopt,
&fallback_state_->process_function_library_runtime());
TF_ASSERT_OK_AND_ASSIGN(client_, xla::ifrt::test_util::GetClient());
resource_context_
.CreateResource<tensorflow::ifrt_serving::IfrtModelContext>(
"IfrtModelContext", client_, ifrt_core_selector_.get(),
&GetThreadPool(), nullptr);
tf_context_ = std::make_unique<Context>(fallback_request_state_.get(),
&resource_context_);
ifrt_model_context_ =
resource_context_
.GetResource<tensorflow::ifrt_serving::IfrtModelContext>(
"IfrtModelContext")
.value();
ifrt_model_context_->set_checkpoint_loader_queue(restore_work_queue_.get());
resource_context_
.CreateResource<tensorflow::ifrt_serving::IfrtModelRestoreContext>(
ifrt_serving::kIfrtModelRestoreContextName,
std::make_unique<tensorflow::ifrt_serving::CheckpointLoader>(
&ifrt_model_context_->GetRestoreTensorRegistry(),
ifrt_model_context_->checkpoint_loader_queue()));
}
std::unique_ptr<tsl::test_util::MockServingDeviceSelector>
serving_device_selector_;
std::unique_ptr<ifrt_serving::IfrtServingCoreSelector> ifrt_core_selector_;
mlrt::KernelRegistry registry_;
std::unique_ptr<tfrt::ConcurrentWorkQueue> execution_work_queue_;
std::unique_ptr<tfrt::ConcurrentWorkQueue> restore_work_queue_;
tensorflow::SessionOptions session_options_;
tensorflow::FunctionDefLibrary fdef_lib_;
std::function<void(std::function<void()>)> runner_;
tfrt_stub::OpKernelRunnerTable runner_table_;
tfd::FallbackResourceArray resource_array_;
std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
tfrt::ResourceContext resource_context_;
std::shared_ptr<xla::ifrt::Client> client_;
std::unique_ptr<tfd::KernelFallbackCompatRequestState>
fallback_request_state_;
std::unique_ptr<Context> tf_context_;
tensorflow::ifrt_serving::IfrtModelContext* ifrt_model_context_;
};
TEST_F(KernelTest, IfrtLoadVariableOpCanGetTensorFromResourceManager) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(
      /*redundant_ifrt_load_variable_op=*/false, /*used_by_host=*/true);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
tsl::core::RefCountPtr<Var> variable(new Var(DT_INT32));
*variable->tensor() = input_tensor;
variable->is_initialized = true;
ASSERT_OK(
fallback_state_->device_manager().HostCPU()->resource_manager()->Create(
std::string(kContainer), std::string(kSharedName), &(*variable)));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(input_tensor));
}
TEST_F(KernelTest, IfrtLoadVariableOp) {
auto buffer = CreateExecutableForIfrtLoadVariableOp();
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
.shape = input_tensor.shape()},
.tensor_future = input_tensor_future};
input_tensor_promise.Set(input_tensor);
TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
kVariableRuntimeName, restore_tensor_info));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(tensorflow::Tensor()));
}
TEST_F(KernelTest, DuplicateIfrtLoadVariableOpShallSucceed) {
  auto buffer = CreateExecutableForIfrtLoadVariableOp(
      /*redundant_ifrt_load_variable_op=*/true);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
.shape = input_tensor.shape()},
.tensor_future = input_tensor_future};
input_tensor_promise.Set(input_tensor);
TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
kVariableRuntimeName, restore_tensor_info));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(tensorflow::Tensor()));
}
TEST_F(KernelTest, IfrtRestoreVariableOp) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
auto buffer = CreateExecutableForIfrtRestoreVariableOp();
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>({tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
xla::ifrt::Future<tensorflow::Tensor> restored_future =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 0));
absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
TF_ASSERT_OK(restored_tensor.status());
EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
}
TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
static constexpr int kNumVariables = 4;
auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
{tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
xla::ifrt::Future<tensorflow::Tensor> restored_future =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 0));
absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
TF_ASSERT_OK(restored_tensor.status());
EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future1 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 1));
absl::StatusOr<tensorflow::Tensor> restored_tensor1 =
restored_future1.Await();
TF_ASSERT_OK(restored_tensor1.status());
EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor<int16_t>({4, 5, 6}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future2 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 2));
absl::StatusOr<tensorflow::Tensor> restored_tensor2 =
restored_future2.Await();
TF_ASSERT_OK(restored_tensor2.status());
EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor<int16_t>({7, 8, 9}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future3 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 3));
absl::StatusOr<tensorflow::Tensor> restored_tensor3 =
restored_future3.Await();
TF_ASSERT_OK(restored_tensor3.status());
EXPECT_THAT(*restored_tensor3,
TensorEq(AsTensor<int16_t>({10, 11, 12}, {3})));
}
TEST_F(KernelTest, IfrtRestoreVariableOpInValidInput) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
static constexpr int kNumVariables = 4;
auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
{tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
EXPECT_THAT(execution_context.status(),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49a0ec33-fc7c-48d1-bb5c-a573c76e4ae8 | cpp | tensorflow/tensorflow | run_handler_concurrent_work_queue | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc | tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue_test.cc | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <ostream>
#include <utility>
#include "absl/strings/str_join.h"
#include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
namespace tfrt {
namespace tf {
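// Validates that the per-sub-thread-pool option vectors are consistent with
// num_sub_thread_pool, then forwards all options to the underlying
// RunHandlerPool.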
RunHandlerThreadWorkQueue::RunHandlerThreadWorkQueue(const Options& options)
: options_(options),
quiescing_state_(std::make_unique<::tfrt::internal::QuiescingState>()),
      non_blocking_work_queue_(quiescing_state_.get(),
                               /*num_threads=*/1),
      blocking_work_queue_(quiescing_state_.get(),
                           /*num_threads=*/1) {
CHECK(options.num_threads_in_sub_thread_pool.size() ==
options.num_sub_thread_pool);
CHECK(options.sub_thread_request_percentage.size() ==
options.num_sub_thread_pool);
RunHandlerPool::Options pool_options;
pool_options.num_inter_op_threads = options.num_main_threads;
pool_options.num_intra_op_threads = options.num_complementary_threads;
pool_options.max_concurrent_handler = options.max_concurrent_handler;
pool_options.blocking_threads_max_sleep_time_micro_sec =
options.blocking_threads_max_sleep_time_micro_sec;
pool_options.non_blocking_threads_sleep_time_micro_sec =
options.non_blocking_threads_sleep_time_micro_sec;
pool_options.num_sub_thread_pool = options.num_sub_thread_pool;
pool_options.num_threads_in_sub_thread_pool =
options.num_threads_in_sub_thread_pool;
pool_options.sub_thread_request_percentage =
options.sub_thread_request_percentage;
pool_options.enable_wake_up = options.enable_wake_up;
pool_options.wait_if_no_active_request = options.wait_if_no_active_request;
pool_options.use_adaptive_waiting_time = options.use_adaptive_waiting_time;
handler_pool_ = std::make_unique<RunHandlerPool>(pool_options);
}
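// Leases a RunHandler from the pool for this request and wraps it in a
// RunHandlerWorkQueue; returns an error if no handler becomes available
// within init_timeout_ms.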
absl::StatusOr<std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface>>
RunHandlerThreadWorkQueue::InitializeRequest(int64_t request_id) const {
RunHandlerOptions options;
std::unique_ptr<RunHandler> handler =
handler_pool_->Get(request_id, options_.init_timeout_ms, options);
if (!handler) {
return tensorflow::errors::Internal(absl::StrCat(
"Could not obtain RunHandler for request after waiting for ",
options_.init_timeout_ms, " ms."));
}
return {std::make_unique<RunHandlerWorkQueue>(std::move(handler))};
}
void RunHandlerThreadWorkQueue::AddTask(TaskFunction work) {
non_blocking_work_queue_.AddTask(std::move(work));
}
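// Follows the ConcurrentWorkQueue contract: returns an empty optional on
// success and hands the task back to the caller when it could not be
// scheduled. allow_queuing selects between queuing the task and running it
// immediately on a blocking thread.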
std::optional<TaskFunction> RunHandlerThreadWorkQueue::AddBlockingTask(
TaskFunction work, bool allow_queuing) {
if (allow_queuing) {
return blocking_work_queue_.EnqueueBlockingTask(std::move(work));
} else {
return blocking_work_queue_.RunBlockingTask(std::move(work));
}
}
void RunHandlerThreadWorkQueue::Quiesce() {
handler_pool_->Quiesce();
non_blocking_work_queue_.Quiesce();
blocking_work_queue_.Quiesce();
}
void RunHandlerThreadWorkQueue::Await(
ArrayRef<RCReference<AsyncValue>> values) {
tfrt::Await(values);
}
bool RunHandlerThreadWorkQueue::IsInWorkerThread() const {
return true;
}
std::ostream& operator<<(std::ostream& strm,
const RunHandlerThreadWorkQueue::Options& options) {
return strm << "{"
<< "num_main_threads = " << options.num_main_threads
<< ", num_complementary_threads = "
<< options.num_complementary_threads
<< ", init_timeout_ms = " << options.init_timeout_ms
<< ", max_concurrent_handler = " << options.max_concurrent_handler
<< ", num_sub_thread_pool = " << options.num_sub_thread_pool
<< ", num_threads_in_sub_thread_pool = ["
<< absl::StrJoin(options.num_threads_in_sub_thread_pool, ",")
<< "]"
<< ", sub_thread_request_percentage = ["
<< absl::StrJoin(options.sub_thread_request_percentage, ",")
<< "]"
<< ", non_blocking_threads_sleep_time_micro_sec = "
<< options.non_blocking_threads_sleep_time_micro_sec
<< ", blocking_threads_max_sleep_time_micro_sec = "
<< options.blocking_threads_max_sleep_time_micro_sec
<< ", use_adaptive_waiting_time = "
<< options.use_adaptive_waiting_time
<< ", wait_if_no_active_request = "
<< options.wait_if_no_active_request
<< ", enable_wake_up = " << options.enable_wake_up << "}";
}
}
} | #include "tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.h"
#include <cstdio>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/mutex.h"
namespace tfrt {
namespace tf {
namespace {
const int kNumMainThreads = 1;
const int kNumComplementaryThreads = 1;
class RunHandlerThreadWorkQueueTest : public ::testing::Test {
protected:
void SetUp() override {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = kNumComplementaryThreads;
options.num_main_threads = kNumMainThreads;
options.init_timeout_ms = 100;
pool_ = std::make_unique<RunHandlerThreadWorkQueue>(options);
auto decoded_diagnostic_handler = [&](const DecodedDiagnostic& diag) {};
std::unique_ptr<ConcurrentWorkQueue> work_queue =
CreateSingleThreadedWorkQueue();
std::unique_ptr<HostAllocator> host_allocator = CreateMallocAllocator();
host_ = std::make_unique<HostContext>(decoded_diagnostic_handler,
std::move(host_allocator),
std::move(work_queue));
    RequestContextBuilder req_ctx_builder{host_.get(),
                                          /*resource_context=*/nullptr};
auto queue = pool_->InitializeRequest(100);
TF_CHECK_OK(queue.status());
queue_ = std::move(*queue);
auto req_ctx = std::move(req_ctx_builder).build();
ASSERT_TRUE(static_cast<bool>(req_ctx));
exec_ctx_ = std::make_unique<ExecutionContext>(std::move(*req_ctx));
}
std::unique_ptr<RunHandlerThreadWorkQueue> pool_;
std::unique_ptr<tensorflow::tfrt_stub::WorkQueueInterface> queue_;
std::unique_ptr<HostContext> host_;
std::unique_ptr<ExecutionContext> exec_ctx_;
};
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true);
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningBlockingTaskNoQueueing) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
false));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningNonBlockingTaskWithNoExecCtx) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
pool_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
}
pool_->Quiesce();
EXPECT_EQ(n, 10);
}
TEST_F(RunHandlerThreadWorkQueueTest, RunningMixedTask) {
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
queue_->AddTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}));
ASSERT_FALSE(pool_->AddBlockingTask(TaskFunction([&n, &m] {
tensorflow::mutex_lock lock(m);
++n;
}),
true));
}
pool_->Quiesce();
EXPECT_EQ(n, 20);
}
TEST_F(RunHandlerThreadWorkQueueTest, NameReturnsValidString) {
EXPECT_TRUE(absl::StrContains(pool_->name(), "RunHandlerThreadWorkQueue"));
}
TEST_F(RunHandlerThreadWorkQueueTest, GetParallelismLevelOk) {
EXPECT_EQ(pool_->GetParallelismLevel(),
kNumComplementaryThreads + kNumMainThreads);
}
TEST_F(RunHandlerThreadWorkQueueTest, IsWorkerThreadOk) {
EXPECT_TRUE(pool_->IsInWorkerThread());
}
TEST_F(RunHandlerThreadWorkQueueTest, NoHandlerReturnsError) {
RunHandlerThreadWorkQueue::Options options;
options.num_complementary_threads = 0;
options.num_main_threads = 0;
options.init_timeout_ms = 1;
options.max_concurrent_handler = 0;
auto queue = std::make_unique<RunHandlerThreadWorkQueue>(options);
tfrt::RequestContextBuilder ctx_builder(nullptr, nullptr);
EXPECT_THAT(
queue->InitializeRequest(100),
tensorflow::testing::StatusIs(
tensorflow::error::INTERNAL,
"Could not obtain RunHandler for request after waiting for 1 ms."));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/run_handler_thread_pool/run_handler_concurrent_work_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a3a494c-3b74-4871-a96d-6f77ba7803fe | cpp | tensorflow/tensorflow | mutable_graph_view | tensorflow/core/grappler/mutable_graph_view.cc | tensorflow/core/grappler/mutable_graph_view_test.cc | #include "tensorflow/core/grappler/mutable_graph_view.h"
#include <algorithm>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace {
bool IsTensorIdPortValid(const TensorId& tensor_id) {
return tensor_id.index() >= Graph::kControlSlot;
}
bool IsTensorIdRegular(const TensorId& tensor_id) {
return tensor_id.index() > Graph::kControlSlot;
}
bool IsTensorIdControlling(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
bool IsOutputPortControlling(const MutableGraphView::OutputPort& port) {
return port.port_id == Graph::kControlSlot;
}
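// Returns true if `node` is an Identity (or single-input IdentityN) whose
// regular input comes from a Switch. Such identities pin control dependencies
// to a specific Switch output, so their control fanouts must not be deduped
// against regular inputs.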
bool IsIdentityConsumingSwitch(const MutableGraphView& graph,
const NodeDef& node) {
if ((IsIdentity(node) || IsIdentityNSingleInput(node)) &&
node.input_size() > 0) {
TensorId tensor_id = ParseTensorName(node.input(0));
if (IsTensorIdControlling(tensor_id)) {
return false;
}
NodeDef* input_node = graph.GetNode(tensor_id.node());
if (input_node == nullptr) {
return false;
}
return IsSwitch(*input_node);
}
return false;
}
bool CanDedupControlWithRegularInput(const MutableGraphView& graph,
const NodeDef& control_node) {
return !IsIdentityConsumingSwitch(graph, control_node);
}
bool CanDedupControlWithRegularInput(const MutableGraphView& graph,
absl::string_view control_node_name) {
NodeDef* control_node = graph.GetNode(control_node_name);
if (control_node == nullptr) {
return false;
}
return CanDedupControlWithRegularInput(graph, *control_node);
}
bool HasRegularFaninNode(const MutableGraphView& graph, const NodeDef& node,
absl::string_view fanin_node_name) {
const int num_regular_fanins =
graph.NumFanins(node, false);
for (int i = 0; i < num_regular_fanins; ++i) {
if (ParseTensorName(node.input(i)).node() == fanin_node_name) {
return true;
}
}
return false;
}
using FanoutsMap =
absl::flat_hash_map<MutableGraphView::OutputPort,
absl::flat_hash_set<MutableGraphView::InputPort>>;
void SwapControlledFanoutInputs(const MutableGraphView& graph,
const FanoutsMap::iterator& control_fanouts,
absl::string_view to_node_name) {
absl::string_view from_node_name(control_fanouts->first.node->name());
string control = TensorIdToString({to_node_name, Graph::kControlSlot});
for (const auto& control_fanout : control_fanouts->second) {
const int start = graph.NumFanins(*control_fanout.node,
false);
for (int i = start; i < control_fanout.node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(control_fanout.node->input(i));
if (tensor_id.node() == from_node_name) {
control_fanout.node->set_input(i, control);
break;
}
}
}
}
void SwapRegularFanoutInputs(FanoutsMap* fanouts, NodeDef* from_node,
absl::string_view to_node_name, int max_port) {
MutableGraphView::OutputPort port;
port.node = from_node;
for (int i = 0; i <= max_port; ++i) {
port.port_id = i;
auto it = fanouts->find(port);
if (it == fanouts->end()) {
continue;
}
string input = TensorIdToString({to_node_name, i});
for (const auto& fanout : it->second) {
fanout.node->set_input(fanout.port_id, input);
}
}
}
using MaxOutputPortsMap = absl::flat_hash_map<const NodeDef*, int>;
void SwapFanoutInputs(const MutableGraphView& graph, FanoutsMap* fanouts,
MaxOutputPortsMap* max_output_ports, NodeDef* from_node,
NodeDef* to_node) {
auto from_control_fanouts = fanouts->find({from_node, Graph::kControlSlot});
if (from_control_fanouts != fanouts->end()) {
SwapControlledFanoutInputs(graph, from_control_fanouts, to_node->name());
}
auto to_control_fanouts = fanouts->find({to_node, Graph::kControlSlot});
if (to_control_fanouts != fanouts->end()) {
SwapControlledFanoutInputs(graph, to_control_fanouts, from_node->name());
}
auto from_max_port = max_output_ports->find(from_node);
if (from_max_port != max_output_ports->end()) {
SwapRegularFanoutInputs(fanouts, from_node, to_node->name(),
from_max_port->second);
}
auto to_max_port = max_output_ports->find(to_node);
if (to_max_port != max_output_ports->end()) {
SwapRegularFanoutInputs(fanouts, to_node, from_node->name(),
to_max_port->second);
}
}
void SwapFanoutsMapValues(FanoutsMap* fanouts,
const MutableGraphView::OutputPort& from_port,
const FanoutsMap::iterator& from_fanouts,
const MutableGraphView::OutputPort& to_port,
const FanoutsMap::iterator& to_fanouts) {
const bool from_exists = from_fanouts != fanouts->end();
const bool to_exists = to_fanouts != fanouts->end();
if (from_exists && to_exists) {
std::swap(from_fanouts->second, to_fanouts->second);
} else if (from_exists) {
auto node = fanouts->extract(from_fanouts);
fanouts->emplace(to_port, std::move(node.mapped()));
} else if (to_exists) {
auto node = fanouts->extract(to_port);
fanouts->emplace(from_port, std::move(node.mapped()));
}
}
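// Swaps the regular (non-control) fanouts of two nodes port by port and
// exchanges their recorded max regular output ports, forwarding any ports
// that only one of the nodes has.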
void SwapRegularFanoutsAndMaxPortValues(FanoutsMap* fanouts,
MaxOutputPortsMap* max_output_ports,
NodeDef* from_node, NodeDef* to_node) {
auto from_max_port = max_output_ports->find(from_node);
auto to_max_port = max_output_ports->find(to_node);
bool from_exists = from_max_port != max_output_ports->end();
bool to_exists = to_max_port != max_output_ports->end();
auto forward_fanouts = [fanouts](NodeDef* from, NodeDef* to, int start,
int end) {
for (int i = start; i <= end; ++i) {
MutableGraphView::OutputPort from_port(from, i);
auto node = fanouts->extract(from_port);
if (!node.empty()) {
MutableGraphView::OutputPort to_port(to, i);
fanouts->emplace(to_port, std::move(node.mapped()));
}
}
};
if (from_exists && to_exists) {
const int from = from_max_port->second;
const int to = to_max_port->second;
const int shared = std::min(from, to);
for (int i = 0; i <= shared; ++i) {
MutableGraphView::OutputPort from_port(from_node, i);
auto from_fanouts = fanouts->find(from_port);
MutableGraphView::OutputPort to_port(to_node, i);
auto to_fanouts = fanouts->find(to_port);
SwapFanoutsMapValues(fanouts, from_port, from_fanouts, to_port,
to_fanouts);
}
if (to > from) {
forward_fanouts(to_node, from_node, shared + 1, to);
} else if (from > to) {
forward_fanouts(from_node, to_node, shared + 1, from);
}
std::swap(from_max_port->second, to_max_port->second);
} else if (from_exists) {
forward_fanouts(from_node, to_node, 0, from_max_port->second);
max_output_ports->emplace(to_node, from_max_port->second);
max_output_ports->erase(from_node);
} else if (to_exists) {
forward_fanouts(to_node, from_node, 0, to_max_port->second);
max_output_ports->emplace(from_node, to_max_port->second);
max_output_ports->erase(to_node);
}
}
bool HasFanoutValue(const FanoutsMap& fanouts, const FanoutsMap::iterator& it) {
return it != fanouts.end() && !it->second.empty();
}
Status MutationError(absl::string_view function_name, absl::string_view params,
absl::string_view msg) {
return errors::InvalidArgument(absl::Substitute(
"MutableGraphView::$0($1) error: $2.", function_name, params, msg));
}
using ErrorHandler = std::function<Status(absl::string_view)>;
ErrorHandler UpdateFanoutsError(absl::string_view from_node_name,
absl::string_view to_node_name) {
return [from_node_name, to_node_name](absl::string_view msg) {
string params = absl::Substitute("from_node_name='$0', to_node_name='$1'",
from_node_name, to_node_name);
return MutationError("UpdateFanouts", params, msg);
};
}
Status CheckFaninIsRegular(const TensorId& fanin, ErrorHandler handler) {
if (!IsTensorIdRegular(fanin)) {
return handler(absl::Substitute("fanin '$0' must be a regular tensor id",
fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckFaninIsValid(const TensorId& fanin, ErrorHandler handler) {
if (!IsTensorIdPortValid(fanin)) {
return handler(absl::Substitute("fanin '$0' must be a valid tensor id",
fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckAddingFaninToSelf(absl::string_view node_name,
const TensorId& fanin, ErrorHandler handler) {
if (node_name == fanin.node()) {
return handler(
absl::Substitute("can't add fanin '$0' to self", fanin.ToString()));
}
return absl::OkStatus();
}
Status CheckRemovingFaninFromSelf(absl::string_view node_name,
const TensorId& fanin, ErrorHandler handler) {
if (node_name == fanin.node()) {
return handler(absl::Substitute("can't remove fanin '$0' from self",
fanin.ToString()));
}
return absl::OkStatus();
}
string NodeMissingErrorMsg(absl::string_view node_name) {
return absl::Substitute("node '$0' was not found", node_name);
}
Status CheckNodeExists(absl::string_view node_name, NodeDef* node,
ErrorHandler handler) {
if (node == nullptr) {
return handler(NodeMissingErrorMsg(node_name));
}
return absl::OkStatus();
}
Status CheckPortRange(int port, int min, int max, ErrorHandler handler) {
if (port < min || port > max) {
if (max < min) {
return handler("no available ports as node has no regular fanins");
}
return handler(
absl::Substitute("port must be in range [$0, $1]", min, max));
}
return absl::OkStatus();
}
string SwapNodeNamesSwitchControlErrorMsg(absl::string_view node_name) {
return absl::Substitute(
"can't swap node name '$0' as it will become a Switch control dependency",
node_name);
}
string GeneratedNameForIdentityConsumingSwitch(
const MutableGraphView::OutputPort& fanin) {
return AddPrefixToNodeName(
absl::StrCat(fanin.node->name(), "_", fanin.port_id),
kMutableGraphViewCtrl);
}
string PrintInTextFormat(const protobuf::MessageLite& message) {
return message.ShortDebugString();
}
string PrintInTextFormat(const protobuf::Message& message) {
string message_text;
::tensorflow::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
printer.PrintToString(message, &message_text);
if (!message_text.empty() && message_text[message_text.size() - 1] == ' ') {
message_text.resize(message_text.size() - 1);
}
return message_text;
}
}
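// Registers `node`'s fanins in the fanout map. Duplicate control inputs are
// removed, and a control input is also dropped when an earlier input from the
// same node exists, unless the controlling node is an identity consuming a
// Switch (whose control edge must be preserved). Max regular input/output
// ports are updated along the way.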
void MutableGraphView::AddAndDedupFanouts(NodeDef* node) {
absl::flat_hash_set<absl::string_view> fanins;
absl::flat_hash_set<absl::string_view> controlling_fanins;
int max_input_port = -1;
int pos = 0;
const int last_idx = node->input_size() - 1;
int last_pos = last_idx;
while (pos <= last_pos) {
TensorId tensor_id = ParseTensorName(node->input(pos));
absl::string_view input_node_name = tensor_id.node();
bool is_control_input = IsTensorIdControlling(tensor_id);
bool can_dedup_control_with_regular_input =
CanDedupControlWithRegularInput(*this, input_node_name);
bool can_dedup_control =
is_control_input && (can_dedup_control_with_regular_input ||
controlling_fanins.contains(input_node_name));
if (!gtl::InsertIfNotPresent(&fanins, input_node_name) &&
can_dedup_control) {
node->mutable_input()->SwapElements(pos, last_pos);
--last_pos;
} else {
OutputPort output(nodes()[input_node_name], tensor_id.index());
if (is_control_input) {
fanouts()[output].emplace(node, Graph::kControlSlot);
} else {
max_input_port = pos;
int& max_port = max_regular_output_port()[output.node];
max_port = std::max(max_port, output.port_id);
fanouts()[output].emplace(node, pos);
}
++pos;
}
if (is_control_input) {
controlling_fanins.insert(input_node_name);
}
}
if (last_pos < last_idx) {
node->mutable_input()->DeleteSubrange(last_pos + 1, last_idx - last_pos);
}
if (max_input_port > -1) {
max_regular_input_port()[node] = max_input_port;
}
}
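// After the last fanout of `fanin`'s port was removed, walks lower ports to
// find the node's new max regular output port, erasing the entry entirely if
// no ports with fanouts remain.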
void MutableGraphView::UpdateMaxRegularOutputPortForRemovedFanin(
const OutputPort& fanin,
const absl::flat_hash_set<InputPort>& fanin_fanouts) {
int max_port = max_regular_output_port()[fanin.node];
if (!fanin_fanouts.empty() || max_port != fanin.port_id) {
return;
}
bool updated_max_port = false;
for (int i = fanin.port_id - 1; i >= 0; --i) {
OutputPort fanin_port(fanin.node, i);
if (!fanouts()[fanin_port].empty()) {
max_regular_output_port()[fanin.node] = i;
updated_max_port = true;
break;
}
}
if (!updated_max_port) {
max_regular_output_port().erase(fanin.node);
}
}
void MutableGraphView::UpdateMaxRegularOutputPortForAddedFanin(
const OutputPort& fanin) {
if (max_regular_output_port()[fanin.node] < fanin.port_id) {
max_regular_output_port()[fanin.node] = fanin.port_id;
}
}
const absl::flat_hash_set<MutableGraphView::InputPort>&
MutableGraphView::GetFanout(const GraphView::OutputPort& port) const {
return GetFanout(MutableGraphView::OutputPort(const_cast<NodeDef*>(port.node),
port.port_id));
}
absl::flat_hash_set<MutableGraphView::OutputPort> MutableGraphView::GetFanin(
const GraphView::InputPort& port) const {
return GetFanin(MutableGraphView::InputPort(const_cast<NodeDef*>(port.node),
port.port_id));
}
const MutableGraphView::OutputPort MutableGraphView::GetRegularFanin(
const GraphView::InputPort& port) const {
return GetRegularFanin(MutableGraphView::InputPort(
const_cast<NodeDef*>(port.node), port.port_id));
}
NodeDef* MutableGraphView::AddNode(NodeDef&& node) {
auto* node_in_graph = graph()->add_node();
*node_in_graph = std::move(node);
AddUniqueNodeOrDie(node_in_graph);
AddAndDedupFanouts(node_in_graph);
return node_in_graph;
}
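// Merges `subgraph` into this graph. New function definitions are moved into
// the graph's library (a name collision with a different body is an error),
// and all subgraph nodes are added first and indexed in a second pass so
// their fanins can reference each other.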
Status MutableGraphView::AddSubgraph(GraphDef&& subgraph) {
const int function_size = subgraph.library().function_size();
if (function_size > 0) {
absl::flat_hash_map<absl::string_view, const FunctionDef*> graph_fdefs;
for (const FunctionDef& fdef : graph()->library().function()) {
graph_fdefs.emplace(fdef.signature().name(), &fdef);
}
for (FunctionDef& fdef : *subgraph.mutable_library()->mutable_function()) {
const auto graph_fdef = graph_fdefs.find(fdef.signature().name());
if (graph_fdef == graph_fdefs.end()) {
VLOG(3) << "Add new function definition: " << fdef.signature().name();
graph()->mutable_library()->add_function()->Swap(&fdef);
} else {
if (!FunctionDefsEqual(fdef, *graph_fdef->second)) {
return MutationError(
"AddSubgraph",
absl::Substitute("function_size=$0", function_size),
absl::StrCat(
"Found different function definition with the same name: ",
fdef.signature().name()));
}
}
}
}
int node_size_before = graph()->node_size();
for (NodeDef& node : *subgraph.mutable_node()) {
auto* node_in_graph = graph()->add_node();
node_in_graph->Swap(&node);
TF_RETURN_IF_ERROR(AddUniqueNode(node_in_graph));
}
for (int i = node_size_before; i < graph()->node_size(); ++i) {
NodeDef* node = graph()->mutable_node(i);
AddAndDedupFanouts(node);
}
return absl::OkStatus();
}
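// Updates a node's op, device, and attributes in place. Changing the op to
// Switch is rejected while the node drives control dependencies, because a
// Switch output cannot serve as a control input directly.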
Status MutableGraphView::UpdateNode(
absl::string_view node_name, absl::string_view op, absl::string_view device,
absl::Span<const std::pair<string, AttrValue>> attrs) {
auto error_status = [node_name, op, device, attrs](absl::string_view msg) {
std::vector<string> attr_strs;
attr_strs.reserve(attrs.size());
for (const auto& attr : attrs) {
string attr_str = absl::Substitute("('$0', $1)", attr.first,
PrintInTextFormat(attr.second));
attr_strs.push_back(attr_str);
}
string params =
absl::Substitute("node_name='$0', op='$1', device='$2', attrs={$3}",
node_name, op, device, absl::StrJoin(attr_strs, ", "));
return MutationError("UpdateNodeOp", params, msg);
};
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
MutableGraphView::OutputPort control_port(node, Graph::kControlSlot);
auto control_fanouts = GetFanout(control_port);
if (op == "Switch" && !control_fanouts.empty()) {
return error_status(
"can't change node op to Switch when node drives a control dependency "
"(alternatively, we could add the identity node needed, but it seems "
"like an unlikely event and probably a mistake)");
}
if (node->device() != device) {
node->set_device(string(device));
}
node->mutable_attr()->clear();
for (const auto& attr : attrs) {
(*node->mutable_attr())[attr.first] = attr.second;
}
if (node->op() == op) {
return absl::OkStatus();
}
node->set_op(string(op));
if (CanDedupControlWithRegularInput(*this, *node)) {
for (const auto& control_fanout : control_fanouts) {
if (HasRegularFaninNode(*this, *control_fanout.node, node->name())) {
RemoveControllingFaninInternal(control_fanout.node, node);
}
}
}
return absl::OkStatus();
}
Status MutableGraphView::UpdateNodeName(absl::string_view from_node_name,
absl::string_view to_node_name,
bool update_fanouts) {
auto error_status = [from_node_name, to_node_name,
update_fanouts](absl::string_view msg) {
string params = absl::Substitute(
"from_node_name='$0', to_node_name='$1', update_fanouts=$2",
from_node_name, to_node_name, update_fanouts);
return MutationError("UpdateNodeName", params, msg);
};
NodeDef* node = GetNode(from_node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(from_node_name, node, error_status));
if (node->name() == to_node_name) {
return absl::OkStatus();
}
if (HasNode(to_node_name)) {
return error_status(
"can't update node name because new node name is in use");
}
auto max_output_port = max_regular_output_port().find(node);
const bool has_max_output_port =
max_output_port != max_regular_output_port().end();
auto control_fanouts = fanouts().find({node, Graph::kControlSlot});
if (update_fanouts) {
    if (control_fanouts != fanouts().end()) {
      SwapControlledFanoutInputs(*this, control_fanouts, to_node_name);
    }
if (has_max_output_port) {
SwapRegularFanoutInputs(&fanouts(), node, to_node_name,
max_output_port->second);
}
} else if (has_max_output_port ||
HasFanoutValue(fanouts(), control_fanouts)) {
return error_status("can't update node name because node has fanouts");
}
nodes().erase(node->name());
node->set_name(string(to_node_name));
nodes().emplace(node->name(), node);
return absl::OkStatus();
}
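// Swaps the names of two existing nodes. With update_fanouts, consumers keep
// their producers by following the renames; otherwise fanins and fanouts are
// remapped in place, which is rejected if it would turn an existing control
// dependency into one on a Switch.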
Status MutableGraphView::SwapNodeNames(absl::string_view from_node_name,
absl::string_view to_node_name,
bool update_fanouts) {
auto error_status = [from_node_name, to_node_name,
update_fanouts](absl::string_view msg) {
string params = absl::Substitute(
"from_node_name='$0', to_node_name='$1', update_fanouts=$2",
from_node_name, to_node_name, update_fanouts);
return MutationError("SwapNodeNames", params, msg);
};
NodeDef* from_node = GetNode(from_node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(from_node_name, from_node, error_status));
if (from_node_name == to_node_name) {
return absl::OkStatus();
}
NodeDef* to_node = GetNode(to_node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(to_node_name, to_node, error_status));
auto swap_names = [this, from_node, to_node]() {
nodes().erase(from_node->name());
nodes().erase(to_node->name());
std::swap(*from_node->mutable_name(), *to_node->mutable_name());
nodes().emplace(from_node->name(), from_node);
nodes().emplace(to_node->name(), to_node);
};
if (update_fanouts) {
SwapFanoutInputs(*this, &fanouts(), &max_regular_output_port(), from_node,
to_node);
swap_names();
return absl::OkStatus();
}
bool from_is_switch = IsSwitch(*from_node);
MutableGraphView::OutputPort to_control(to_node, Graph::kControlSlot);
auto to_control_fanouts = fanouts().find(to_control);
if (from_is_switch && HasFanoutValue(fanouts(), to_control_fanouts)) {
return error_status(SwapNodeNamesSwitchControlErrorMsg(from_node_name));
}
bool to_is_switch = IsSwitch(*to_node);
MutableGraphView::OutputPort from_control(from_node, Graph::kControlSlot);
auto from_control_fanouts = fanouts().find(from_control);
if (to_is_switch && HasFanoutValue(fanouts(), from_control_fanouts)) {
return error_status(SwapNodeNamesSwitchControlErrorMsg(to_node_name));
}
swap_names();
SwapFanoutsMapValues(&fanouts(), from_control, from_control_fanouts,
to_control, to_control_fanouts);
SwapRegularFanoutsAndMaxPortValues(&fanouts(), &max_regular_output_port(),
from_node, to_node);
auto update_fanins = [this](NodeDef* node, absl::string_view old_node_name) {
for (int i = 0; i < node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(node->input(i));
if (tensor_id.node() == node->name()) {
const int idx = tensor_id.index();
const int node_idx =
IsTensorIdControlling(tensor_id) ? Graph::kControlSlot : i;
MutableGraphView::OutputPort from_fanin(node, idx);
absl::flat_hash_set<InputPort>* from_fanouts = &fanouts()[from_fanin];
from_fanouts->erase({node, node_idx});
UpdateMaxRegularOutputPortForRemovedFanin(from_fanin, *from_fanouts);
MutableGraphView::OutputPort to_fanin(nodes().at(old_node_name), idx);
fanouts()[to_fanin].insert({node, node_idx});
UpdateMaxRegularOutputPortForAddedFanin(to_fanin);
node->set_input(i, TensorIdToString({old_node_name, idx}));
}
}
};
update_fanins(from_node, to_node->name());
update_fanins(to_node, from_node->name());
auto dedup_control_fanouts =
[this](NodeDef* node, const FanoutsMap::iterator& control_fanouts) {
if (CanDedupControlWithRegularInput(*this, *node) &&
control_fanouts != fanouts().end()) {
for (auto it = control_fanouts->second.begin();
it != control_fanouts->second.end();) {
const auto& control_fanout = *it++;
if (HasRegularFaninNode(*this, *control_fanout.node,
node->name())) {
RemoveControllingFaninInternal(control_fanout.node, node);
}
}
}
};
auto dedup_switch_control = [this, dedup_control_fanouts](NodeDef* node) {
OutputPort port;
port.node = node;
const int max_port =
gtl::FindWithDefault(max_regular_output_port(), node, -1);
for (int i = 0; i <= max_port; ++i) {
port.port_id = i;
auto it = fanouts().find(port);
if (it == fanouts().end()) {
continue;
}
for (const auto& fanout : it->second) {
auto fanout_controls =
fanouts().find({fanout.node, Graph::kControlSlot});
dedup_control_fanouts(fanout.node, fanout_controls);
}
}
};
if (!from_is_switch) {
if (to_is_switch) {
dedup_switch_control(from_node);
} else {
auto from_control_fanouts = fanouts().find(from_control);
dedup_control_fanouts(from_node, from_control_fanouts);
}
}
if (!to_is_switch) {
if (from_is_switch) {
dedup_switch_control(to_node);
} else {
auto to_control_fanouts = fanouts().find(to_control);
dedup_control_fanouts(to_node, to_control_fanouts);
}
}
return absl::OkStatus();
}
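// Redirects all fanouts of `from_node_name` (regular and control) to
// `to_node_name`, leaving both nodes' own fanins untouched. Fails if control
// fanouts would end up depending on a Switch.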
Status MutableGraphView::UpdateFanouts(absl::string_view from_node_name,
absl::string_view to_node_name) {
NodeDef* from_node = GetNode(from_node_name);
TF_RETURN_IF_ERROR(
CheckNodeExists(from_node_name, from_node,
UpdateFanoutsError(from_node_name, to_node_name)));
NodeDef* to_node = GetNode(to_node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(
to_node_name, to_node, UpdateFanoutsError(from_node_name, to_node_name)));
return UpdateFanoutsInternal(from_node, to_node);
}
Status MutableGraphView::UpdateFanoutsInternal(NodeDef* from_node,
NodeDef* to_node) {
VLOG(2) << absl::Substitute("Update fanouts from '$0' to '$1'.",
from_node->name(), to_node->name());
if (from_node == to_node) {
return absl::OkStatus();
}
const auto add_edge = [this](const OutputPort& output_port,
const InputPort& input_port) {
fanouts()[output_port].insert(input_port);
};
const auto remove_edge = [this](const OutputPort& output_port,
const InputPort& input_port) {
fanouts()[output_port].erase(input_port);
};
auto control_fanouts =
GetFanout(GraphView::OutputPort(from_node, Graph::kControlSlot));
bool to_node_is_switch = IsSwitch(*to_node);
for (const InputPort& control_port : control_fanouts) {
if (control_port.node == to_node) continue;
if (to_node_is_switch) {
return UpdateFanoutsError(from_node->name(), to_node->name())(
absl::Substitute("can't update fanouts to node '$0' as it will "
"become a Switch control dependency",
to_node->name()));
}
NodeDef* node = control_port.node;
RemoveControllingFaninInternal(node, from_node);
AddFaninInternal(node, {to_node, Graph::kControlSlot});
}
auto regular_edges =
GetFanoutEdges(*from_node, false);
int keep_max_regular_output_port = -1;
for (const Edge& edge : regular_edges) {
const OutputPort output_port = edge.src;
const InputPort input_port = edge.dst;
if (input_port.node == to_node) {
keep_max_regular_output_port =
std::max(keep_max_regular_output_port, output_port.port_id);
continue;
}
input_port.node->set_input(
input_port.port_id,
TensorIdToString({to_node->name(), output_port.port_id}));
remove_edge(output_port, input_port);
add_edge(OutputPort(to_node, output_port.port_id), input_port);
if (CanDedupControlWithRegularInput(*this, *to_node)) {
RemoveControllingFaninInternal(input_port.node, to_node);
}
}
max_regular_output_port()[to_node] = max_regular_output_port()[from_node];
if (keep_max_regular_output_port >= 0) {
max_regular_output_port()[from_node] = keep_max_regular_output_port;
} else {
max_regular_output_port().erase(from_node);
}
return absl::OkStatus();
}
bool MutableGraphView::AddFaninInternal(NodeDef* node,
const OutputPort& fanin) {
int num_regular_fanins =
NumFanins(*node, false);
bool input_is_control = IsOutputPortControlling(fanin);
bool can_dedup_control_with_regular_input =
CanDedupControlWithRegularInput(*this, *fanin.node);
if (input_is_control) {
const int start =
can_dedup_control_with_regular_input ? 0 : num_regular_fanins;
for (int i = start; i < node->input_size(); ++i) {
if (ParseTensorName(node->input(i)).node() == fanin.node->name()) {
return false;
}
}
}
InputPort input;
input.node = node;
input.port_id = input_is_control ? Graph::kControlSlot : num_regular_fanins;
node->add_input(TensorIdToString({fanin.node->name(), fanin.port_id}));
if (!input_is_control) {
const int last_node_input = node->input_size() - 1;
if (num_regular_fanins < last_node_input) {
node->mutable_input()->SwapElements(last_node_input, num_regular_fanins);
}
}
fanouts()[fanin].insert(input);
if (max_regular_output_port()[fanin.node] < fanin.port_id) {
max_regular_output_port()[fanin.node] = fanin.port_id;
}
if (!input_is_control) {
max_regular_input_port()[node] = num_regular_fanins;
if (can_dedup_control_with_regular_input) {
RemoveControllingFaninInternal(node, fanin.node);
}
}
return true;
}
Status MutableGraphView::AddRegularFanin(absl::string_view node_name,
const TensorId& fanin) {
auto error_status = [node_name, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', fanin='$1'", node_name,
fanin.ToString());
return MutationError("AddRegularFanin", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsRegular(fanin, error_status));
TF_RETURN_IF_ERROR(CheckAddingFaninToSelf(node_name, fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
AddFaninInternal(node, {fanin_node, fanin.index()});
return absl::OkStatus();
}
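// Inserts `fanin` as the regular input at `port`, shifting the existing
// regular fanins at and after that port up by one and fixing up their fanout
// map entries.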
Status MutableGraphView::AddRegularFaninByPort(absl::string_view node_name,
int port,
const TensorId& fanin) {
auto error_status = [node_name, port, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', port=$1, fanin='$2'",
node_name, port, fanin.ToString());
return MutationError("AddRegularFaninByPort", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsRegular(fanin, error_status));
TF_RETURN_IF_ERROR(CheckAddingFaninToSelf(node_name, fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
const int num_regular_fanins =
NumFanins(*node, false);
TF_RETURN_IF_ERROR(
CheckPortRange(port, 0, num_regular_fanins, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
const int last_node_input = node->input_size();
node->add_input(TensorIdToString(fanin));
node->mutable_input()->SwapElements(num_regular_fanins, last_node_input);
for (int i = num_regular_fanins - 1; i >= port; --i) {
TensorId tensor_id = ParseTensorName(node->input(i));
OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
absl::flat_hash_set<InputPort>* fanouts_set = &fanouts()[fanin_port];
fanouts_set->erase({node, i});
fanouts_set->insert({node, i + 1});
node->mutable_input()->SwapElements(i, i + 1);
}
OutputPort fanin_port(fanin_node, fanin.index());
fanouts()[fanin_port].insert({node, port});
UpdateMaxRegularOutputPortForAddedFanin(fanin_port);
max_regular_input_port()[node] = num_regular_fanins;
if (CanDedupControlWithRegularInput(*this, *fanin_node)) {
RemoveControllingFaninInternal(node, fanin_node);
}
return absl::OkStatus();
}
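// Resolves which node a new control edge should point at. Control
// dependencies must not hang off a Switch, so for a Switch fanin this returns
// an existing Identity consuming the given output port, or nullptr with an
// empty `error_msg` to signal that such an Identity must be created.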
NodeDef* MutableGraphView::GetControllingFaninToAdd(absl::string_view node_name,
const OutputPort& fanin,
string* error_msg) {
if (!IsSwitch(*fanin.node)) {
return fanin.node;
} else {
if (IsOutputPortControlling(fanin)) {
TensorId tensor_id(fanin.node->name(), fanin.port_id);
*error_msg = absl::Substitute(
"can't add fanin '$0' as it will become a Switch control dependency",
tensor_id.ToString());
return nullptr;
}
for (const auto& fanout : GetFanout(fanin)) {
if (IsIdentity(*fanout.node) || IsIdentityNSingleInput(*fanout.node)) {
if (fanout.node->name() == node_name) {
*error_msg =
absl::Substitute("can't add found fanin '$0' to self",
AsControlDependency(fanout.node->name()));
return nullptr;
}
return fanout.node;
}
}
if (GeneratedNameForIdentityConsumingSwitch(fanin) == node_name) {
*error_msg = absl::Substitute("can't add generated fanin '$0' to self",
AsControlDependency(string(node_name)));
}
}
return nullptr;
}
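// Returns the generated Identity node forwarding the given Switch output,
// creating it with a deterministic name and the fanin's device and dtype if
// it does not exist yet.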
NodeDef* MutableGraphView::GetOrCreateIdentityConsumingSwitch(
const OutputPort& fanin) {
string identity_name = GeneratedNameForIdentityConsumingSwitch(fanin);
NodeDef* identity_node = GetNode(identity_name);
if (identity_node == nullptr) {
NodeDef new_node;
new_node.set_name(identity_name);
new_node.set_op("Identity");
new_node.set_device(fanin.node->device());
(*new_node.mutable_attr())["T"].set_type(fanin.node->attr().at("T").type());
new_node.add_input(TensorIdToString({fanin.node->name(), fanin.port_id}));
identity_node = AddNode(std::move(new_node));
}
return identity_node;
}
Status MutableGraphView::AddControllingFanin(absl::string_view node_name,
const TensorId& fanin) {
auto error_status = [node_name, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', fanin='$1'", node_name,
fanin.ToString());
return MutationError("AddControllingFanin", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsValid(fanin, error_status));
TF_RETURN_IF_ERROR(CheckAddingFaninToSelf(node_name, fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
OutputPort fanin_port(fanin_node, fanin.index());
string error_msg = "";
NodeDef* control_node = GetControllingFaninToAdd(
node_name, {fanin_node, fanin.index()}, &error_msg);
if (!error_msg.empty()) {
return error_status(error_msg);
}
if (control_node == nullptr) {
control_node = GetOrCreateIdentityConsumingSwitch(fanin_port);
}
AddFaninInternal(node, {control_node, Graph::kControlSlot});
return absl::OkStatus();
}
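// Removes every regular input matching `fanin`, compacting the remaining
// regular inputs to the front while keeping the fanout map and max-port
// bookkeeping consistent. Returns true if anything was removed.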
bool MutableGraphView::RemoveRegularFaninInternal(NodeDef* node,
const OutputPort& fanin) {
auto remove_input = [this, node](const OutputPort& fanin_port,
int node_input_port, bool update_max_port) {
InputPort input(node, node_input_port);
absl::flat_hash_set<InputPort>* fanouts_set = &fanouts()[fanin_port];
fanouts_set->erase(input);
if (update_max_port) {
UpdateMaxRegularOutputPortForRemovedFanin(fanin_port, *fanouts_set);
}
return fanouts_set;
};
auto mutable_inputs = node->mutable_input();
bool modified = false;
const int num_regular_fanins =
NumFanins(*node, false);
int i;
int curr_pos = 0;
for (i = 0; i < num_regular_fanins; ++i) {
TensorId tensor_id = ParseTensorName(node->input(i));
if (tensor_id.node() == fanin.node->name() &&
tensor_id.index() == fanin.port_id) {
remove_input(fanin, i, true);
modified = true;
} else if (modified) {
OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
auto fanouts_set = remove_input(fanin_port, i, false);
fanouts_set->insert({node, curr_pos});
mutable_inputs->SwapElements(i, curr_pos);
++curr_pos;
} else {
++curr_pos;
}
}
if (modified) {
const int last_regular_input_port = curr_pos - 1;
if (last_regular_input_port < 0) {
max_regular_input_port().erase(node);
} else {
max_regular_input_port()[node] = last_regular_input_port;
}
if (curr_pos < i) {
mutable_inputs->DeleteSubrange(curr_pos, i - curr_pos);
}
}
return modified;
}
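// Public wrapper that removes regular fanin `fanin` from node `node_name`
// after validating the fanin is a regular tensor id, is not a self loop, and
// that both nodes exist.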
Status MutableGraphView::RemoveRegularFanin(absl::string_view node_name,
const TensorId& fanin) {
auto error_status = [node_name, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', fanin='$1'", node_name,
fanin.ToString());
return MutationError("RemoveRegularFanin", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsRegular(fanin, error_status));
TF_RETURN_IF_ERROR(
CheckRemovingFaninFromSelf(node_name, fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
RemoveRegularFaninInternal(node, {fanin_node, fanin.index()});
return absl::OkStatus();
}
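// Removes the regular fanin at input port `port` of node `node_name`,
// shifting the following regular fanins down one port and keeping control
// inputs packed at the end of the input list.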
Status MutableGraphView::RemoveRegularFaninByPort(absl::string_view node_name,
int port) {
auto error_status = [node_name, port](absl::string_view msg) {
string params =
absl::Substitute("node_name='$0', port=$1", node_name, port);
return MutationError("RemoveRegularFaninByPort", params, msg);
};
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
const int last_regular_fanin_port =
gtl::FindWithDefault(max_regular_input_port(), node, -1);
TF_RETURN_IF_ERROR(
CheckPortRange(port, 0, last_regular_fanin_port, error_status));
TensorId tensor_id = ParseTensorName(node->input(port));
OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
fanouts()[fanin_port].erase({node, port});
auto mutable_inputs = node->mutable_input();
for (int i = port + 1; i <= last_regular_fanin_port; ++i) {
TensorId tensor_id = ParseTensorName(node->input(i));
OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
absl::flat_hash_set<InputPort>* fanouts_set = &fanouts()[fanin_port];
fanouts_set->erase({node, i});
fanouts_set->insert({node, i - 1});
mutable_inputs->SwapElements(i - 1, i);
}
const int last_node_input = node->input_size() - 1;
if (last_regular_fanin_port < last_node_input) {
mutable_inputs->SwapElements(last_regular_fanin_port, last_node_input);
}
mutable_inputs->RemoveLast();
const int updated_last_regular_input_port = last_regular_fanin_port - 1;
if (updated_last_regular_input_port < 0) {
max_regular_input_port().erase(node);
} else {
max_regular_input_port()[node] = updated_last_regular_input_port;
}
return absl::OkStatus();
}
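// Removes the control input from `fanin_node` on `node`, if present. Control
// inputs are stored after all regular inputs, so the backwards scan stops at
// the first non-control input. Returns true if an input was removed.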
bool MutableGraphView::RemoveControllingFaninInternal(NodeDef* node,
NodeDef* fanin_node) {
for (int i = node->input_size() - 1; i >= 0; --i) {
TensorId tensor_id = ParseTensorName(node->input(i));
if (tensor_id.index() > Graph::kControlSlot) {
break;
}
if (tensor_id.node() == fanin_node->name()) {
fanouts()[{fanin_node, Graph::kControlSlot}].erase(
{node, Graph::kControlSlot});
node->mutable_input()->SwapElements(i, node->input_size() - 1);
node->mutable_input()->RemoveLast();
return true;
}
}
return false;
}
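// Public wrapper that removes the control dependency on `fanin_node_name`
// from node `node_name`, validating that both nodes exist and that the
// removal is not from self.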
Status MutableGraphView::RemoveControllingFanin(
absl::string_view node_name, absl::string_view fanin_node_name) {
auto error_status = [node_name, fanin_node_name](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', fanin_node_name='$1'",
node_name, fanin_node_name);
return MutationError("RemoveControllingFanin", params, msg);
};
TF_RETURN_IF_ERROR(CheckRemovingFaninFromSelf(
node_name, {fanin_node_name, Graph::kControlSlot}, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* fanin_node = GetNode(fanin_node_name);
TF_RETURN_IF_ERROR(
CheckNodeExists(fanin_node_name, fanin_node, error_status));
RemoveControllingFaninInternal(node, fanin_node);
return absl::OkStatus();
}
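// Removes all fanins from node `node_name`; when `keep_controlling_fanins` is
// true, only the regular (data) fanins are dropped and control inputs are
// preserved.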
Status MutableGraphView::RemoveAllFanins(absl::string_view node_name,
bool keep_controlling_fanins) {
NodeDef* node = GetNode(node_name);
if (node == nullptr) {
string params =
absl::Substitute("node_name='$0', keep_controlling_fanins=$1",
node_name, keep_controlling_fanins);
return MutationError("RemoveAllFanins", params,
NodeMissingErrorMsg(node_name));
}
if (node->input().empty()) {
return absl::OkStatus();
}
const int num_regular_fanins = NumFanins(*node, /*include_controlling_nodes=*/false);
RemoveFaninsInternal(node, keep_controlling_fanins);
if (keep_controlling_fanins) {
if (num_regular_fanins == 0) {
return absl::OkStatus();
} else if (num_regular_fanins < node->input_size()) {
node->mutable_input()->DeleteSubrange(0, num_regular_fanins);
} else {
node->clear_input();
}
} else {
node->clear_input();
}
return absl::OkStatus();
}
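// Replaces every occurrence of `from_fanin` on node `node_name` with
// `to_fanin`. Handles both regular and control fanins, rejects updates that
// would turn a Switch output into a control dependency, and deduplicates a
// control input made redundant by a new regular input from the same node.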
Status MutableGraphView::UpdateFanin(absl::string_view node_name,
const TensorId& from_fanin,
const TensorId& to_fanin) {
auto error_status = [node_name, from_fanin, to_fanin](absl::string_view msg) {
string params =
absl::Substitute("node_name='$0', from_fanin='$1', to_fanin='$2'",
node_name, from_fanin.ToString(), to_fanin.ToString());
return MutationError("UpdateFanin", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsValid(from_fanin, error_status));
TF_RETURN_IF_ERROR(CheckFaninIsValid(to_fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
NodeDef* from_fanin_node = GetNode(from_fanin.node());
TF_RETURN_IF_ERROR(
CheckNodeExists(from_fanin.node(), from_fanin_node, error_status));
NodeDef* to_fanin_node = GetNode(to_fanin.node());
TF_RETURN_IF_ERROR(
CheckNodeExists(to_fanin.node(), to_fanin_node, error_status));
bool to_fanin_is_control = IsTensorIdControlling(to_fanin);
if (to_fanin_is_control && IsSwitch(*to_fanin_node)) {
return error_status(
absl::Substitute("can't update to fanin '$0' as it will become a "
"Switch control dependency",
to_fanin.ToString()));
}
if (node_name == from_fanin.node() || node_name == to_fanin.node()) {
return error_status("can't update fanin to or from self");
}
if (from_fanin == to_fanin) {
return absl::OkStatus();
}
bool from_fanin_is_control = IsTensorIdControlling(from_fanin);
if (from_fanin_is_control || to_fanin_is_control) {
bool modified = false;
if (from_fanin_is_control) {
modified |= RemoveControllingFaninInternal(node, from_fanin_node);
} else {
modified |= RemoveRegularFaninInternal(
node, {from_fanin_node, from_fanin.index()});
}
if (modified) {
AddFaninInternal(node, {to_fanin_node, to_fanin.index()});
}
return absl::OkStatus();
}
string to_fanin_string = TensorIdToString(to_fanin);
const int num_regular_fanins = NumFanins(*node, /*include_controlling_nodes=*/false);
bool modified = false;
for (int i = 0; i < num_regular_fanins; ++i) {
if (ParseTensorName(node->input(i)) == from_fanin) {
InputPort input(node, i);
OutputPort from_fanin_port(from_fanin_node, from_fanin.index());
fanouts()[from_fanin_port].erase(input);
OutputPort to_fanin_port(to_fanin_node, to_fanin.index());
fanouts()[to_fanin_port].insert(input);
node->set_input(i, to_fanin_string);
modified = true;
}
}
if (modified) {
OutputPort from_fanin_port(from_fanin_node, from_fanin.index());
UpdateMaxRegularOutputPortForRemovedFanin(
{from_fanin_node, from_fanin.index()}, fanouts()[from_fanin_port]);
if (max_regular_output_port()[to_fanin_node] < to_fanin.index()) {
max_regular_output_port()[to_fanin_node] = to_fanin.index();
}
if (CanDedupControlWithRegularInput(*this, *to_fanin_node)) {
RemoveControllingFaninInternal(node, to_fanin_node);
}
}
return absl::OkStatus();
}
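// Replaces the regular fanin at input port `port` of node `node_name` with
// `fanin`, updating the cached fanout sets and max regular output ports, and
// deduplicating any now-redundant control input from the same fanin node.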
Status MutableGraphView::UpdateRegularFaninByPort(absl::string_view node_name,
int port,
const TensorId& fanin) {
auto error_status = [node_name, port, fanin](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', port=$1, fanin='$2'",
node_name, port, fanin.ToString());
return MutationError("UpdateRegularFaninByPort", params, msg);
};
TF_RETURN_IF_ERROR(CheckFaninIsRegular(fanin, error_status));
TF_RETURN_IF_ERROR(CheckAddingFaninToSelf(node_name, fanin, error_status));
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
const int last_regular_fanin_port =
gtl::FindWithDefault(max_regular_input_port(), node, -1);
TF_RETURN_IF_ERROR(
CheckPortRange(port, 0, last_regular_fanin_port, error_status));
NodeDef* fanin_node = GetNode(fanin.node());
TF_RETURN_IF_ERROR(CheckNodeExists(fanin.node(), fanin_node, error_status));
TensorId tensor_id = ParseTensorName(node->input(port));
if (tensor_id == fanin) {
return absl::OkStatus();
}
InputPort input(node, port);
OutputPort from_fanin_port(nodes()[tensor_id.node()], tensor_id.index());
absl::flat_hash_set<InputPort>* from_fanouts = &fanouts()[from_fanin_port];
from_fanouts->erase(input);
UpdateMaxRegularOutputPortForRemovedFanin(from_fanin_port, *from_fanouts);
OutputPort to_fanin_port(fanin_node, fanin.index());
fanouts()[to_fanin_port].insert(input);
UpdateMaxRegularOutputPortForAddedFanin(to_fanin_port);
node->set_input(port, TensorIdToString(fanin));
if (CanDedupControlWithRegularInput(*this, *fanin_node)) {
RemoveControllingFaninInternal(node, fanin_node);
}
return absl::OkStatus();
}
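// Swaps the regular fanins at input ports `from_port` and `to_port` of node
// `node_name`, keeping the cached fanout sets consistent. No-op when the
// ports or the fanins at those ports are identical.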
Status MutableGraphView::SwapRegularFaninsByPorts(absl::string_view node_name,
int from_port, int to_port) {
auto error_status = [node_name, from_port, to_port](absl::string_view msg) {
string params = absl::Substitute("node_name='$0', from_port=$1, to_port=$2",
node_name, from_port, to_port);
return MutationError("SwapRegularFaninsByPorts", params, msg);
};
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
const int last_regular_fanin_port =
gtl::FindWithDefault(max_regular_input_port(), node, -1);
TF_RETURN_IF_ERROR(CheckPortRange(from_port, 0,
last_regular_fanin_port, error_status));
TF_RETURN_IF_ERROR(CheckPortRange(to_port, 0, last_regular_fanin_port,
error_status));
if (from_port == to_port) {
return absl::OkStatus();
}
TensorId from_fanin = ParseTensorName(node->input(from_port));
TensorId to_fanin = ParseTensorName(node->input(to_port));
if (from_fanin == to_fanin) {
return absl::OkStatus();
}
InputPort from_input(node, from_port);
InputPort to_input(node, to_port);
NodeDef* from_fanin_node = GetNode(from_fanin.node());
absl::flat_hash_set<InputPort>* from_fanouts =
&fanouts()[{from_fanin_node, from_fanin.index()}];
from_fanouts->erase(from_input);
from_fanouts->insert(to_input);
NodeDef* to_fanin_node = GetNode(to_fanin.node());
absl::flat_hash_set<InputPort>* to_fanouts =
&fanouts()[{to_fanin_node, to_fanin.index()}];
to_fanouts->erase(to_input);
to_fanouts->insert(from_input);
node->mutable_input()->SwapElements(from_port, to_port);
return absl::OkStatus();
}
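// Converts every regular fanin of node `node_name` into a control dependency,
// routing Switch outputs through identity-consuming nodes and skipping
// controls that would duplicate an existing control input. Afterwards the
// node carries only control inputs.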
Status MutableGraphView::UpdateAllRegularFaninsToControlling(
absl::string_view node_name) {
auto error_status = [node_name](absl::string_view msg) {
string params = absl::Substitute("node_name='$0'", node_name);
return MutationError("UpdateAllRegularFaninsToControlling", params, msg);
};
NodeDef* node = GetNode(node_name);
TF_RETURN_IF_ERROR(CheckNodeExists(node_name, node, error_status));
const int num_regular_fanins = NumFanins(*node, /*include_controlling_nodes=*/false);
std::vector<OutputPort> regular_fanins;
regular_fanins.reserve(num_regular_fanins);
std::vector<NodeDef*> controlling_fanins;
controlling_fanins.reserve(num_regular_fanins);
for (int i = 0; i < num_regular_fanins; ++i) {
TensorId tensor_id = ParseTensorName(node->input(i));
OutputPort fanin_port(nodes()[tensor_id.node()], tensor_id.index());
string error_msg = "";
NodeDef* control_node =
GetControllingFaninToAdd(node_name, fanin_port, &error_msg);
if (!error_msg.empty()) {
return error_status(error_msg);
}
regular_fanins.push_back(fanin_port);
controlling_fanins.push_back(control_node);
}
int pos = 0;
InputPort input_port(node, Graph::kControlSlot);
absl::flat_hash_set<absl::string_view> controls;
for (int i = 0; i < num_regular_fanins; ++i) {
OutputPort fanin_port = regular_fanins[i];
NodeDef* control = controlling_fanins[i];
if (control == nullptr) {
control = GetOrCreateIdentityConsumingSwitch(fanin_port);
}
fanouts()[fanin_port].erase({node, i});
if (controls.contains(control->name())) {
continue;
}
controls.insert(control->name());
node->set_input(pos, AsControlDependency(control->name()));
fanouts()[{control, Graph::kControlSlot}].insert(input_port);
++pos;
}
for (int i = num_regular_fanins; i < node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(node->input(i));
if (controls.contains(tensor_id.node())) {
continue;
}
controls.insert(tensor_id.node());
node->mutable_input()->SwapElements(pos, i);
++pos;
}
node->mutable_input()->DeleteSubrange(pos, node->input_size() - pos);
max_regular_input_port().erase(node);
return absl::OkStatus();
}
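// Verifies that the nodes in `nodes_to_delete` can all be removed: a node is
// deletable only if every one of its fanouts is also scheduled for deletion.
// Missing nodes are merely logged, while retained fanouts produce a mutation
// error naming up to five offending nodes.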
Status MutableGraphView::CheckNodesCanBeDeleted(
const absl::flat_hash_set<string>& nodes_to_delete) {
std::vector<string> missing_nodes;
std::vector<string> nodes_with_fanouts;
for (const string& node_name_to_delete : nodes_to_delete) {
NodeDef* node = GetNode(node_name_to_delete);
if (node == nullptr) {
missing_nodes.push_back(node_name_to_delete);
continue;
}
const int max_port = gtl::FindWithDefault(max_regular_output_port(), node,
Graph::kControlSlot);
for (int i = Graph::kControlSlot; i <= max_port; ++i) {
auto it = fanouts().find({node, i});
bool has_retained_fanout = false;
if (it != fanouts().end()) {
for (const auto& fanout : it->second) {
if (!nodes_to_delete.contains(fanout.node->name())) {
has_retained_fanout = true;
break;
}
}
}
if (has_retained_fanout) {
nodes_with_fanouts.push_back(node_name_to_delete);
break;
}
}
}
auto sort_and_sample = [](std::vector<string>* s) {
constexpr int kMaxNodeNames = 5;
std::sort(s->begin(), s->end());
if (s->size() > kMaxNodeNames) {
return absl::StrCat(
absl::StrJoin(s->begin(), s->begin() + kMaxNodeNames, ", "), ", ...");
}
return absl::StrJoin(*s, ", ");
};
if (!missing_nodes.empty()) {
VLOG(2) << absl::Substitute("Attempting to delete missing node(s) [$0].",
sort_and_sample(&missing_nodes));
}
if (!nodes_with_fanouts.empty()) {
std::vector<string> input_node_names(nodes_to_delete.begin(),
nodes_to_delete.end());
string params = absl::Substitute("nodes_to_delete={$0}",
sort_and_sample(&input_node_names));
string error_msg =
absl::Substitute("can't delete node(s) with retained fanouts(s) [$0]",
sort_and_sample(&nodes_with_fanouts));
return MutationError("DeleteNodes", params, error_msg);
}
return absl::OkStatus();
}
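// Deletes the given nodes from both the GraphDef and the cached indices:
// fanins/fanouts are detached first, then the node list is compacted with
// swap-to-end moves followed by a single DeleteSubrange call.
//
// Illustrative usage (sketch only; node names are hypothetical):
//   TF_RETURN_IF_ERROR(graph.DeleteNodes({"dead_node_1", "dead_node_2"}));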
Status MutableGraphView::DeleteNodes(
const absl::flat_hash_set<string>& nodes_to_delete) {
TF_RETURN_IF_ERROR(CheckNodesCanBeDeleted(nodes_to_delete));
for (const string& node_name_to_delete : nodes_to_delete) {
NodeDef* node = GetNode(node_name_to_delete);
if (node != nullptr) {
RemoveFaninsInternal(node, /*keep_controlling_fanins=*/false);
RemoveFanoutsInternal(node);
}
}
for (const string& node_name_to_delete : nodes_to_delete) {
nodes().erase(node_name_to_delete);
}
int pos = 0;
const int last_idx = graph()->node_size() - 1;
int last_pos = last_idx;
while (pos <= last_pos) {
if (nodes_to_delete.contains(graph()->node(pos).name())) {
graph()->mutable_node()->SwapElements(pos, last_pos);
--last_pos;
} else {
++pos;
}
}
if (last_pos < last_idx) {
graph()->mutable_node()->DeleteSubrange(last_pos + 1, last_idx - last_pos);
}
return absl::OkStatus();
}
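// Detaches `deleted_node` from the fanout sets of all of its fanins. Since
// control inputs are stored after regular inputs, the loop can stop at the
// first control input when `keep_controlling_fanins` is set.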
void MutableGraphView::RemoveFaninsInternal(NodeDef* deleted_node,
bool keep_controlling_fanins) {
for (int i = 0; i < deleted_node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(deleted_node->input(i));
bool is_control = IsTensorIdControlling(tensor_id);
if (keep_controlling_fanins && is_control) {
break;
}
OutputPort fanin(nodes()[tensor_id.node()], tensor_id.index());
InputPort input;
input.node = deleted_node;
input.port_id = is_control ? Graph::kControlSlot : i;
auto it = fanouts().find(fanin);
if (it != fanouts().end()) {
absl::flat_hash_set<InputPort>* fanouts_set = &it->second;
fanouts_set->erase(input);
UpdateMaxRegularOutputPortForRemovedFanin(fanin, *fanouts_set);
}
}
max_regular_input_port().erase(deleted_node);
}
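// Erases all cached fanout sets keyed by `deleted_node`, covering the control
// slot through the node's max regular output port.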
void MutableGraphView::RemoveFanoutsInternal(NodeDef* deleted_node) {
const int max_port =
gtl::FindWithDefault(max_regular_output_port(), deleted_node, -1);
for (int i = Graph::kControlSlot; i <= max_port; ++i) {
fanouts().erase({deleted_node, i});
}
max_regular_output_port().erase(deleted_node);
}
}
} | #include "tensorflow/core/grappler/mutable_graph_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::NDef;
using FDH = FunctionDefHelper;
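// Verifies that `node`'s inputs match `fanins` exactly and that each fanin is
// reflected in both the fanin and fanout indices of `graph`.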
void CompareNodeFanins(const MutableGraphView& graph, NodeDef* node,
absl::Span<const string> fanins) {
ASSERT_EQ(node->input_size(), fanins.size());
for (int i = 0; i < node->input_size(); ++i) {
TensorId tensor_id = ParseTensorName(fanins[i]);
EXPECT_EQ(ParseTensorName(node->input(i)), tensor_id);
int port;
if (tensor_id.index() == Graph::kControlSlot) {
port = Graph::kControlSlot;
} else {
port = i;
}
MutableGraphView::InputPort input_port(node, port);
MutableGraphView::OutputPort output_port =
graph.GetOutputPort(tensor_id.node(), tensor_id.index());
EXPECT_TRUE(graph.GetFanin(input_port).contains(output_port));
EXPECT_TRUE(graph.GetFanout(output_port).contains(input_port));
}
}
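// Verifies that the fanouts of `node` (including controlled nodes) match
// `fanouts`.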
void CompareNodeFanouts(const MutableGraphView& graph, NodeDef* node,
absl::Span<const string> fanouts) {
auto node_fanouts = graph.GetFanouts(*node, /*include_controlled_nodes=*/true);
EXPECT_EQ(node_fanouts.size(), fanouts.size());
for (const string& fanout : fanouts) {
TensorId tensor_id = ParseTensorName(fanout);
MutableGraphView::InputPort input_port(graph.GetNode(tensor_id.node()),
tensor_id.index());
EXPECT_TRUE(node_fanouts.contains(input_port));
}
}
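// Asserts that a node's op, device, attributes, fanins, and fanouts all match
// the expected values.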
void CheckNode(const MutableGraphView& graph, absl::string_view node_name,
absl::string_view op, absl::string_view device,
absl::Span<const std::pair<string, FDH::AttrValueWrapper>> attrs,
absl::Span<const string> fanins,
absl::Span<const string> fanouts) {
NodeDef* node = graph.GetNode(node_name);
ASSERT_NE(node, nullptr);
EXPECT_EQ(node->op(), op);
EXPECT_EQ(node->device(), device);
EXPECT_EQ(node->attr_size(), attrs.size());
for (const auto& attr : attrs) {
auto it = node->attr().find(attr.first);
ASSERT_NE(it, node->attr().end());
EXPECT_TRUE(AreAttrValuesEqual(it->second, attr.second.proto));
}
CompareNodeFanins(graph, node, fanins);
CompareNodeFanouts(graph, node, fanouts);
}
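// Cross-checks a MutableGraphView against an immutable GraphView built over
// the same GraphDef: every node, fanin, fanout, and edge must agree.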
void CheckGraph(const MutableGraphView& mutable_graph) {
GraphView immutable_graph(mutable_graph.graph());
EXPECT_EQ(mutable_graph.graph()->node_size(),
immutable_graph.graph()->node_size());
EXPECT_EQ(mutable_graph.graph(), immutable_graph.graph());
auto check_edges =
[](const absl::flat_hash_set<MutableGraphView::Edge>& mutable_edges,
const absl::flat_hash_set<GraphView::Edge>& immutable_edges) {
EXPECT_EQ(mutable_edges.size(), immutable_edges.size());
for (const auto& fanin_edge : mutable_edges) {
GraphView::Edge immutable_edge(
{fanin_edge.src.node, fanin_edge.src.port_id},
{fanin_edge.dst.node, fanin_edge.dst.port_id});
EXPECT_TRUE(immutable_edges.contains(immutable_edge));
}
};
for (auto& node : *mutable_graph.graph()->mutable_node()) {
EXPECT_EQ(&node, immutable_graph.GetNode(node.name()));
auto mutable_fanins = mutable_graph.GetFanins(node, /*include_controlling_nodes=*/true);
auto immutable_fanins = immutable_graph.GetFanins(node, /*include_controlling_nodes=*/true);
EXPECT_EQ(mutable_fanins.size(), immutable_fanins.size());
for (const auto& fanin : mutable_fanins) {
GraphView::OutputPort immutable_fanin(fanin.node, fanin.port_id);
EXPECT_TRUE(immutable_fanins.contains(immutable_fanin));
}
auto mutable_fanouts = mutable_graph.GetFanouts(node, /*include_controlled_nodes=*/true);
auto immutable_fanouts = immutable_graph.GetFanouts(node, /*include_controlled_nodes=*/true);
EXPECT_EQ(mutable_fanouts.size(), immutable_fanouts.size());
for (const auto& fanout : mutable_fanouts) {
GraphView::InputPort immutable_fanout(fanout.node, fanout.port_id);
EXPECT_TRUE(immutable_fanouts.contains(immutable_fanout));
}
auto mutable_fanin_edges = mutable_graph.GetFaninEdges(node, /*include_controlling_edges=*/true);
auto immutable_fanin_edges = immutable_graph.GetFaninEdges(node, /*include_controlling_edges=*/true);
check_edges(mutable_fanin_edges, immutable_fanin_edges);
auto mutable_fanout_edges = mutable_graph.GetFanoutEdges(node, /*include_controlled_edges=*/true);
auto immutable_fanout_edges = immutable_graph.GetFanoutEdges(node, /*include_controlled_edges=*/true);
check_edges(mutable_fanout_edges, immutable_fanout_edges);
}
}
TEST(MutableGraphViewTest, AddSubgraph) {
GraphDef graph_def = test::function::GDef(
{
NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {}, {}),
NDef("baz", "NotImportant", {"foo", "bar"}),
},
{});
MutableGraphView graph(&graph_def);
GraphDef subgraph = test::function::GDef(
{
NDef("s/n0", "NotImportant", {}, {}),
NDef("s/n1", "NotImportant", {"bar", "s/n0"}, {}),
},
{});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {"baz:1", "s/n1"});
CheckNode(graph, "s/n1", "NotImportant", "", {}, {"bar", "s/n0"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddSubgraphAndAddFunction) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef subgraph = test::function::GDef({}, {x_times_two});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
EXPECT_EQ(graph_def.library().function_size(), 1);
}
TEST(MutableGraphViewTest, AddSubgraphAndSkipSameFunction) {
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef graph_def = test::function::GDef({}, {x_times_two});
MutableGraphView graph(&graph_def);
GraphDef subgraph = test::function::GDef({}, {x_times_two});
TF_EXPECT_OK(graph.AddSubgraph(std::move(subgraph)));
EXPECT_EQ(graph_def.library().function_size(), 1);
}
TEST(MutableGraphViewTest, AddSubgraphAndFailIfFunctionDifferent) {
FunctionDef x_times_four = test::function::XTimesFour();
x_times_four.mutable_signature()->set_name("XTimesTwo");
GraphDef graph_def = test::function::GDef({}, {x_times_four});
MutableGraphView graph(&graph_def);
FunctionDef x_times_two = test::function::XTimesTwo();
GraphDef subgraph = test::function::GDef({}, {x_times_two});
Status status = graph.AddSubgraph(std::move(subgraph));
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.message(),
"MutableGraphView::AddSubgraph(function_size=1) error: Found "
"different function definition with the same name: XTimesTwo.");
}
TEST(MutableGraphViewTest, UpdateNodeNoDedupControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
AttrValue list_value;
list_value.mutable_list()->add_type(DT_FLOAT);
TF_EXPECT_OK(
graph.UpdateNode("bar_2", "IdentityN", kDevice, {{"T", list_value}}));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2"});
CheckNode(graph, "bar_2", "IdentityN", kDevice, {{"T", list_value}},
{"bar_1:1"}, {"foo_1", "foo_1:2", "^foo_1", "foo_2:1", "^foo_2"});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"bar_2", "other", "bar_2:1", "^bar_2"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"other:1", "bar_2:2", "^bar_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeDedupControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNode("bar_2", "NotImportant", kDevice, {}));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2"});
CheckNode(graph, "bar_2", "NotImportant", kDevice, {}, {"bar_1:1"},
{"foo_1", "foo_1:2", "foo_2:1"});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"bar_2", "other", "bar_2:1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"other:1", "bar_2:2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeSwitchNoControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def =
test::function::GDef({NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {"foo:1"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNode("foo", "Switch", kDevice, {}));
CheckNode(graph, "foo", "Switch", kDevice, {}, {}, {"bar"});
CheckNode(graph, "bar", "NotImportant", "", {}, {"foo:1"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeSwitchControlDependency) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def =
test::function::GDef({NDef("foo", "NotImportant", {}, {}),
NDef("bar", "NotImportant", {"^foo"})},
{});
MutableGraphView graph(&graph_def);
AttrValue attr;
attr.set_type(DT_FLOAT);
Status s = graph.UpdateNode("foo", "Switch", kDevice, {{"T", attr}});
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::UpdateNodeOp(node_name='foo', op='Switch', "
"device='/device:foo:0', attrs={('T', type: DT_FLOAT)}) error: can't "
"change node op to Switch when node drives a control dependency "
"(alternatively, we could add the identity node needed, but it seems "
"like an unlikely event and probably a mistake).";
EXPECT_EQ(s.message(), expected_msg);
CheckNode(graph, "foo", "NotImportant", "", {}, {}, {"^bar"});
CheckNode(graph, "bar", "NotImportant", "", {}, {"^foo"}, {});
CheckGraph(graph);
}
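// Snapshots the inputs of every node except `node_to_exclude`, so tests can
// later assert that a mutation left all other nodes untouched.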
absl::flat_hash_map<string, std::vector<string>> GetNodeInputsFromGraph(
const GraphDef& graph, absl::string_view node_to_exclude) {
absl::flat_hash_map<string, std::vector<string>> node_inputs;
for (const auto& node : graph.node()) {
if (node.name() == node_to_exclude) {
continue;
}
node_inputs[node.name()] =
std::vector<string>(node.input().begin(), node.input().end());
}
return node_inputs;
}
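// Verifies that every node other than `node_to_exclude` still has exactly the
// inputs captured before the mutation.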
void CheckUnmodifiedNodeFanins(
const GraphDef& graph, absl::string_view node_to_exclude,
const absl::flat_hash_map<string, std::vector<string>>&
unmodified_node_inputs) {
for (const auto& node : graph.node()) {
if (node.name() == node_to_exclude) {
continue;
}
auto it = unmodified_node_inputs.find(node.name());
ASSERT_NE(it, unmodified_node_inputs.end());
ASSERT_EQ(it->second.size(), node.input_size());
for (int i = 0; i < node.input_size(); ++i) {
EXPECT_EQ(node.input(i), it->second[i]);
}
}
}
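// Runs UpdateNodeName(from_node_name, to_node_name, update_fanouts) on a
// small fixed graph and verifies the status, the (possibly renamed) node's
// fanins, and that all other nodes are unchanged.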
void TestUpdateNodeName(absl::string_view from_node_name, bool node_exists,
absl::string_view to_node_name, bool update_fanouts,
bool success, const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a"}),
NDef("c", "NotImportant", {}, {})},
{});
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(from_node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, from_node_name);
Status s = graph.UpdateNodeName(from_node_name, to_node_name, update_fanouts);
EXPECT_EQ(s.ok(), success);
string updated_node_name;
if (success) {
updated_node_name = string(to_node_name);
} else {
updated_node_name = string(from_node_name);
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
EXPECT_EQ(node->name(), updated_node_name);
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, updated_node_name,
unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateNodeName) {
string error_msg;
TestUpdateNodeName("b", true, "d", false,
true, error_msg, {"a"});
TestUpdateNodeName("b", true, "b", false,
true, error_msg, {"a"});
TestUpdateNodeName("a", true, "a", false,
true, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='c', to_node_name='b', "
"update_fanouts=false) error: can't update node name because new node "
"name is in use.";
TestUpdateNodeName("c", true, "b", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='a', to_node_name='b', "
"update_fanouts=true) error: can't update node name because new node "
"name is in use.";
TestUpdateNodeName("a", true, "b", true,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='a', to_node_name='d', "
"update_fanouts=false) error: can't update node name because node has "
"fanouts.";
TestUpdateNodeName("a", true, "d", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='d', to_node_name='e', "
"update_fanouts=false) error: node 'd' was not found.";
TestUpdateNodeName("d", false, "e", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateNodeName(from_node_name='d', to_node_name='e', "
"update_fanouts=true) error: node 'd' was not found.";
TestUpdateNodeName("d", false, "e", true,
false, error_msg, {});
}
TEST(MutableGraphViewTest, UpdateNodeNameWithFanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:2"}),
NDef("c", "NotImportant", {"b", "^a"}),
NDef("d", "NotImportant", {"^b", "^a"}),
NDef("e", "NotImportant", {"b:2", "c:4", "b:1", "^a"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateNodeName("b", "f", /*update_fanouts=*/true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"f", "^c", "^d", "^e"});
CheckNode(graph, "f", "NotImportant", "", {}, {"a:2"},
{"c", "^d", "e", "e:2"});
CheckNode(graph, "c", "NotImportant", "", {}, {"f", "^a"}, {"e:1"});
CheckNode(graph, "d", "NotImportant", "", {}, {"^f", "^a"}, {});
CheckNode(graph, "e", "NotImportant", "", {}, {"f:2", "c:4", "f:1", "^a"},
{});
CheckGraph(graph);
}
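// Shared graph for the SwapNodeNames tests: two Switch nodes fed by `a` and
// `b`, their consuming Identity nodes, and downstream consumers with both
// regular and control fanins.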
GraphDef SimpleSwapNodeNamesMutationGraph() {
return test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("switch_1", "Switch", {"a"}),
NDef("identity_1", "Identity", {"switch_1:1"}),
NDef("b", "NotImportant", {}, {}), NDef("switch_2", "Switch", {"b"}),
NDef("identity_2", "Identity", {"switch_2:0"}),
NDef("foo_1", "NotImportant", {"identity_1", "^identity_1"}),
NDef("foo_2", "NotImportant", {"identity_2", "^identity_2"})},
{});
}
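// Swaps two non-Switch node names; the expected result is identical for both
// values of `update_fanouts` because the swapped nodes have no fanouts.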
void TestSwapNodeNames(bool update_fanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("foo_1", "foo_2", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_2", "^foo_2"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_1", "^foo_1"});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNames) {
TestSwapNodeNames(false);
TestSwapNodeNames(true);
}
void TestSwapNodeNamesWithSameNames(bool update_fanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("identity_1", "identity_1", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSameName) {
TestSwapNodeNamesWithSameNames(false);
TestSwapNodeNamesWithSameNames(true);
}
TEST(MutableGraphView, SwapNodeNamesBetweenSwitches) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_1", "switch_2", /*update_fanouts=*/false));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"a"}, {"identity_2"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"b"}, {"identity_1"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesBetweenSwitchesAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_1", "switch_2", /*update_fanouts=*/true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_2:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_1:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSwitchAndNonSwitch) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "switch_1", /*update_fanouts=*/false));
CheckNode(graph, "switch_1", "NotImportant", "", {}, {}, {"a", "identity_1"});
CheckNode(graph, "a", "Switch", "", {}, {"switch_1"}, {});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"}, {"foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {}, {"identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSwitchAndNonSwitchAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "switch_1", /*update_fanouts=*/true));
CheckNode(graph, "switch_1", "NotImportant", "", {}, {}, {"a"});
CheckNode(graph, "a", "Switch", "", {}, {"switch_1"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"a:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesNonSwitchAndSwitch) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_2", "b", /*update_fanouts=*/false));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "switch_2", "NotImportant", "", {}, {}, {"b", "identity_2"});
CheckNode(graph, "b", "Switch", "", {}, {"switch_2"}, {});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"}, {"foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesNonSwitchAndSwitchAndUpdateFanouts) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("switch_2", "b", /*update_fanouts=*/true));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "switch_2", "NotImportant", "", {}, {}, {"b"});
CheckNode(graph, "b", "Switch", "", {}, {"switch_2"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"b:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
void TestSwapNodeNamesSimpleSelfLoop(bool update_fanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {"b:7"}), NDef("b", "NotImportant", {"a:10"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.SwapNodeNames("a", "b", update_fanouts));
CheckNode(graph, "a", "NotImportant", "", {}, {"b:10"}, {"b:0"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:7"}, {"a:0"});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesSelfLoops) {
TestSwapNodeNamesSimpleSelfLoop(false);
TestSwapNodeNamesSimpleSelfLoop(true);
}
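// Attempts a SwapNodeNames call that should fail and verifies both the error
// message and that the graph is left unchanged.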
void TestSwapNodeNamesError(absl::string_view from_node_name,
absl::string_view to_node_name, bool update_fanouts,
const string& error_msg) {
GraphDef graph_def = SimpleSwapNodeNamesMutationGraph();
MutableGraphView graph(&graph_def);
Status s = graph.SwapNodeNames(from_node_name, to_node_name, update_fanouts);
EXPECT_EQ(s.ok(), false);
EXPECT_EQ(s.message(), error_msg);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"switch_1"});
CheckNode(graph, "switch_1", "Switch", "", {}, {"a"}, {"identity_1"});
CheckNode(graph, "identity_1", "Identity", "", {}, {"switch_1:1"},
{"foo_1", "^foo_1"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"switch_2"});
CheckNode(graph, "switch_2", "Switch", "", {}, {"b"}, {"identity_2"});
CheckNode(graph, "identity_2", "Identity", "", {}, {"switch_2:0"},
{"foo_2", "^foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"identity_1", "^identity_1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"identity_2", "^identity_2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphView, SwapNodeNamesError) {
string error_msg;
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_3', "
"to_node_name='foo_2', update_fanouts=false) error: node 'foo_3' was not "
"found.";
TestSwapNodeNamesError("foo_3", "foo_2", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_3', "
"to_node_name='foo_2', update_fanouts=true) error: node 'foo_3' was not "
"found.";
TestSwapNodeNamesError("foo_3", "foo_2", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_1', "
"to_node_name='foo_4', update_fanouts=false) error: node 'foo_4' was not "
"found.";
TestSwapNodeNamesError("foo_1", "foo_4", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_1', "
"to_node_name='foo_4', update_fanouts=true) error: node 'foo_4' was not "
"found.";
TestSwapNodeNamesError("foo_1", "foo_4", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_5', "
"to_node_name='foo_6', update_fanouts=false) error: node 'foo_5' was not "
"found.";
TestSwapNodeNamesError("foo_5", "foo_6", false, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='foo_5', "
"to_node_name='foo_6', update_fanouts=true) error: node 'foo_5' was not "
"found.";
TestSwapNodeNamesError("foo_5", "foo_6", true, error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='switch_2', "
"to_node_name='identity_1', update_fanouts=false) error: can't swap node "
"name 'switch_2' as it will become a Switch control dependency.";
TestSwapNodeNamesError("switch_2", "identity_1", false,
error_msg);
error_msg =
"MutableGraphView::SwapNodeNames(from_node_name='identity_2', "
"to_node_name='switch_1', update_fanouts=false) error: can't swap node "
"name 'switch_1' as it will become a Switch control dependency.";
TestSwapNodeNamesError("identity_2", "switch_1", false,
error_msg);
}
TEST(MutableGraphViewTest, AddAndUpdateFanouts) {
GraphDef graph_def = test::function::GDef(
{NDef("bar", "NotImportant", {}, {}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar", "other", "bar:1", "^bar"}),
NDef("foo_2", "NotImportant", {"other:1", "bar:2", "^bar"}),
NDef("foo_3", "NotImportant", {"other:2", "^bar"})},
{});
MutableGraphView graph(&graph_def);
NodeDef* new_bar = graph.AddNode(NDef("new_bar", "NotImportant", {}, {}));
TF_EXPECT_OK(graph.UpdateFanouts("bar", new_bar->name()));
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {});
CheckNode(graph, "other", "NotImportant", "", {}, {},
{"foo_1:1", "foo_2", "foo_3"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"new_bar", "other", "new_bar:1"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"other:1", "new_bar:2"},
{});
CheckNode(graph, "foo_3", "NotImportant", "", {}, {"other:2", "^new_bar"},
{});
CheckNode(graph, "new_bar", "NotImportant", "", {}, {},
{"foo_1:0", "foo_1:2", "foo_2:1", "^foo_3"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddAndUpdateFanoutsKeepControls) {
GraphDef graph_def = test::function::GDef(
{NDef("bar_1", "Switch", {}, {}), NDef("bar_2", "Identity", {"bar_1:1"}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar_2", "other", "bar_2:1", "^bar_2"}),
NDef("foo_2", "NotImportant", {"other:1", "bar_2:2", "^bar_2"})},
{});
MutableGraphView graph(&graph_def);
NodeDef* new_bar = graph.AddNode(NDef("new_bar", "Identity", {"bar_1:2"}));
TF_EXPECT_OK(graph.UpdateFanouts("bar_2", new_bar->name()));
CheckNode(graph, "bar_1", "Switch", "", {}, {}, {"bar_2", "new_bar"});
CheckNode(graph, "bar_2", "Identity", "", {}, {"bar_1:1"}, {});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_1:1", "foo_2"});
CheckNode(graph, "foo_1", "NotImportant", "", {},
{"new_bar", "other", "new_bar:1", "^new_bar"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {},
{"other:1", "new_bar:2", "^new_bar"}, {});
CheckNode(graph, "new_bar", "Identity", "", {}, {"bar_1:2"},
{"foo_1", "foo_1:2", "^foo_1", "foo_2:1", "^foo_2"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddAndUpdateFanoutsWithoutSelfLoops) {
GraphDef graph_def =
test::function::GDef({NDef("bar", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar", "^bar"}),
NDef("foo_2", "NotImportant", {"^bar"})},
{});
MutableGraphView graph(&graph_def);
NodeDef* new_bar = graph.AddNode(NDef("new_bar", "NewBar", {"bar"}, {}));
TF_EXPECT_OK(graph.UpdateFanouts("bar", new_bar->name()));
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {"new_bar"});
CheckNode(graph, "foo_1", "NotImportant", "", {}, {"new_bar"}, {});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"^new_bar"}, {});
CheckNode(graph, "new_bar", "NewBar", "", {}, {"bar"}, {"foo_1", "^foo_2"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateFanoutsToSwitchWithControlFromSwitch) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "Switch", {}, {}),
NDef("c", "NotImportant", {}, {}), NDef("d", "NotImportant", {}, {}),
NDef("e", "NotImportant", {"c", "b", "^a", "^d"})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.UpdateFanouts("a", "b");
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::UpdateFanouts(from_node_name='a', to_node_name='b') "
"error: can't update fanouts to node 'b' as it will become a Switch "
"control dependency.";
EXPECT_EQ(s.message(), expected_msg);
s = graph.UpdateFanouts("d", "b");
EXPECT_FALSE(s.ok());
expected_msg =
"MutableGraphView::UpdateFanouts(from_node_name='d', to_node_name='b') "
"error: can't update fanouts to node 'b' as it will become a Switch "
"control dependency.";
EXPECT_EQ(s.message(), expected_msg);
EXPECT_EQ(graph.graph()->node_size(), 5);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"^e"});
CheckNode(graph, "b", "Switch", "", {}, {}, {"e:1"});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {"e:0"});
CheckNode(graph, "d", "NotImportant", "", {}, {}, {"^e"});
CheckNode(graph, "e", "NotImportant", "", {}, {"c", "b", "^a", "^d"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateFanoutsToSwitchWithNoControlFromSwitch) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "Switch", {}, {}),
NDef("c", "NotImportant", {}, {}), NDef("d", "NotImportant", {}, {}),
NDef("e", "NotImportant", {"c", "b", "^a", "^d"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateFanouts("c", "b"));
EXPECT_EQ(graph.graph()->node_size(), 5);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"^e"});
CheckNode(graph, "b", "Switch", "", {}, {}, {"e:0", "e:1"});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {});
CheckNode(graph, "d", "NotImportant", "", {}, {}, {"^e"});
CheckNode(graph, "e", "NotImportant", "", {}, {"b", "b", "^a", "^d"}, {});
CheckGraph(graph);
}
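// Shared graph for the fanin-mutation tests below.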
GraphDef SimpleMutateFaninGraph() {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "NotImportant", {}, {}), NDef("d", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"a"}),
NDef("foo_2", "NotImportant", {"b", "^a", "^c"}),
NDef("foo_3", "NotImportant", {"b", "a:1", "a:1"}),
NDef("foo_4", "NotImportant", {"a", "b:2", "b:2", "^c", "^d"}),
NDef("foo_5", "NotImportant", {}),
NDef("foo_6", "NotImportant", {"^a", "^b"})},
{});
return graph_def;
}
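// Runs AddRegularFanin on a fresh copy of the graph above and verifies the
// status, the mutated node's fanins, and that all other nodes are unchanged.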
void TestAddRegularFanin(absl::string_view node_name, bool node_exists,
const TensorId& fanin_to_add, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.AddRegularFanin(node_name, fanin_to_add);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddRegularFanin) {
string error_msg;
TestAddRegularFanin("foo_1", true, {"b", 1}, true,
error_msg, {"a", "b:1"});
TestAddRegularFanin("foo_3", true, {"b", 2}, true,
error_msg, {"b", "a:1", "a:1", "b:2"});
TestAddRegularFanin("foo_2", true, {"a", 0}, true,
error_msg, {"b", "a", "^c"});
TestAddRegularFanin("foo_4", true, {"a", 1}, true,
error_msg, {"a", "b:2", "b:2", "a:1", "^d", "^c"});
TestAddRegularFanin("foo_5", true, {"a", 1}, true,
error_msg, {"a:1"});
TestAddRegularFanin("foo_6", true, {"c", 1}, true,
error_msg, {"c:1", "^b", "^a"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_1', fanin='^b') error: "
"fanin '^b' must be a regular tensor id.";
TestAddRegularFanin("foo_1", true, {"b", Graph::kControlSlot},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_3', fanin='^c') error: "
"fanin '^c' must be a regular tensor id.";
TestAddRegularFanin("foo_3", true, {"c", Graph::kControlSlot},
false, error_msg, {"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_2', fanin='^d') error: "
"fanin '^d' must be a regular tensor id.";
TestAddRegularFanin("foo_2", true, {"d", Graph::kControlSlot},
false, error_msg, {"b", "^a", "^c"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_4', fanin='^a') error: "
"fanin '^a' must be a regular tensor id.";
TestAddRegularFanin("foo_4", true, {"a", Graph::kControlSlot},
false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_5', fanin='^a') error: "
"fanin '^a' must be a regular tensor id.";
TestAddRegularFanin("foo_5", true, {"a", Graph::kControlSlot},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_6', fanin='^c') error: "
"fanin '^c' must be a regular tensor id.";
TestAddRegularFanin("foo_6", true, {"c", Graph::kControlSlot},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_2', fanin='^a') error: "
"fanin '^a' must be a regular tensor id.";
TestAddRegularFanin("foo_2", true, {"a", Graph::kControlSlot},
false, error_msg, {"b", "^a", "^c"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_missing', fanin='a:0') "
"error: node 'foo_missing' was not found.";
TestAddRegularFanin("foo_missing", false, {"a", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_1', "
"fanin='bar_missing:0') error: node 'bar_missing' was not found.";
TestAddRegularFanin("foo_1", true, {"bar_missing", 0},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_missing', "
"fanin='bar_missing:0') error: node 'foo_missing' was not found.";
TestAddRegularFanin("foo_missing", false, {"bar_missing", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_missing', "
"fanin='^bar_missing') error: fanin '^bar_missing' must be a regular "
"tensor id.";
TestAddRegularFanin("foo_missing", false,
{"bar_missing", Graph::kControlSlot},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFanin(node_name='foo_6', fanin='foo_6:2') "
"error: can't add fanin 'foo_6:2' to self.";
TestAddRegularFanin("foo_6", true, {"foo_6", 2},
false, error_msg, {"^a", "^b"});
}
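// Like TestAddRegularFanin, but inserts the fanin at a specific input port.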
void TestAddRegularFaninByPort(absl::string_view node_name, bool node_exists,
int port, const TensorId& fanin_to_add,
bool success, const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.AddRegularFaninByPort(node_name, port, fanin_to_add);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddRegularFaninByPort) {
string error_msg;
TestAddRegularFaninByPort("foo_3", true, 0, {"d", 2},
true, error_msg,
{"d:2", "b", "a:1", "a:1"});
TestAddRegularFaninByPort("foo_3", true, 3, {"d", 2},
true, error_msg,
{"b", "a:1", "a:1", "d:2"});
TestAddRegularFaninByPort("foo_3", true, 2, {"d", 2},
true, error_msg,
{"b", "a:1", "d:2", "a:1"});
TestAddRegularFaninByPort("foo_2", true, 0, {"d", 2},
true, error_msg,
{"d:2", "b", "^c", "^a"});
TestAddRegularFaninByPort("foo_2", true, 1, {"d", 2},
true, error_msg,
{"b", "d:2", "^c", "^a"});
TestAddRegularFaninByPort("foo_4", true, 2, {"d", 2},
true, error_msg,
{"a", "b:2", "d:2", "b:2", "^c"});
TestAddRegularFaninByPort("foo_5", true, 0, {"d", 2},
true, error_msg, {"d:2"});
TestAddRegularFaninByPort("foo_6", true, 0, {"d", 2},
true, error_msg, {"d:2", "^b", "^a"});
TestAddRegularFaninByPort("foo_6", true, 0, {"b", 2},
true, error_msg, {"b:2", "^a"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_4', port=2, "
"fanin='^d') error: fanin '^d' must be a regular tensor id.";
TestAddRegularFaninByPort(
"foo_4", true, 2, {"d", Graph::kControlSlot},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_5', port=-1, "
"fanin='d:2') error: port must be in range [0, 0].";
TestAddRegularFaninByPort("foo_5", true, -1,
{"d", 2},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_5', port=1, "
"fanin='d:2') error: port must be in range [0, 0].";
TestAddRegularFaninByPort("foo_5", true, 1, {"d", 2},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_6', port=-1, "
"fanin='d:2') error: port must be in range [0, 0].";
TestAddRegularFaninByPort("foo_6", true, -1,
{"d", 2},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_6', port=1, "
"fanin='d:2') error: port must be in range [0, 0].";
TestAddRegularFaninByPort("foo_6", true, 1, {"d", 2},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_4', port=-1, "
"fanin='d:2') error: port must be in range [0, 3].";
TestAddRegularFaninByPort(
"foo_4", true, -1, {"d", 2},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_4', port=4, "
"fanin='d:2') error: port must be in range [0, 3].";
TestAddRegularFaninByPort("foo_4", true, 4, {"d", 2},
false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_missing', "
"port=0, fanin='a:0') error: node 'foo_missing' was not found.";
TestAddRegularFaninByPort("foo_missing", false, 0,
{"a", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_1', port=0, "
"fanin='bar_missing:0') error: node 'bar_missing' was not found.";
TestAddRegularFaninByPort("foo_1", true, 0,
{"bar_missing", 0},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_missing', "
"port=0, fanin='bar_missing:0') error: node 'foo_missing' was not found.";
TestAddRegularFaninByPort("foo_missing", false, 0,
{"bar_missing", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::AddRegularFaninByPort(node_name='foo_6', port=0, "
"fanin='foo_6:2') error: can't add fanin 'foo_6:2' to self.";
TestAddRegularFaninByPort("foo_6", true, 0,
{"foo_6", 2},
false, error_msg, {"^a", "^b"});
}
void CheckFanoutRemoved(const MutableGraphView& graph, const TensorId& fanin,
absl::string_view node_name) {
MutableGraphView::OutputPort output_port =
graph.GetOutputPort(fanin.node(), fanin.index());
auto fanouts = graph.GetFanout(output_port);
for (auto fanout : fanouts) {
EXPECT_NE(fanout.node->name(), node_name);
}
}
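// Like TestAddRegularFanin, but removes the given fanin and additionally
// checks it no longer appears in the fanout index.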
void TestRemoveRegularFanin(absl::string_view node_name, bool node_exists,
const TensorId& fanin_to_remove, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(nullptr, node);
} else {
EXPECT_EQ(nullptr, node);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.RemoveRegularFanin(node_name, fanin_to_remove);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
if (success) {
CheckFanoutRemoved(graph, fanin_to_remove, node_name);
}
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveRegularFanin) {
string error_msg;
TestRemoveRegularFanin("foo_1", true, {"a", 0},
true, error_msg, {});
TestRemoveRegularFanin("foo_3", true, {"a", 1},
true, error_msg, {"b"});
TestRemoveRegularFanin("foo_2", true, {"b", 0},
true, error_msg, {"^a", "^c"});
TestRemoveRegularFanin("foo_4", true, {"b", 2},
true, error_msg, {"a", "^c", "^d"});
TestRemoveRegularFanin("foo_4", true, {"a", 0},
true, error_msg,
{"b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_2', fanin='^a') "
"error: fanin '^a' must be a regular tensor id.";
TestRemoveRegularFanin("foo_2", true,
{"a", Graph::kControlSlot},
false, error_msg, {"b", "^a", "^c"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_4', fanin='^d') "
"error: fanin '^d' must be a regular tensor id.";
TestRemoveRegularFanin(
"foo_4", true, {"d", Graph::kControlSlot},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_6', fanin='^a') "
"error: fanin '^a' must be a regular tensor id.";
TestRemoveRegularFanin("foo_6", true,
{"a", Graph::kControlSlot},
false, error_msg, {"^a", "^b"});
error_msg = "";
TestRemoveRegularFanin("foo_5", true, {"a", 1},
true, error_msg, {});
TestRemoveRegularFanin("foo_6", true, {"a", 1},
true, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_1', fanin='^b') "
"error: fanin '^b' must be a regular tensor id.";
TestRemoveRegularFanin("foo_1", true,
{"b", Graph::kControlSlot},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_3', fanin='^c') "
"error: fanin '^c' must be a regular tensor id.";
TestRemoveRegularFanin("foo_3", true,
{"c", Graph::kControlSlot},
false, error_msg, {"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_5', fanin='^a') "
"error: fanin '^a' must be a regular tensor id.";
TestRemoveRegularFanin("foo_5", true,
{"a", Graph::kControlSlot},
false, error_msg, {});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_missing', "
"fanin='a:0') error: node 'foo_missing' was not found.";
TestRemoveRegularFanin("foo_missing", false, {"a", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_1', "
"fanin='bar_missing:0') error: node 'bar_missing' was not found.";
TestRemoveRegularFanin("foo_1", true, {"bar_missing", 0},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_missing', "
"fanin='bar_missing:0') error: node 'foo_missing' was not found.";
TestRemoveRegularFanin("foo_missing", false,
{"bar_missing", 0}, false, error_msg, {});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_missing', "
"fanin='^bar_missing') error: fanin '^bar_missing' must be a regular "
"tensor id.";
TestRemoveRegularFanin("foo_missing", false,
{"bar_missing", Graph::kControlSlot},
false, error_msg, {});
error_msg =
"MutableGraphView::RemoveRegularFanin(node_name='foo_6', "
"fanin='foo_6:2') error: can't remove fanin 'foo_6:2' from self.";
TestRemoveRegularFanin("foo_6", true, {"foo_6", 2},
false, error_msg, {"^a", "^b"});
}
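// Same shape as TestRemoveRegularFanin, but removes the regular fanin at a
// given port via MutableGraphView::RemoveRegularFaninByPort.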
void TestRemoveRegularFaninByPort(absl::string_view node_name, bool node_exists,
int port, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(nullptr, node);
} else {
EXPECT_EQ(nullptr, node);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.RemoveRegularFaninByPort(node_name, port);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveRegularFaninByPort) {
string error_msg;
TestRemoveRegularFaninByPort("foo_3", true, 0,
true, error_msg, {"a:1", "a:1"});
TestRemoveRegularFaninByPort("foo_3", true, 2,
true, error_msg, {"b", "a:1"});
TestRemoveRegularFaninByPort("foo_3", true, 1,
true, error_msg, {"b", "a:1"});
TestRemoveRegularFaninByPort("foo_4", true, 0,
true, error_msg,
{"b:2", "b:2", "^d", "^c"});
TestRemoveRegularFaninByPort("foo_4", true, 2,
true, error_msg,
{"a", "b:2", "^d", "^c"});
TestRemoveRegularFaninByPort("foo_4", true, 1,
true, error_msg,
{"a", "b:2", "^d", "^c"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_5', port=0) "
"error: no available ports as node has no regular fanins.";
TestRemoveRegularFaninByPort("foo_5", true, 0,
false, error_msg, {});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_6', port=1) "
"error: no available ports as node has no regular fanins.";
TestRemoveRegularFaninByPort("foo_6", true, 1,
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_3', port=-1) "
"error: port must be in range [0, 2].";
TestRemoveRegularFaninByPort("foo_3", true, -1,
false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_3', port=3) "
"error: port must be in range [0, 2].";
TestRemoveRegularFaninByPort("foo_3", true, 3,
false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_4', port=-1) "
"error: port must be in range [0, 2].";
TestRemoveRegularFaninByPort("foo_4", true, -1,
false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_4', port=3) "
"error: port must be in range [0, 2].";
TestRemoveRegularFaninByPort("foo_4", true, 3,
false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::RemoveRegularFaninByPort(node_name='foo_missing', "
"port=0) error: node 'foo_missing' was not found.";
TestRemoveRegularFaninByPort("foo_missing", false, 0,
false, error_msg, {});
}
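// Exercises MutableGraphView::RemoveAllFanins and, on success, verifies that
// every dropped input also disappeared from the corresponding fanout sets.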
void TestRemoveAllFanins(absl::string_view node_name, bool node_exists,
bool keep_controlling_nodes, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
absl::flat_hash_set<string> fanin_strings;
if (node_exists) {
EXPECT_NE(node, nullptr);
fanin_strings.insert(node->input().begin(), node->input().end());
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.RemoveAllFanins(node_name, keep_controlling_nodes);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
if (success) {
TensorId tensor_id;
auto retained_inputs = absl::flat_hash_set<string>(node->input().begin(),
node->input().end());
for (const string& fanin : fanin_strings) {
if (!retained_inputs.contains(fanin)) {
tensor_id = ParseTensorName(fanin);
CheckFanoutRemoved(graph, tensor_id, node_name);
}
}
}
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveAllFanins) {
string error_msg;
TestRemoveAllFanins("foo_3", true,
false,
true, error_msg, {});
TestRemoveAllFanins("foo_4", true,
false,
true, error_msg, {});
TestRemoveAllFanins("foo_3", true,
true,
true, error_msg, {});
TestRemoveAllFanins("foo_4", true,
true,
true, error_msg, {"^c", "^d"});
TestRemoveAllFanins("foo_5", true,
false,
true, error_msg, {});
TestRemoveAllFanins("foo_5", true,
true,
true, error_msg, {});
TestRemoveAllFanins("foo_6", true,
false,
true, error_msg, {});
TestRemoveAllFanins("foo_6", true,
true,
true, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::RemoveAllFanins(node_name='foo_missing', "
"keep_controlling_fanins=false) error: node 'foo_missing' was not found.";
TestRemoveAllFanins("foo_missing", false,
false,
false, error_msg, {});
error_msg =
"MutableGraphView::RemoveAllFanins(node_name='foo_missing', "
"keep_controlling_fanins=true) error: node 'foo_missing' was not found.";
TestRemoveAllFanins("foo_missing", false,
true,
false, error_msg, {});
}
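// Exercises MutableGraphView::UpdateFanin, replacing `from_fanin` with
// `to_fanin` on `node_name`, and checks fanins, fanouts, and error messages.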
void TestUpdateFanin(absl::string_view node_name, bool node_exists,
const TensorId& from_fanin, const TensorId& to_fanin,
bool success, const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.UpdateFanin(node_name, from_fanin, to_fanin);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
if (success) {
CheckFanoutRemoved(graph, from_fanin, node_name);
}
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateFanin) {
string error_msg;
TestUpdateFanin("foo_4", true, {"b", 2}, {"b", 3},
true, error_msg, {"a", "b:3", "b:3", "^c", "^d"});
TestUpdateFanin("foo_4", true, {"b", 2},
{"b", Graph::kControlSlot},
true, error_msg, {"a", "^c", "^d", "^b"});
TestUpdateFanin(
"foo_4", true, {"d", Graph::kControlSlot}, {"d", 1},
true, error_msg, {"a", "b:2", "b:2", "d:1", "^c"});
TestUpdateFanin("foo_4", true, {"c", Graph::kControlSlot},
{"b", Graph::kControlSlot}, true, error_msg,
{"a", "b:2", "b:2", "^d"});
TestUpdateFanin("foo_4", true, {"c", Graph::kControlSlot},
{"d", Graph::kControlSlot}, true, error_msg,
{"a", "b:2", "b:2", "^d"});
TestUpdateFanin("foo_1", true, {"a", -1}, {"a", -1},
true, error_msg, {"a"});
TestUpdateFanin("foo_1", true, {"a", 0}, {"a", 0},
true, error_msg, {"a"});
TestUpdateFanin("foo_1", true, {"a", 1}, {"a", 1},
true, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_missing', "
"from_fanin='a:0', to_fanin='a:1') error: node 'foo_missing' was not "
"found.";
TestUpdateFanin("foo_missing", false, {"a", 0}, {"a", 1},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_1', "
"from_fanin='from_bar_missing:0', to_fanin='a:1') error: node "
"'from_bar_missing' was not found.";
TestUpdateFanin("foo_1", true, {"from_bar_missing", 0},
{"a", 1},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_1', from_fanin='a:0', "
"to_fanin='to_bar_missing:1') error: node 'to_bar_missing' was not "
"found.";
TestUpdateFanin("foo_1", true, {"a", 0},
{"to_bar_missing", 1}, false, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_missing', "
"from_fanin='from_bar_missing:0', to_fanin='to_bar_missing:1') error: "
"node 'foo_missing' was not found.";
TestUpdateFanin("foo_missing", false, {"from_bar_missing", 0},
{"to_bar_missing", 1},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_1', from_fanin='a:-2', "
"to_fanin='a:0') error: fanin 'a:-2' must be a valid tensor id.";
TestUpdateFanin("foo_1", true, {"a", -2}, {"a", 0},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_1', from_fanin='a:0', "
"to_fanin='a:-2') error: fanin 'a:-2' must be a valid tensor id.";
TestUpdateFanin("foo_1", true, {"a", 0}, {"a", -2},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_missing', "
"from_fanin='from_bar_missing:-2', to_fanin='to_bar_missing:-3') error: "
"fanin 'from_bar_missing:-2' must be a valid tensor id.";
TestUpdateFanin("foo_missing", false,
{"from_bar_missing", -2}, {"to_bar_missing", -3},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_4', from_fanin='b:2', "
"to_fanin='foo_4:3') error: can't update fanin to or from self.";
TestUpdateFanin("foo_4", true, {"b", 2}, {"foo_4", 3},
false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_4', from_fanin='b:2', "
"to_fanin='^foo_4') error: can't update fanin to or from self.";
TestUpdateFanin(
"foo_4", true, {"b", 2}, {"foo_4", Graph::kControlSlot},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_4', from_fanin='^c', "
"to_fanin='foo_4:4') error: can't update fanin to or from self.";
TestUpdateFanin(
"foo_4", true, {"c", Graph::kControlSlot}, {"foo_4", 4},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateFanin(node_name='foo_4', from_fanin='^c', "
"to_fanin='^foo_4') error: can't update fanin to or from self.";
TestUpdateFanin("foo_4", true, {"c", Graph::kControlSlot},
{"foo_4", Graph::kControlSlot}, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
}
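// Updating a fanin to a control dependency on a Switch node must fail:
// MutableGraphView refuses to anchor control dependencies directly on Switch.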
void TestUpdateFaninFromFaninToNodeAsSwitchControl(const TensorId& fanin) {
string tensor_id_str = TensorIdToString(fanin);
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "Switch", {}, {}),
NDef("c", "NotImportant", {tensor_id_str})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.UpdateFanin("c", fanin, {"b", Graph::kControlSlot});
EXPECT_FALSE(s.ok());
string expected_msg = absl::Substitute(
"MutableGraphView::UpdateFanin(node_name='c', from_fanin='$0', "
"to_fanin='^b') error: can't update to fanin '^b' as it will become a "
"Switch control dependency.",
fanin.ToString());
EXPECT_EQ(s.message(), expected_msg);
EXPECT_EQ(graph.graph()->node_size(), 3);
string fanout = IsControlInput(fanin) ? AsControlDependency("c") : "c";
CheckNode(graph, "a", "NotImportant", "", {}, {}, {fanout});
CheckNode(graph, "b", "Switch", "", {}, {}, {});
CheckNode(graph, "c", "NotImportant", "", {}, {tensor_id_str}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateFaninToNodeAsSwitchControl) {
TestUpdateFaninFromFaninToNodeAsSwitchControl({"a", 0});
TestUpdateFaninFromFaninToNodeAsSwitchControl({"a", 1});
TestUpdateFaninFromFaninToNodeAsSwitchControl({"a", Graph::kControlSlot});
}
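// Exercises MutableGraphView::UpdateRegularFaninByPort, which replaces the
// regular fanin at a given port.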
void TestUpdateRegularFaninByPort(absl::string_view node_name, bool node_exists,
int port, const TensorId& fanin, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.UpdateRegularFaninByPort(node_name, port, fanin);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateRegularFaninByPort) {
string error_msg;
TestUpdateRegularFaninByPort(
"foo_3", true, 0, {"d", 2},
true, error_msg, {"d:2", "a:1", "a:1"});
TestUpdateRegularFaninByPort(
"foo_3", true, 2, {"d", 2},
true, error_msg, {"b", "a:1", "d:2"});
TestUpdateRegularFaninByPort(
"foo_3", true, 1, {"d", 2},
true, error_msg, {"b", "d:2", "a:1"});
TestUpdateRegularFaninByPort(
"foo_4", true, 0, {"d", 2},
true, error_msg, {"d:2", "b:2", "b:2", "^c"});
TestUpdateRegularFaninByPort(
"foo_4", true, 2, {"d", 2},
true, error_msg, {"a", "b:2", "d:2", "^c"});
TestUpdateRegularFaninByPort(
"foo_4", true, 1, {"d", 2},
true, error_msg, {"a", "d:2", "b:2", "^c"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_4', port=1, "
"fanin='^d') error: fanin '^d' must be a regular tensor id.";
TestUpdateRegularFaninByPort(
"foo_4", true, 1, {"d", Graph::kControlSlot},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_5', port=-1, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_5", true, -1,
{"d", 2},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_5', port=0, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_5", true, 0,
{"d", 2},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_5', port=1, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_5", true, 1,
{"d", 2},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_6', port=-1, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_6", true, -1,
{"d", 2},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_6', port=0, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_6", true, 0,
{"d", 2},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_6', port=1, "
"fanin='d:2') error: no available ports as node has no regular fanins.";
TestUpdateRegularFaninByPort("foo_6", true, 1,
{"d", 2},
false, error_msg, {"^a", "^b"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_3', port=-1, "
"fanin='d:2') error: port must be in range [0, 2].";
TestUpdateRegularFaninByPort(
"foo_3", true, -1, {"d", 2},
false, error_msg, {"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_3', port=3, "
"fanin='d:2') error: port must be in range [0, 2].";
TestUpdateRegularFaninByPort(
"foo_3", true, 3, {"d", 2},
false, error_msg, {"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_4', port=-1, "
"fanin='d:2') error: port must be in range [0, 2].";
TestUpdateRegularFaninByPort(
"foo_4", true, -1, {"d", 2},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_4', port=3, "
"fanin='d:2') error: port must be in range [0, 2].";
TestUpdateRegularFaninByPort(
"foo_4", true, 3, {"d", 2},
false, error_msg, {"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_missing', "
"port=0, fanin='a:0') error: node 'foo_missing' was not found.";
TestUpdateRegularFaninByPort("foo_missing", false,
0, {"a", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_1', port=0, "
"fanin='bar_missing:0') error: node 'bar_missing' was not "
"found.";
TestUpdateRegularFaninByPort("foo_1", true, 0,
{"bar_missing", 0},
false, error_msg, {"a"});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_missing', "
"port=0, fanin='bar_missing:0') error: node 'foo_missing' was not found.";
TestUpdateRegularFaninByPort("foo_missing", false,
0, {"bar_missing", 0},
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateRegularFaninByPort(node_name='foo_6', port=0, "
"fanin='foo_6:2') error: can't add fanin 'foo_6:2' to self.";
TestUpdateRegularFaninByPort("foo_6", true, 0,
{"foo_6", 2},
false, error_msg, {"^a", "^b"});
}
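// Exercises MutableGraphView::SwapRegularFaninsByPorts, which exchanges two
// regular fanins of a node in place.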
void TestSwapRegularFaninsByPorts(absl::string_view node_name, bool node_exists,
int from_port, int to_port, bool success,
const string& error_msg,
absl::Span<const string> expected_fanins) {
GraphDef graph_def = SimpleMutateFaninGraph();
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.SwapRegularFaninsByPorts(node_name, from_port, to_port);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, SwapRegularFaninsByPorts) {
string error_msg;
TestSwapRegularFaninsByPorts("foo_3", true, 0,
2, true, error_msg,
{"a:1", "a:1", "b"});
TestSwapRegularFaninsByPorts("foo_3", true, 2,
0, true, error_msg,
{"a:1", "a:1", "b"});
TestSwapRegularFaninsByPorts("foo_4", true, 0,
2, true, error_msg,
{"b:2", "b:2", "a", "^c", "^d"});
TestSwapRegularFaninsByPorts("foo_4", true, 2,
0, true, error_msg,
{"b:2", "b:2", "a", "^c", "^d"});
TestSwapRegularFaninsByPorts("foo_3", true, 0,
1, true, error_msg,
{"a:1", "b", "a:1"});
TestSwapRegularFaninsByPorts("foo_3", true, 1,
0, true, error_msg,
{"a:1", "b", "a:1"});
TestSwapRegularFaninsByPorts("foo_4", true, 0,
1, true, error_msg,
{"b:2", "a", "b:2", "^c", "^d"});
TestSwapRegularFaninsByPorts("foo_4", true, 1,
0, true, error_msg,
{"b:2", "a", "b:2", "^c", "^d"});
TestSwapRegularFaninsByPorts("foo_4", true, 1,
1, true, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
TestSwapRegularFaninsByPorts("foo_4", true, 1,
2, true, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_5', "
"from_port=-1, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_5", true, -1,
0, false, error_msg, {});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_5', "
"from_port=0, to_port=-1) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_5", true, 0,
-1, false, error_msg,
{});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_5', "
"from_port=0, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_5", true, 0,
0, false, error_msg, {});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_5', "
"from_port=0, to_port=1) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_5", true, 0,
1, false, error_msg, {});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_5', "
"from_port=1, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_5", true, 1,
0, false, error_msg, {});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_6', "
"from_port=-1, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_6", true, -1,
0, false, error_msg,
{"^a", "^b"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_6', "
"from_port=0, to_port=-1) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_6", true, 0,
-1, false, error_msg,
{"^a", "^b"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_6', "
"from_port=0, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_6", true, 0,
0, false, error_msg,
{"^a", "^b"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_6', "
"from_port=0, to_port=1) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_6", true, 0,
1, false, error_msg,
{"^a", "^b"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_6', "
"from_port=1, to_port=0) error: no available ports as node has no "
"regular fanins.";
TestSwapRegularFaninsByPorts("foo_6", true, 1,
0, false, error_msg,
{"^a", "^b"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=-1, to_port=0) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, -1,
0, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=0, to_port=-1) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, 0,
-1, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=0, to_port=3) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, 0,
3, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=3, to_port=0) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, 3,
0, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=-1, to_port=3) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, -1,
3, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_3', "
"from_port=3, to_port=-1) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_3", true, 3,
-1, false, error_msg,
{"b", "a:1", "a:1"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=-1, to_port=0) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, -1,
0, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=0, to_port=-1) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, 0,
-1, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=0, to_port=3) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, 0,
3, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=3, to_port=0) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, 3,
0, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=-1, to_port=3) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, -1,
3, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_4', "
"from_port=3, to_port=-1) error: port must be in range [0, 2].";
TestSwapRegularFaninsByPorts("foo_4", true, 3,
-1, false, error_msg,
{"a", "b:2", "b:2", "^c", "^d"});
error_msg =
"MutableGraphView::SwapRegularFaninsByPorts(node_name='foo_missing', "
"from_port=0, to_port=1) error: node 'foo_missing' was not found.";
TestSwapRegularFaninsByPorts("foo_missing", false,
0, 1,
false, error_msg, {});
}
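// On construction, MutableGraphView drops control inputs that duplicate an
// existing regular or control fanin of the same node.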
TEST(MutableGraphViewTest, DedupControllingFaninsOnGraphInit) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "Switch", {}, {}), NDef("d", "Identity", {"c:1"}),
NDef("foo_1", "IdentityN", {"a", "b:1", "^b"}),
NDef("foo_2", "IdentityN", {"a", "^b", "^b"}),
NDef("foo_3", "IdentityN", {"a", "b:1", "^b", "^b"}),
NDef("foo_4", "IdentityN", {"a:2", "b:1", "^b", "^b", "^a", "^a"}),
NDef("foo_5", "NotImportant", {"a:2", "b:1", "^b", "^b", "^a", "^a"}),
NDef("foo_6", "Identity", {"d", "^d"}),
NDef("foo_7", "NotImportant",
{"a:3", "b:2", "d", "^d", "^d", "^a", "^b", "^a", "^b"})},
{});
MutableGraphView graph(&graph_def);
EXPECT_EQ(graph.graph()->node_size(), 11);
CheckNode(graph, "a", "NotImportant", "", {}, {},
{"foo_1", "foo_2", "foo_3", "foo_4", "foo_5", "foo_7"});
CheckNode(graph, "b", "NotImportant", "", {}, {},
{"foo_1:1", "^foo_2", "foo_3:1", "foo_4:1", "foo_5:1", "foo_7:1"});
CheckNode(graph, "c", "Switch", "", {}, {}, {"d"});
CheckNode(graph, "d", "Identity", "", {}, {"c:1"},
{"foo_6", "^foo_6", "foo_7:2", "^foo_7"});
CheckNode(graph, "foo_1", "IdentityN", "", {}, {"a", "b:1"}, {});
CheckNode(graph, "foo_2", "IdentityN", "", {}, {"a", "^b"}, {});
CheckNode(graph, "foo_3", "IdentityN", "", {}, {"a", "b:1"}, {});
CheckNode(graph, "foo_4", "IdentityN", "", {}, {"a:2", "b:1"}, {});
CheckNode(graph, "foo_5", "NotImportant", "", {}, {"a:2", "b:1"}, {});
CheckNode(graph, "foo_6", "Identity", "", {}, {"d", "^d"}, {});
CheckNode(graph, "foo_7", "NotImportant", "", {}, {"a:3", "b:2", "d", "^d"},
{});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DedupControllingFaninsOnAddFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"^a"}),
NDef("c", "NotImportant", {"a:1"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddRegularFanin("b", {"a", 2}));
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("c", {"a", Graph::kControlSlot}));
CheckNode(graph, "c", "NotImportant", "", {}, {"a:1"}, {});
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b:0", "c:0"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, NoDedupControllingFaninsOnAddFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "Switch", {}, {}), NDef("b", "Identity", {"a:1"}),
NDef("c", "", {}, {}), NDef("d", "", {}, {})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddRegularFanin("c", {"b", 2}));
CheckNode(graph, "c", "", "", {}, {"b:2"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("c", {"b", Graph::kControlSlot}));
CheckNode(graph, "c", "", "", {}, {"b:2", "^b"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("c", {"b", Graph::kControlSlot}));
CheckNode(graph, "c", "", "", {}, {"b:2", "^b"}, {});
TF_EXPECT_OK(graph.AddRegularFanin("c", {"b", 2}));
CheckNode(graph, "c", "", "", {}, {"b:2", "b:2", "^b"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("d", {"b", Graph::kControlSlot}));
CheckNode(graph, "d", "", "", {}, {"^b"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("d", {"b", Graph::kControlSlot}));
CheckNode(graph, "d", "", "", {}, {"^b"}, {});
CheckNode(graph, "a", "Switch", "", {}, {}, {"b"});
CheckNode(graph, "b", "Identity", "", {}, {"a:1"},
{"c:0", "c:1", "^c", "^d"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DedupControllingFaninsOnAddFaninByPort) {
GraphDef graph_def =
test::function::GDef({NDef("a", "NotImportant", {}, {}),
NDef("b", "NotImportant", {"c", "^a"}),
NDef("c", "NotImportant", {"a:1"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddRegularFaninByPort("b", 0, {"a", 2}));
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2", "c"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("c", {"a", Graph::kControlSlot}));
CheckNode(graph, "c", "NotImportant", "", {}, {"a:1"}, {"b:1"});
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b:0", "c:0"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, NoDedupControllingFaninsOnAddFaninByPort) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "Switch", {}, {}), NDef("b", "Identity", {"a:1"}),
NDef("c", "", {}, {}), NDef("d", "", {"c:2"}, {})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddRegularFaninByPort("d", 1, {"b", 2}));
CheckNode(graph, "d", "", "", {}, {"c:2", "b:2"}, {});
TF_EXPECT_OK(graph.AddControllingFanin("d", {"b", Graph::kControlSlot}));
CheckNode(graph, "d", "", "", {}, {"c:2", "b:2", "^b"}, {});
TF_EXPECT_OK(graph.AddRegularFaninByPort("d", 0, {"b", 2}));
CheckNode(graph, "d", "", "", {}, {"b:2", "c:2", "b:2", "^b"}, {});
CheckNode(graph, "a", "Switch", "", {}, {}, {"b:0"});
CheckNode(graph, "b", "Identity", "", {}, {"a:1"}, {"d:0", "d:2", "^d"});
CheckNode(graph, "c", "", "", {}, {}, {"d:1"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DedupControllingFaninsOnUpdateFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "NotImportant", {"a:1", "^b"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateFanin("c", {"a", 1}, {"b", 2}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"b:2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, NoDedupControllingFaninsOnUpdateFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "Switch", {}, {}), NDef("b", "Identity", {"a:1"}),
NDef("c", "Identity", {"a:2"}), NDef("d", "NotImportant", {"c", "^b"}),
NDef("e", "NotImportant", {"b", "^c"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateFanin("d", {"b", Graph::kControlSlot},
{"c", Graph::kControlSlot}));
CheckNode(graph, "d", "NotImportant", "", {}, {"c", "^c"}, {});
TF_EXPECT_OK(graph.UpdateFanin("e", {"b", 0}, {"c", 3}));
CheckNode(graph, "e", "NotImportant", "", {}, {"c:3", "^c"}, {});
TF_EXPECT_OK(graph.UpdateFanin("e", {"c", 3}, {"c", Graph::kControlSlot}));
CheckNode(graph, "e", "NotImportant", "", {}, {"^c"}, {});
CheckNode(graph, "a", "Switch", "", {}, {}, {"b:0", "c:0"});
CheckNode(graph, "b", "Identity", "", {}, {"a:1"}, {});
CheckNode(graph, "c", "Identity", "", {}, {"a:2"}, {"d:0", "^d", "^e"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DedupControllingFaninsOnUpdateFaninByPort) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "NotImportant", {"a:1", "^b"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateRegularFaninByPort("c", 0, {"b", 2}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"b:2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, NoDedupControllingFaninsOnUpdateFaninByPort) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "Switch", {}, {}), NDef("b", "Identity", {"a:1"}),
NDef("c", "Identity", {"a:2"}), NDef("d", "NotImportant", {"c", "^b"}),
NDef("e", "NotImportant", {"b", "^c"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateRegularFaninByPort("d", 0, {"b", 1}));
CheckNode(graph, "d", "NotImportant", "", {}, {"b:1", "^b"}, {});
TF_EXPECT_OK(graph.UpdateRegularFaninByPort("e", 0, {"c", 2}));
CheckNode(graph, "e", "NotImportant", "", {}, {"c:2", "^c"}, {});
CheckNode(graph, "a", "Switch", "", {}, {}, {"b:0", "c:0"});
CheckNode(graph, "b", "Identity", "", {}, {"a:1"}, {"d:0", "^d"});
CheckNode(graph, "c", "Identity", "", {}, {"a:2"}, {"e:0", "^e"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateMaxRegularOutputPortOnAddFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:1"}),
NDef("c", "NotImportant", {"^b"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddRegularFanin("c", {"a", 3}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:1"}, {"^c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a:3", "^b"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateMaxRegularOutputPortOnRemoveFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:1"}),
NDef("c", "NotImportant", {"a:2"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.RemoveRegularFanin("c", {"a", 2}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:1"}, {});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, KeepMaxRegularOutputPortOnRemoveFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:1"}),
NDef("c", "NotImportant", {"a:2"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.RemoveRegularFanin("b", {"a", 1}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"c"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {});
CheckNode(graph, "c", "NotImportant", "", {}, {"a:2"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateMaxRegularOutputPortOnUpdateFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:1"}),
NDef("c", "NotImportant", {"a:2"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateFanin("c", {"a", 2}, {"b", 3}));
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:1"}, {"c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"b:3"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninMissing) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.AddControllingFanin("a", {"c", Graph::kControlSlot});
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::AddControllingFanin(node_name='a', fanin='^c') error: "
"node 'c' was not found.";
EXPECT_EQ(s.message(), expected_msg);
s = graph.AddControllingFanin("d", {"a", Graph::kControlSlot});
EXPECT_FALSE(s.ok());
expected_msg =
"MutableGraphView::AddControllingFanin(node_name='d', fanin='^a') error: "
"node 'd' was not found.";
EXPECT_EQ(s.message(), expected_msg);
s = graph.AddControllingFanin("c", {"d", Graph::kControlSlot});
EXPECT_FALSE(s.ok());
expected_msg =
"MutableGraphView::AddControllingFanin(node_name='c', fanin='^d') error: "
"node 'c' was not found.";
EXPECT_EQ(s.message(), expected_msg);
ASSERT_EQ(graph.graph()->node_size(), 2);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninExistingControl) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddControllingFanin("a", {"b", Graph::kControlSlot}));
TF_EXPECT_OK(graph.AddControllingFanin("a", {"b", Graph::kControlSlot}));
ASSERT_EQ(graph.graph()->node_size(), 2);
CheckNode(graph, "a", "NotImportant", "", {}, {"^b"}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"^a"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninNotSwitch) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddControllingFanin("a", {"b", 2}));
TF_EXPECT_OK(graph.AddControllingFanin("a", {"b", 2}));
ASSERT_EQ(graph.graph()->node_size(), 2);
CheckNode(graph, "a", "NotImportant", "", {}, {"^b"}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"^a"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninSwitch) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "Switch", {}, {})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.AddControllingFanin("a", {"b", Graph::kControlSlot});
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::AddControllingFanin(node_name='a', fanin='^b') error: "
"can't add fanin '^b' as it will become a Switch control dependency.";
EXPECT_EQ(s.message(), expected_msg);
ASSERT_EQ(graph.graph()->node_size(), 2);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "Switch", "", {}, {}, {});
CheckGraph(graph);
}
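// A control dependency on a Switch output is rerouted through an Identity
// node consuming that output; an existing Identity is reused.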
TEST(MutableGraphViewTest, AddControllingFaninSwitchWithIdentity) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("switch", "Switch", {}, {}),
NDef("identity", "Identity", {"switch"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
ASSERT_EQ(graph.graph()->node_size(), 3);
CheckNode(graph, "a", "NotImportant", "", {}, {"^identity"}, {});
CheckNode(graph, "switch", "Switch", "", {}, {}, {"identity"});
CheckNode(graph, "identity", "Identity", "", {}, {"switch"}, {"^a"});
CheckGraph(graph);
}
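// When no Identity on the Switch output exists, one is generated (named
// ConstantFoldingCtrl/<switch>_<port>) on the same device as the Switch.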
TEST(MutableGraphViewTest, AddControllingFaninSwitchWithNoExistingIdentity) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}),
NDef("switch", "Switch", {}, {{"T", DT_FLOAT}}, kDevice)},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
ASSERT_EQ(graph.graph()->node_size(), 3);
CheckNode(graph, "a", "NotImportant", "", {},
{"^ConstantFoldingCtrl/switch_0"}, {});
CheckNode(graph, "switch", "Switch", kDevice, {{"T", DT_FLOAT}}, {},
{"ConstantFoldingCtrl/switch_0"});
CheckNode(graph, "ConstantFoldingCtrl/switch_0", "Identity", kDevice,
{{"T", DT_FLOAT}}, {"switch"}, {"^a"});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninSwitchWithExistingAddedIdentity) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("switch", "Switch", {}, {}),
NDef("ConstantFoldingCtrl/switch_0", "Identity", {"switch"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
TF_EXPECT_OK(graph.AddControllingFanin("a", {"switch", 0}));
ASSERT_EQ(graph.graph()->node_size(), 3);
CheckNode(graph, "a", "NotImportant", "", {},
{"^ConstantFoldingCtrl/switch_0"}, {});
CheckNode(graph, "switch", "Switch", "", {}, {},
{"ConstantFoldingCtrl/switch_0"});
CheckNode(graph, "ConstantFoldingCtrl/switch_0", "Identity", "", {},
{"switch"}, {"^a"});
CheckGraph(graph);
}
void TestAddControllingFaninSelfLoops(absl::string_view node_name,
const TensorId& fanin,
const string& error_msg) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}),
NDef("b", "Switch", {}, {{"T", DT_FLOAT}}),
NDef("c", "Identity", {"b:0"}), NDef("d", "Identity", {"b:1"}),
NDef("e", "NotImportant", {"^a"})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.AddControllingFanin(node_name, fanin);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), error_msg);
EXPECT_EQ(graph.graph()->node_size(), 5);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"^e"});
CheckNode(graph, "b", "Switch", "", {{"T", DT_FLOAT}}, {}, {"c", "d"});
CheckNode(graph, "c", "Identity", "", {}, {"b"}, {});
CheckNode(graph, "d", "Identity", "", {}, {"b:1"}, {});
CheckNode(graph, "e", "NotImportant", "", {}, {"^a"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, AddControllingFaninSelfLoops) {
string error_msg =
"MutableGraphView::AddControllingFanin(node_name='a', fanin='^a') error: "
"can't add fanin '^a' to self.";
TestAddControllingFaninSelfLoops("a", {"a", Graph::kControlSlot}, error_msg);
error_msg =
"MutableGraphView::AddControllingFanin(node_name='c', fanin='b:0') "
"error: can't add found fanin '^c' to self.";
TestAddControllingFaninSelfLoops("c", {"b", 0}, error_msg);
error_msg =
"MutableGraphView::AddControllingFanin(node_name='d', fanin='b:1') "
"error: can't add found fanin '^d' to self.";
TestAddControllingFaninSelfLoops("d", {"b", 1}, error_msg);
}
TEST(MutableGraphViewTest, AddControllingFaninSelfLoopsGeneratedIdentity) {
GraphDef graph_def =
test::function::GDef({NDef("a", "NotImportant", {}, {}),
NDef("b", "Switch", {}, {{"T", DT_FLOAT}}),
NDef("c", "NotImportant", {}),
NDef("ConstantFoldingCtrl/b_1", "Identity", {})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.AddControllingFanin("ConstantFoldingCtrl/b_1", {"b", 1});
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::AddControllingFanin(node_name='ConstantFoldingCtrl/"
"b_1', fanin='b:1') error: can't add generated fanin "
"'^ConstantFoldingCtrl/b_1' to self.";
EXPECT_EQ(s.message(), expected_msg);
EXPECT_EQ(graph.graph()->node_size(), 4);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "Switch", "", {{"T", DT_FLOAT}}, {}, {});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {});
CheckNode(graph, "ConstantFoldingCtrl/b_1", "Identity", "", {}, {}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveControllingFaninMissing) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "NotImportant", {}, {}),
NDef("d", "NotImportant", {"^a", "^b"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.RemoveControllingFanin("d", "c"));
ASSERT_EQ(graph.graph()->node_size(), 4);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"^d"});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"^d"});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {});
CheckNode(graph, "d", "NotImportant", "", {}, {"^a", "^b"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveControllingFaninExisting) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {}, {}),
NDef("c", "NotImportant", {}, {}),
NDef("d", "NotImportant", {"^a", "^b", "^c"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.RemoveControllingFanin("d", "a"));
TF_EXPECT_OK(graph.RemoveControllingFanin("d", "a"));
ASSERT_EQ(graph.graph()->node_size(), 4);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "b", "NotImportant", "", {}, {}, {"^d"});
CheckNode(graph, "c", "NotImportant", "", {}, {}, {"^d"});
CheckNode(graph, "d", "NotImportant", "", {}, {"^c", "^b"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveControllingFaninOnRegularFanin) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a"}),
NDef("c", "NotImportant", {"a", "b"})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.RemoveControllingFanin("c", "a"));
TF_EXPECT_OK(graph.RemoveControllingFanin("c", "b"));
ASSERT_EQ(graph.graph()->node_size(), 3);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a"}, {"c:1"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a", "b"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, RemoveControllingFaninSelfLoop) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a"}),
NDef("c", "NotImportant", {"a", "b"})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.RemoveControllingFanin("c", "c");
EXPECT_FALSE(s.ok());
string expected_msg =
"MutableGraphView::RemoveControllingFanin(node_name='c', "
"fanin_node_name='c') error: can't remove fanin '^c' from "
"self.";
EXPECT_EQ(s.message(), expected_msg);
ASSERT_EQ(graph.graph()->node_size(), 3);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a"}, {"c:1"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a", "b"}, {});
CheckGraph(graph);
}
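// Exercises MutableGraphView::UpdateAllRegularFaninsToControlling, which
// converts every regular fanin of a node into a control dependency.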
void TestUpdateAllRegularFaninsToControlling(
absl::string_view node_name, bool node_exists, bool success,
const string& error_msg, absl::Span<const string> expected_fanins) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}),
NDef("switch", "Switch", {}, {{"T", DT_FLOAT}}, kDevice),
NDef("b", "NotImportant", {"switch:1"}, {}),
NDef("ConstantFoldingCtrl/switch_1", "Identity", {"switch:1"},
{{"T", DT_FLOAT}}, kDevice),
NDef("c", "NotImportant", {"a", "^b"}, {}),
NDef("d", "NotImportant", {"b", "c"}, {}),
NDef("e", "NotImportant", {"^d"}, {})},
{});
MutableGraphView graph(&graph_def);
NodeDef* node = graph.GetNode(node_name);
if (node_exists) {
EXPECT_NE(node, nullptr);
} else {
EXPECT_EQ(node, nullptr);
}
absl::flat_hash_map<string, std::vector<string>> unmodified_node_inputs =
GetNodeInputsFromGraph(graph_def, node_name);
Status s = graph.UpdateAllRegularFaninsToControlling(node_name);
EXPECT_EQ(s.ok(), success);
if (!success) {
EXPECT_EQ(s.message(), error_msg);
}
if (node_exists) {
CompareNodeFanins(graph, node, expected_fanins);
}
CheckUnmodifiedNodeFanins(graph_def, node_name, unmodified_node_inputs);
CheckGraph(graph);
}
TEST(MutableGraphViewTest, UpdateAllRegularFaninsToControlling) {
string error_msg;
TestUpdateAllRegularFaninsToControlling("a", true,
true, error_msg, {});
TestUpdateAllRegularFaninsToControlling("c", true,
true, error_msg,
{"^a", "^b"});
TestUpdateAllRegularFaninsToControlling("d", true,
true, error_msg,
{"^b", "^c"});
TestUpdateAllRegularFaninsToControlling("e", true,
true, error_msg, {"^d"});
TestUpdateAllRegularFaninsToControlling("b", true,
true, error_msg,
{"^ConstantFoldingCtrl/switch_1"});
error_msg =
"MutableGraphView::UpdateAllRegularFaninsToControlling(node_name='f') "
"error: node 'f' was not found.";
TestUpdateAllRegularFaninsToControlling("f", false,
false, error_msg, {});
error_msg =
"MutableGraphView::UpdateAllRegularFaninsToControlling(node_name='"
"ConstantFoldingCtrl/switch_1') error: can't add found fanin "
"'^ConstantFoldingCtrl/switch_1' to self.";
TestUpdateAllRegularFaninsToControlling("ConstantFoldingCtrl/switch_1",
true,
false, error_msg,
{"switch:1"});
}
TEST(MutableGraphViewTest, UpdateAllRegularFaninsToControllingConsumingSwitch) {
constexpr char kDevice[] = "/device:foo:0";
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}),
NDef("switch", "Switch", {}, {{"T", DT_FLOAT}}, kDevice),
NDef("b", "NotImportant", {"switch:1"}, {})},
{});
MutableGraphView graph(&graph_def);
TF_EXPECT_OK(graph.UpdateAllRegularFaninsToControlling("b"));
EXPECT_EQ(graph.graph()->node_size(), 4);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {});
CheckNode(graph, "switch", "Switch", kDevice, {{"T", DT_FLOAT}}, {},
{"ConstantFoldingCtrl/switch_1"});
CheckNode(graph, "b", "NotImportant", "", {},
{"^ConstantFoldingCtrl/switch_1"}, {});
CheckNode(graph, "ConstantFoldingCtrl/switch_1", "Identity", kDevice,
{{"T", DT_FLOAT}}, {"switch:1"}, {"^b"});
CheckGraph(graph);
}
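// DeleteNodes only succeeds when none of the deleted nodes has a fanout that
// is retained in the graph.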
TEST(MutableGraphViewTest, DeleteNodes) {
GraphDef graph_def = test::function::GDef(
{NDef("bar", "NotImportant", {}, {}),
NDef("other", "NotImportant", {}, {}),
NDef("foo_1", "NotImportant", {"bar", "other", "bar:1", "^bar"}),
NDef("foo_2", "NotImportant", {"other:1", "bar:2", "^bar"})},
{});
MutableGraphView graph(&graph_def);
EXPECT_NE(graph.GetNode("foo_1"), nullptr);
TF_EXPECT_OK(graph.DeleteNodes({"foo_1"}));
EXPECT_EQ(graph.graph()->node_size(), 3);
EXPECT_EQ(graph.GetNode("foo_1"), nullptr);
CheckNode(graph, "bar", "NotImportant", "", {}, {}, {"foo_2:1"});
CheckNode(graph, "other", "NotImportant", "", {}, {}, {"foo_2"});
CheckNode(graph, "foo_2", "NotImportant", "", {}, {"other:1", "bar:2"}, {});
CheckGraph(graph);
}
GraphDef SimpleDeleteNodeGraph() {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:2"}),
NDef("c", "NotImportant", {"a:5", "^b"}), NDef("d", "NotImportant", {}),
NDef("e", "NotImportant", {"d:2"}),
NDef("f", "NotImportant", {"d:3", "^e"})},
{});
return graph_def;
}
TEST(MutableGraphViewTest, DeleteNodesWithFanoutsBeingDeleted) {
GraphDef graph_def = SimpleDeleteNodeGraph();
MutableGraphView graph(&graph_def);
EXPECT_NE(graph.GetNode("a"), nullptr);
EXPECT_NE(graph.GetNode("b"), nullptr);
EXPECT_NE(graph.GetNode("c"), nullptr);
TF_EXPECT_OK(graph.DeleteNodes({"c", "a", "b"}));
EXPECT_EQ(graph.graph()->node_size(), 3);
EXPECT_EQ(graph.GetNode("a"), nullptr);
EXPECT_EQ(graph.GetNode("b"), nullptr);
EXPECT_EQ(graph.GetNode("c"), nullptr);
CheckNode(graph, "d", "NotImportant", "", {}, {}, {"e", "f"});
CheckNode(graph, "e", "NotImportant", "", {}, {"d:2"}, {"^f"});
CheckNode(graph, "f", "NotImportant", "", {}, {"d:3", "^e"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DeleteMissingNodes) {
GraphDef graph_def = SimpleDeleteNodeGraph();
MutableGraphView graph(&graph_def);
EXPECT_EQ(graph.GetNode("g"), nullptr);
EXPECT_EQ(graph.GetNode("h"), nullptr);
TF_EXPECT_OK(graph.DeleteNodes({"g", "h"}));
EXPECT_EQ(graph.graph()->node_size(), 6);
EXPECT_EQ(graph.GetNode("g"), nullptr);
EXPECT_EQ(graph.GetNode("h"), nullptr);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2"}, {"^c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a:5", "^b"}, {});
CheckNode(graph, "d", "NotImportant", "", {}, {}, {"e", "f"});
CheckNode(graph, "e", "NotImportant", "", {}, {"d:2"}, {"^f"});
CheckNode(graph, "f", "NotImportant", "", {}, {"d:3", "^e"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DeleteMissingNodesAndNodesWithFanoutsBeingDeleted) {
GraphDef graph_def = SimpleDeleteNodeGraph();
MutableGraphView graph(&graph_def);
EXPECT_NE(graph.GetNode("d"), nullptr);
EXPECT_NE(graph.GetNode("e"), nullptr);
EXPECT_NE(graph.GetNode("f"), nullptr);
TF_EXPECT_OK(graph.DeleteNodes({"d", "e", "f", "g", "h"}));
EXPECT_EQ(graph.graph()->node_size(), 3);
EXPECT_EQ(graph.GetNode("d"), nullptr);
EXPECT_EQ(graph.GetNode("e"), nullptr);
EXPECT_EQ(graph.GetNode("f"), nullptr);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2"}, {"^c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a:5", "^b"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DeleteNodesWithError) {
GraphDef graph_def = SimpleDeleteNodeGraph();
MutableGraphView graph(&graph_def);
Status s = graph.DeleteNodes({"b", "a"});
EXPECT_FALSE(s.ok());
string error_msg =
"MutableGraphView::DeleteNodes(nodes_to_delete={a, b}) error: can't "
"delete node(s) with retained fanouts(s) [a, b].";
EXPECT_EQ(s.message(), error_msg);
EXPECT_EQ(graph.graph()->node_size(), 6);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "c"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2"}, {"^c"});
CheckNode(graph, "c", "NotImportant", "", {}, {"a:5", "^b"}, {});
CheckNode(graph, "d", "NotImportant", "", {}, {}, {"e", "f"});
CheckNode(graph, "e", "NotImportant", "", {}, {"d:2"}, {"^f"});
CheckNode(graph, "f", "NotImportant", "", {}, {"d:3", "^e"}, {});
CheckGraph(graph);
}
TEST(MutableGraphViewTest, DeleteNodesWithLargeError) {
GraphDef graph_def = test::function::GDef(
{NDef("a", "NotImportant", {}, {}), NDef("b", "NotImportant", {"a:2"}),
NDef("c", "NotImportant", {"^b"}), NDef("d", "NotImportant", {"c:6"}),
NDef("e", "NotImportant", {"d:2"}),
NDef("f", "NotImportant", {"d:3", "^e"}),
NDef("g", "NotImportant", {"f"}), NDef("h", "NotImportant", {"a"}),
NDef("i", "NotImportant", {"b"}), NDef("j", "NotImportant", {"c"}),
NDef("k", "NotImportant", {"d"}), NDef("l", "NotImportant", {"e"}),
NDef("m", "NotImportant", {"f"})},
{});
MutableGraphView graph(&graph_def);
Status s = graph.DeleteNodes({"a", "b", "c", "d", "e", "f"});
EXPECT_FALSE(s.ok());
string error_msg =
"MutableGraphView::DeleteNodes(nodes_to_delete={a, b, c, d, e, ...}) "
"error: can't delete node(s) with retained fanouts(s) [a, b, c, d, e, "
"...].";
EXPECT_EQ(s.message(), error_msg);
EXPECT_EQ(graph.graph()->node_size(), 13);
CheckNode(graph, "a", "NotImportant", "", {}, {}, {"b", "h"});
CheckNode(graph, "b", "NotImportant", "", {}, {"a:2"}, {"^c", "i"});
CheckNode(graph, "c", "NotImportant", "", {}, {"^b"}, {"d", "j"});
CheckNode(graph, "d", "NotImportant", "", {}, {"c:6"}, {"e", "f", "k"});
CheckNode(graph, "e", "NotImportant", "", {}, {"d:2"}, {"^f", "l"});
CheckNode(graph, "f", "NotImportant", "", {}, {"d:3", "^e"}, {"g", "m"});
CheckNode(graph, "g", "NotImportant", "", {}, {"f"}, {});
CheckNode(graph, "h", "NotImportant", "", {}, {"a"}, {});
CheckNode(graph, "i", "NotImportant", "", {}, {"b"}, {});
CheckNode(graph, "j", "NotImportant", "", {}, {"c"}, {});
CheckNode(graph, "k", "NotImportant", "", {}, {"d"}, {});
CheckNode(graph, "l", "NotImportant", "", {}, {"e"}, {});
CheckNode(graph, "m", "NotImportant", "", {}, {"f"}, {});
CheckGraph(graph);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/mutable_graph_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/mutable_graph_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
302cc88e-5ab4-4ce3-bf18-b8cadb005346 | cpp | tensorflow/tensorflow | graph_topology_view | tensorflow/core/grappler/graph_topology_view.cc | tensorflow/core/grappler/graph_topology_view_test.cc | #include "tensorflow/core/grappler/graph_topology_view.h"
#include <algorithm>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
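// Sorts the container in place and erases consecutive duplicates (the
// standard sort + unique + erase idiom).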
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
std::sort(v->begin(), v->end());
v->erase(std::unique(v->begin(), v->end()), v->end());
}
}
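// Builds the name <-> index mapping and the fanin/fanout adjacency lists for
// `graph`. `ephemeral_edges` lets callers inject extra edges that are not
// encoded in the GraphDef; `ignore_control_edges` drops control inputs.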
Status GraphTopologyView::InitializeFromGraph(
const GraphDef& graph,
const absl::Span<const GraphView::Edge> ephemeral_edges,
bool ignore_control_edges) {
if (graph_ != nullptr) {
return errors::InvalidArgument("GraphTopologyView is already initialized.");
}
graph_ = &graph;
num_nodes_ = graph.node_size();
  // Reserve rather than resize: node names are appended with emplace_back in
  // the loop below.
  index_to_node_name_.reserve(num_nodes_);
node_name_to_index_.rehash(num_nodes_);
fanins_.resize(num_nodes_);
fanouts_.resize(num_nodes_);
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
node_name_to_index_.emplace(node.name(), node_idx);
index_to_node_name_.emplace_back(node.name());
}
for (const GraphView::Edge& edge : ephemeral_edges) {
const auto src = node_name_to_index_.find(edge.src.node->name());
const bool valid_src = src != node_name_to_index_.end();
if (!valid_src) {
const string error_message =
absl::StrCat("Non-existent src node: ", edge.src.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
const auto dst = node_name_to_index_.find(edge.dst.node->name());
const bool valid_dst = dst != node_name_to_index_.end();
if (!valid_dst) {
const string error_message =
absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_dst && valid_src) {
const int src_idx = src->second;
const int dst_idx = dst->second;
// Control edges are marked with negative port ids.
if (ignore_control_edges && (edge.src.port_id < 0 || edge.dst.port_id < 0)) {
continue;
}
fanins_[dst_idx].push_back(src_idx);
fanouts_[src_idx].push_back(dst_idx);
}
}
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
fanins_[node_idx].reserve(node.input_size());
for (const string& input : node.input()) {
TensorId tensor = ParseTensorName(input);
if (ignore_control_edges && IsTensorIdControl(tensor)) {
continue;
}
const auto it = node_name_to_index_.find(tensor.node());
const bool valid_input = it != node_name_to_index_.end();
if (!valid_input) {
const string error_message = absl::StrCat("Non-existent input ", input,
" in node ", node.name());
if (skip_invalid_edges_) {
VLOG(3) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_input) {
const int input_idx = it->second;
fanins_[node_idx].push_back(input_idx);
fanouts_[input_idx].push_back(node_idx);
}
}
SortAndRemoveDuplicates(&fanins_[node_idx]);
}
for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
SortAndRemoveDuplicates(&fanouts_[node_idx]);
}
return absl::OkStatus();
}
Status GraphTopologyView::InitializeFromGraph(
const GraphDef& graph,
const absl::Span<const GraphView::Edge> ephemeral_edges) {
return InitializeFromGraph(graph, ephemeral_edges,
false);
}
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph,
bool ignore_control_edges) {
return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
ignore_control_edges);
}
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph) {
return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
false);
}
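// The lookup helpers below require a successful InitializeFromGraph() call;
// they DCHECK on is_initialized() and fall back to null/empty results for
// missing nodes or out-of-range indices.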
bool GraphTopologyView::HasNode(const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
return it != node_name_to_index_.end();
}
const NodeDef* GraphTopologyView::GetNode(
const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
return it == node_name_to_index_.end() ? nullptr : &graph_->node(it->second);
}
const NodeDef* GraphTopologyView::GetNode(int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
return &graph_->node(node_idx);
}
const absl::optional<int> GraphTopologyView::GetNodeIndex(
const absl::string_view node_name) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const auto it = node_name_to_index_.find(node_name);
DCHECK(it != node_name_to_index_.end()) << "Node doesn't exist in a graph";
return it == node_name_to_index_.end() ? absl::nullopt
: absl::make_optional(it->second);
}
const absl::optional<int> GraphTopologyView::GetNodeIndex(
const NodeDef& node) const {
return GetNodeIndex(node.name());
}
const absl::InlinedVector<int, 4>& GraphTopologyView::GetFanin(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
const absl::InlinedVector<int, 2>& GraphTopologyView::GetFanout(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
}
} | #include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class GraphTopologyViewTest : public ::testing::Test {
protected:
using NodeConfig = std::pair<string, std::vector<string>>;
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
const auto& node_name = node.first;
const auto& node_inputs = node.second;
NodeDef node_def;
node_def.set_name(node_name);
for (const string& input : node_inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(GraphTopologyViewTest, SimpleGraph) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b"}},
{"d", {"a", "c"}},
});
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
const NodeDef* a_by_name = graph_view.GetNode("a");
const NodeDef* a_by_idx = graph_view.GetNode(0);
ASSERT_TRUE(a_by_name);
ASSERT_TRUE(a_by_idx);
EXPECT_EQ(a_by_name, a_by_idx);
const NodeDef* b_by_name = graph_view.GetNode("b");
const NodeDef* b_by_idx = graph_view.GetNode(1);
ASSERT_TRUE(b_by_name);
ASSERT_TRUE(b_by_idx);
EXPECT_EQ(b_by_name, b_by_idx);
const absl::optional<int> b_idx = graph_view.GetNodeIndex(*b_by_name);
ASSERT_TRUE(b_idx.has_value());
EXPECT_EQ(b_idx.value(), 1);
const absl::optional<int> c_idx = graph_view.GetNodeIndex("c");
ASSERT_TRUE(c_idx.has_value());
EXPECT_EQ(c_idx.value(), 2);
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(0), Fanin());
EXPECT_EQ(graph_view.GetFanin(1), Fanin());
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(0), Fanout({2, 3}));
EXPECT_EQ(graph_view.GetFanout(1), Fanout({2}));
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout());
}
TEST_F(GraphTopologyViewTest, GraphWithALoop) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b", "d"}},
{"d", {"a", "c"}},
});
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
}
TEST_F(GraphTopologyViewTest, GraphWithControls) {
const GraphDef graph = CreateGraph({
{"a", {}},
{"b", {}},
{"c", {"a", "b", "^d"}},
{"d", {"a", "c"}},
});
{
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
}
{
GraphTopologyView graph_view;
TF_CHECK_OK(
graph_view.InitializeFromGraph(graph, true));
EXPECT_TRUE(graph_view.is_initialized());
using Fanin = absl::InlinedVector<int, 4>;
EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
using Fanout = absl::InlinedVector<int, 2>;
EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
EXPECT_EQ(graph_view.GetFanout(3), Fanout({}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_topology_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_topology_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb3eda25-8fdd-4ad9-89dd-5e546b6e1c89 | cpp | tensorflow/tensorflow | grappler_item | tensorflow/core/grappler/grappler_item.cc | tensorflow/core/grappler/grappler_item_test.cc | #include "tensorflow/core/grappler/grappler_item.h"
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
GrapplerItem::OptimizationOptions CreateOptOptionsForEager() {
GrapplerItem::OptimizationOptions optimization_options;
optimization_options.allow_pruning_stateful_and_dataset_ops = true;
optimization_options.is_eager_mode = true;
optimization_options.optimize_function_library = false;
return optimization_options;
}
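// Returns a copy of this item that shares all metadata (feeds, fetches, init
// ops, save/restore info, devices, options) but swaps in the given graph.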
GrapplerItem GrapplerItem::WithGraph(GraphDef&& graph_def) const {
GrapplerItem item;
item.id = id;
item.feed = feed;
item.fetch = fetch;
item.init_ops = init_ops;
item.keep_ops = keep_ops;
item.expected_init_time = expected_init_time;
item.save_op = save_op;
item.restore_op = restore_op;
item.save_restore_loc_tensor = save_restore_loc_tensor;
item.queue_runners = queue_runners;
item.devices_ = devices_;
item.optimization_options_ = optimization_options_;
item.graph.Swap(&graph_def);
return item;
}
std::vector<const NodeDef*> GrapplerItem::MainOpsFanin() const {
std::vector<const NodeDef*> fanin_nodes;
TF_CHECK_OK(ComputeTransitiveFanin(graph, fetch, &fanin_nodes));
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::EnqueueOpsFanin() const {
std::vector<string> enqueue_ops;
for (const auto& queue_runner : queue_runners) {
for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
enqueue_ops.push_back(enqueue_op);
}
}
std::vector<const NodeDef*> fanin_nodes;
TF_CHECK_OK(ComputeTransitiveFanin(graph, enqueue_ops, &fanin_nodes));
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::InitOpsFanin() const {
std::vector<const NodeDef*> fanin_nodes;
TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &fanin_nodes));
return fanin_nodes;
}
std::vector<const NodeDef*> GrapplerItem::MainVariables() const {
std::vector<const NodeDef*> fanin;
TF_CHECK_OK(ComputeTransitiveFanin(graph, init_ops, &fanin));
std::vector<const NodeDef*> vars;
for (const NodeDef* node : fanin) {
if (IsVariable(*node)) {
vars.push_back(node);
}
}
return vars;
}
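// Collects the names of all nodes that optimizers must not remove: feeds,
// fetches, init/keep ops, save/restore ops, queue-runner ops, and, unless
// pruning them is explicitly allowed, stateful and dataset ops, plus any node
// carrying the `_grappler_do_not_remove` attribute.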
std::unordered_set<string> GrapplerItem::NodesToPreserve() const {
std::unordered_set<string> result;
for (const string& f : fetch) {
VLOG(1) << "Add fetch " << f;
result.insert(NodeName(f));
}
for (const auto& f : feed) {
VLOG(1) << "Add feed " << f.first;
result.insert(NodeName(f.first));
}
for (const auto& node : init_ops) {
result.insert(NodeName(node));
}
for (const auto& node : keep_ops) {
result.insert(NodeName(node));
}
if (!save_op.empty()) {
result.insert(NodeName(save_op));
}
if (!restore_op.empty()) {
result.insert(NodeName(restore_op));
}
if (!save_restore_loc_tensor.empty()) {
result.insert(NodeName(save_restore_loc_tensor));
}
for (const auto& queue_runner : queue_runners) {
for (const string& enqueue_op : queue_runner.enqueue_op_name()) {
result.insert(NodeName(enqueue_op));
}
if (!queue_runner.close_op_name().empty()) {
result.insert(NodeName(queue_runner.close_op_name()));
}
if (!queue_runner.cancel_op_name().empty()) {
result.insert(NodeName(queue_runner.cancel_op_name()));
}
}
absl::optional<FunctionLibraryDefinition> fn_library;
if (!optimization_options_.allow_pruning_stateful_and_dataset_ops) {
fn_library.emplace(OpRegistry::Global(), graph.library());
}
for (const NodeDef& node : graph.node()) {
const auto attrs = AttrSlice(&node.attr());
if (!optimization_options_.allow_pruning_stateful_and_dataset_ops &&
(IsStateful(node, &*fn_library) || IsDataset(node))) {
result.insert(node.name());
}
bool do_not_remove;
if (TryGetNodeAttr(attrs, "_grappler_do_not_remove", &do_not_remove) &&
do_not_remove) {
result.insert(node.name());
}
}
return result;
}
const std::unordered_set<string>& GrapplerItem::devices() const {
return devices_;
}
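// Registers a device with the item. Only fully specified names of the form
// /job:<j>/replica:<r>/task:<t>/device:<type>:<id> are accepted.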
Status GrapplerItem::AddDevice(const string& device) {
DeviceNameUtils::ParsedName name;
if (!DeviceNameUtils::ParseFullName(device, &name)) {
return errors::InvalidArgument("Invalid device name: device=", device);
} else if (!name.has_job || !name.has_replica || !name.has_task ||
!name.has_type || !name.has_id) {
return errors::InvalidArgument("Not a fully defined device name: device=",
device);
}
devices_.insert(DeviceNameUtils::ParsedNameToString(name));
return absl::OkStatus();
}
Status GrapplerItem::AddDevices(const GrapplerItem& other) {
std::vector<absl::string_view> invalid_devices;
for (const string& device : other.devices()) {
Status added = AddDevice(device);
if (!added.ok()) invalid_devices.emplace_back(device);
}
return invalid_devices.empty()
? absl::OkStatus()
: errors::InvalidArgument("Skipped invalid devices: [",
absl::StrJoin(invalid_devices, ", "),
"]");
}
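// Seeds the device set from the devices assigned to graph nodes, collecting
// (and reporting) any names that are not fully specified.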
Status GrapplerItem::InferDevicesFromGraph() {
absl::flat_hash_set<absl::string_view> invalid_devices;
for (const NodeDef& node : graph.node()) {
Status added = AddDevice(node.device());
if (!added.ok()) invalid_devices.insert(node.device());
}
VLOG(2) << "Inferred device set: [" << absl::StrJoin(devices_, ", ") << "]";
return invalid_devices.empty()
? absl::OkStatus()
: errors::InvalidArgument("Skipped invalid devices: [",
absl::StrJoin(invalid_devices, ", "),
"]");
}
void GrapplerItem::ClearDevices() { devices_.clear(); }
const GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options()
const {
return optimization_options_;
}
GrapplerItem::OptimizationOptions& GrapplerItem::optimization_options() {
return optimization_options_;
}
}
} | #include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerItemTest : public ::testing::Test {};
TEST_F(GrapplerItemTest, Basic) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {{"CPU:0"}});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
EXPECT_TRUE(item.InitOpsFanin().empty());
std::vector<string> graph_nodes;
for (const auto& node : item.graph.node()) {
graph_nodes.push_back(node.name());
}
std::vector<string> main_ops;
for (const auto& node : item.MainOpsFanin()) {
main_ops.push_back(node->name());
}
std::sort(graph_nodes.begin(), graph_nodes.end());
std::sort(main_ops.begin(), main_ops.end());
EXPECT_EQ(main_ops, graph_nodes);
}
TEST_F(GrapplerItemTest, InferDevices) {
using test::function::NDef;
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
const string cpu2 = "/device:CPU:2";
GrapplerItem item;
item.graph = test::function::GDef(
{
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu2),
},
{} );
ASSERT_FALSE(item.InferDevicesFromGraph().ok());
EXPECT_EQ(item.devices().size(), 2);
EXPECT_NE(item.devices().find(cpu0), item.devices().end());
EXPECT_NE(item.devices().find(cpu1), item.devices().end());
item.ClearDevices();
EXPECT_EQ(item.devices().size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c4ce22d-cbf0-4e42-b3eb-255b77c035b5 | cpp | tensorflow/tensorflow | grappler_item_builder | tensorflow/core/grappler/grappler_item_builder.cc | tensorflow/core/grappler/grappler_item_builder_test.cc | #include "tensorflow/core/grappler/grappler_item_builder.h"
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
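// Fills `tensor` with deterministic fake data so a GrapplerItem can be fed
// without real inputs: small repeating values for floats and int64s, zeroed
// bytes for other POD types; strings, resources, and variants are left alone.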
void InitializeTensor(DataType type, Tensor* tensor) {
const int period = 7;
if (type == DT_FLOAT) {
auto flat = tensor->flat<float>();
for (int i = 0; i < flat.size(); i++) {
flat(i) = static_cast<float>(i % period) / 10.0f;
}
} else if (type == DT_INT64) {
auto flat = tensor->flat<int64_t>();
for (int i = 0; i < flat.size(); i++) {
flat(i) = i % period;
}
} else if (type != DT_STRING && type != DT_RESOURCE && type != DT_VARIANT) {
memset(const_cast<char*>(tensor->tensor_data().data()), 0,
tensor->tensor_data().size());
}
}
Status PruneGraph(GrapplerItem* item) {
ModelPruner pruner;
GraphDef pruned_graph;
Cluster* cluster = nullptr;
TF_RETURN_IF_ERROR(pruner.Optimize(cluster, *item, &pruned_graph));
item->graph = std::move(pruned_graph);
return absl::OkStatus();
}
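// Rewrites a shape proto for feeding: unknown (-1) dimensions become
// cfg.placeholder_unknown_output_shape_dim when that override is set, while
// the concrete TensorShape built for fake feeds clamps every dimension to be
// at least 1.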
Status ReplaceUnknownShapeDim(const ItemConfig& cfg,
const TensorShapeProto& shape_pb_in,
TensorShapeProto* shape_pb_out,
TensorShape* shape_out) {
std::vector<int32> dims;
for (const auto& dim_proto : shape_pb_in.dim()) {
if (cfg.placeholder_unknown_output_shape_dim >= 0 &&
dim_proto.size() == -1) {
dims.push_back(cfg.placeholder_unknown_output_shape_dim);
shape_pb_out->add_dim()->set_size(
cfg.placeholder_unknown_output_shape_dim);
} else {
dims.push_back(std::max<int32>(1, dim_proto.size()));
shape_pb_out->add_dim()->set_size(dim_proto.size());
}
}
return TensorShapeUtils::MakeShape(dims.data(), dims.size(), shape_out);
}
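// Gives a Placeholder node a concrete shape (falling back to its
// _output_shapes hint for scalars) and registers a matching fake feed tensor
// on the item, honoring any explicit feed nodes from the config.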
Status UpdatePlaceholderShape(
const ItemConfig& cfg,
const std::unordered_set<string>& signature_feed_nodes,
GrapplerItem* new_item, NodeDef* node) {
if (node->attr().count("dtype") == 0) {
return absl::InternalError(absl::StrCat("Unknown type for placeholder ",
node->name(),
", skipping this input"));
}
DataType type = node->attr().at("dtype").type();
if (node->attr().count("shape") == 0) {
return absl::InternalError(absl::StrCat("Unknown shape for placeholder ",
node->name(),
", skipping this input"));
}
TensorShape shape;
TensorShapeProto shape_proto;
Status make_shape_status = ReplaceUnknownShapeDim(
cfg, node->attr().at("shape").shape(), &shape_proto, &shape);
if (!make_shape_status.ok()) {
return absl::InternalError(
absl::StrCat("Invalid shape for placeholder ", node->name(), ": ",
make_shape_status.ToString(), ", skipping this input"));
}
if ((cfg.placeholder_unknown_output_shape_dim >= 0) && (shape.dims() == 0) &&
(node->attr().count("_output_shapes") == 1)) {
const auto& output_shapes =
node->attr().at("_output_shapes").list().shape(0);
if (output_shapes.dim_size() != 0) {
shape.Clear();
shape_proto.clear_dim();
for (const auto& dim : output_shapes.dim()) {
auto size = dim.size();
if (size == -1) size = cfg.placeholder_unknown_output_shape_dim;
TF_RETURN_IF_ERROR(shape.AddDimWithStatus(size));
shape_proto.add_dim()->set_size(size);
}
}
}
Tensor fake_input(type, shape);
InitializeTensor(type, &fake_input);
if (cfg.feed_nodes.empty()) {
if (signature_feed_nodes.count(node->name()) == 0) {
new_item->feed.emplace_back(node->name(), fake_input);
}
} else if (cfg.feed_nodes.count(node->name()) > 0) {
auto it = std::find_if(new_item->feed.begin(), new_item->feed.end(),
[&node](std::pair<string, Tensor>& f) {
return f.first == node->name();
});
DCHECK(it != new_item->feed.end());
it->second = fake_input;
}
if (!shape_proto.dim().empty())
*(node->mutable_attr()->at("shape").mutable_shape()) = shape_proto;
return absl::OkStatus();
}
}
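// Runs the standard runtime GraphOptimizer passes over `graph_def_arg` on a
// local CPU device (opt level L1 when optimizations are requested, with
// optional function inlining) and writes the result to `output_graph_def`.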
Status RuntimeGraphOptimizer(const GraphDef& graph_def_arg,
GraphDef* output_graph_def,
const ItemConfig& cfg) {
if (!cfg.apply_optimizations && !cfg.inline_functions &&
!cfg.erase_noinline_attributes) {
if (output_graph_def != &graph_def_arg) {
*output_graph_def = graph_def_arg;
}
return absl::OkStatus();
}
SessionOptions options;
GraphDef graph_def(graph_def_arg);
if (cfg.erase_noinline_attributes) {
for (auto& func : *graph_def.mutable_library()->mutable_function()) {
func.mutable_attr()->erase("_noinline");
}
}
std::vector<std::unique_ptr<Device>> devices;
DeviceFactory* cpu_factory = DeviceFactory::GetFactory("CPU");
TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(
options, "/job:localhost/replica:0/task:0", &devices));
Device* cpu_device = devices[0].get();
auto dvc_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph_def.library());
Env* env = Env::Default();
OptimizerOptions* optimizer_opts =
options.config.mutable_graph_options()->mutable_optimizer_options();
if (cfg.apply_optimizations) {
optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L1);
} else {
optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L0);
}
optimizer_opts->set_do_function_inlining(cfg.inline_functions);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(dvc_mgr.get(), env, &options.config,
graph_def.versions().producer(),
&function_library, *optimizer_opts));
FunctionLibraryRuntime* flr = pflr->GetFLR(cpu_device->name());
GraphConstructorOptions graph_ctor_opts;
graph_ctor_opts.allow_internal_ops = true;
graph_ctor_opts.expect_device_spec = false;
std::unique_ptr<Graph> graphptr(new Graph(function_library));
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
graph_ctor_opts, std::move(graph_def), graphptr.get()));
::tensorflow::GraphOptimizer optimizer(*optimizer_opts);
optimizer.Optimize(flr, env, cpu_device, &graphptr,
tensorflow::GraphOptimizer::Options());
graphptr->ToGraphDef(output_graph_def);
return AddDefaultAttrsToGraphDef(output_graph_def, *graphptr->op_registry(),
0, true);
}
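// Builds a GrapplerItem from a MetaGraphDef: derives feeds/fetches from the
// config, signature defs, or the train_op collection; collects init ops,
// savers, queue runners, and asset files; shapes placeholders; then runs the
// runtime optimizer (and optional pruning) before validating that all feed,
// fetch, and init nodes still exist. Returns nullptr on any failure.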
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDef(
const string& id, const MetaGraphDef& meta_graph, const ItemConfig& cfg) {
if (id.empty()) {
LOG(ERROR) << "id must be non-empty.";
return nullptr;
}
std::unique_ptr<GrapplerItem> new_item(new GrapplerItem());
new_item->id = id;
new_item->graph = meta_graph.graph_def();
for (const auto& feed_node : cfg.feed_nodes) {
const string feed_name = NodeName(feed_node);
new_item->feed.emplace_back(feed_name, Tensor());
}
for (const auto& fetch_node : cfg.fetch_nodes) {
new_item->fetch.emplace_back(NodeName(fetch_node));
}
if (new_item->fetch.empty() &&
meta_graph.collection_def().count("train_op") > 0) {
const CollectionDef& nodes = meta_graph.collection_def().at("train_op");
if (nodes.has_node_list()) {
for (const auto& node : nodes.node_list().value()) {
new_item->fetch.push_back(NodeName(node));
}
}
}
std::unordered_set<string> signature_feed_nodes;
std::unordered_set<string> signature_fetch_nodes;
for (const auto& name_and_signature : meta_graph.signature_def()) {
for (const auto& name_and_input : name_and_signature.second.inputs()) {
const TensorInfo& input = name_and_input.second;
if (input.has_coo_sparse()) {
int64_t dim = std::max(1, cfg.placeholder_unknown_output_shape_dim);
TensorShape shape_1d({dim});
TensorShape shape_2d({dim, dim});
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().values_tensor_name()))) {
Tensor value_tensor(input.dtype(), shape_1d);
InitializeTensor(input.dtype(), &value_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().values_tensor_name()), value_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().indices_tensor_name()))) {
Tensor indices_tensor(DT_INT64, shape_2d);
// Indices of a sparse tensor are always int64.
InitializeTensor(DT_INT64, &indices_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().indices_tensor_name()),
indices_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().dense_shape_tensor_name()))) {
Tensor dense_shape_tensor(DT_INT64, shape_1d);
// The dense shape of a sparse tensor is always int64.
InitializeTensor(DT_INT64, &dense_shape_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().dense_shape_tensor_name()),
dense_shape_tensor);
}
} else {
if (gtl::InsertIfNotPresent(&signature_feed_nodes,
NodeName(input.name()))) {
TensorShape shape;
TensorShapeProto shape_proto;
Status s = ReplaceUnknownShapeDim(cfg, input.tensor_shape(),
&shape_proto, &shape);
if (!s.ok()) {
LOG(ERROR) << "Invalid shape for signature input " << input.name()
<< ": " << s << ", skipping this input";
return nullptr;
}
Tensor fake_input(input.dtype(), shape);
InitializeTensor(input.dtype(), &fake_input);
new_item->feed.emplace_back(NodeName(input.name()), fake_input);
}
}
}
for (const auto& name_and_output : name_and_signature.second.outputs()) {
const TensorInfo& output = name_and_output.second;
if (output.has_coo_sparse()) {
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().values_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().values_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().indices_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().indices_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().dense_shape_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().dense_shape_tensor_name()));
}
} else {
if (gtl::InsertIfNotPresent(&signature_fetch_nodes,
NodeName(output.name()))) {
new_item->fetch.push_back(NodeName(output.name()));
}
}
}
}
for (const auto& feed : new_item->feed) {
if (feed.first.empty()) {
LOG(ERROR) << "Invalid feed node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use feed node " << feed.first;
}
}
for (const auto& fetch : new_item->fetch) {
if (fetch.empty()) {
LOG(ERROR) << "Invalid fetch node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use fetch node " << fetch;
}
}
if (new_item->fetch.empty()) {
LOG(ERROR) << "Failed to detect the fetch node(s), skipping this input";
return nullptr;
}
for (const string& var_collection :
{"variables", "local_variables", "model_variables",
"trainable_variables"}) {
if (meta_graph.collection_def().count(var_collection) == 0) {
continue;
}
const CollectionDef& vars = meta_graph.collection_def().at(var_collection);
for (const auto& raw_var : vars.bytes_list().value()) {
VariableDef var;
var.ParseFromString(raw_var);
if (!var.initializer_name().empty()) {
new_item->init_ops.push_back(NodeName(var.initializer_name()));
}
}
}
if (meta_graph.collection_def().count("table_initializer") > 0) {
const CollectionDef& inits =
meta_graph.collection_def().at("table_initializer");
if (inits.has_node_list()) {
for (const auto& node : inits.node_list().value()) {
new_item->init_ops.push_back(NodeName(node));
new_item->expected_init_time += 30 * 60;
}
}
}
std::unordered_map<string, string> asset_node_to_value;
if (!cfg.assets_directory_override.empty()) {
if (meta_graph.collection_def().count("saved_model_assets") > 0) {
const CollectionDef& collection =
meta_graph.collection_def().at("saved_model_assets");
const auto& any_assets = collection.any_list().value();
if (!any_assets.empty()) {
if (std::is_base_of<protobuf::Message, AssetFileDef>()) {
for (const auto& any_asset : any_assets) {
AssetFileDef asset_file_def;
if (!ParseAny(any_asset, &asset_file_def, "tensorflow.AssetFileDef")
.ok()) {
LOG(ERROR) << "Failed to parse AssetFile.";
continue;
}
string asset_filepath = io::JoinPath(cfg.assets_directory_override,
asset_file_def.filename());
if (!FilesExist({asset_filepath}, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files "
<< asset_filepath << ", skipping this input";
return nullptr;
}
asset_node_to_value[NodeName(asset_file_def.tensor_info().name())] =
asset_filepath;
}
} else {
LOG(ERROR) << "Can't parse AssetFileDef when using lite protos.";
return nullptr;
}
}
}
} else if (meta_graph.collection_def().count("asset_filepaths") > 0) {
const CollectionDef& file_paths =
meta_graph.collection_def().at("asset_filepaths");
std::vector<string> paths;
for (const auto& raw_path : file_paths.bytes_list().value()) {
paths.push_back(raw_path);
}
if (!FilesExist(paths, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files, skipping "
"this input";
return nullptr;
}
}
if (meta_graph.collection_def().count("queue_runners") > 0) {
const CollectionDef& vars = meta_graph.collection_def().at("queue_runners");
for (const auto& raw : vars.bytes_list().value()) {
QueueRunnerDef queue_runner;
if (!queue_runner.ParseFromString(raw)) {
LOG(ERROR) << "Could not parse queue_runners, skipping this input";
return nullptr;
}
if (queue_runner.cancel_op_name().empty()) {
LOG(ERROR) << "Queue without a cancel op, skipping this input";
return nullptr;
}
new_item->queue_runners.push_back(queue_runner);
}
}
for (const auto& col : meta_graph.collection_def()) {
const CollectionDef& collection = col.second;
for (const string& node : collection.node_list().value()) {
new_item->keep_ops.push_back(NodeName(node));
}
}
for (auto& node : *new_item->graph.mutable_node()) {
if (IsPlaceholder(node) && node.op() != "PlaceholderWithDefault") {
Status s = UpdatePlaceholderShape(cfg, signature_feed_nodes,
new_item.get(), &node);
if (!s.ok()) return nullptr;
} else if (IsConstant(node)) {
auto it = asset_node_to_value.find(node.name());
if (it != asset_node_to_value.end()) {
auto iter = node.mutable_attr()->find("value");
if (iter == node.attr().end()) {
LOG(ERROR) << "Value attribute expected in const op for asset files";
return nullptr;
}
if (!iter->second.has_tensor() ||
iter->second.tensor().string_val_size() != 1) {
LOG(INFO) << "Unexpected AttrValue proto: "
<< iter->second.DebugString();
return nullptr;
}
LOG(INFO) << "Using asset file " << it->second << " for node "
<< node.name();
*(iter->second.mutable_tensor()->mutable_string_val(0)) = it->second;
}
}
node.mutable_attr()->erase("_output_shapes");
if (cfg.ignore_user_placement) {
node.clear_device();
}
if (cfg.ignore_colocation) {
auto attr = node.mutable_attr();
auto it = attr->find("_class");
if (it != attr->end()) {
attr->erase(it);
}
}
}
if (meta_graph.collection_def().count("savers") > 0) {
const CollectionDef& savers = meta_graph.collection_def().at("savers");
for (const auto& raw : savers.bytes_list().value()) {
SaverDef saver;
if (!saver.ParseFromString(raw)) {
continue;
}
if (saver.filename_tensor_name().empty()) {
continue;
}
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
break;
}
} else {
const SaverDef& saver = meta_graph.saver_def();
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
}
Status attr_status = AddDefaultAttrsToGraphDef(
&new_item->graph,
FunctionLibraryDefinition(OpRegistry::Global(),
new_item->graph.library()),
0, true);
if (!attr_status.ok()) {
LOG(ERROR) << "Failed to instantiate default attribute values: "
<< attr_status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph before RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
Status optimize_status =
RuntimeGraphOptimizer(new_item->graph, &new_item->graph, cfg);
if (!optimize_status.ok()) {
LOG(ERROR) << "Graph preprocessing failed: " << optimize_status;
return nullptr;
}
VLOG(1) << "Number of nodes in graph after RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
if (cfg.prune_graph) {
VLOG(1) << "Pruning graph...";
auto status = PruneGraph(new_item.get());
if (!status.ok()) {
LOG(ERROR) << "Pruning failed: " << status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph after pruning: "
<< new_item->graph.node_size();
}
std::unordered_set<string> nodes;
for (const auto& node : new_item->graph.node()) {
nodes.insert(node.name());
}
for (const auto& feed : new_item->feed) {
if (nodes.find(feed.first) == nodes.end()) {
LOG(ERROR) << "Feed node " << feed.first << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& fetch : new_item->fetch) {
if (nodes.find(fetch) == nodes.end()) {
LOG(ERROR) << "Fetch node " << fetch << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& init : new_item->init_ops) {
if (nodes.find(init) == nodes.end()) {
LOG(ERROR) << "Init node " << init << " doesn't exist in graph";
return nullptr;
}
}
return new_item;
}
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDefFile(
const string& id, const string& meta_graph_file, const ItemConfig& cfg) {
MetaGraphDef meta_graph;
if (!ReadMetaGraphDefFromFile(meta_graph_file, &meta_graph).ok()) {
LOG(ERROR) << "Failed to read " << meta_graph_file;
return nullptr;
}
return GrapplerItemFromMetaGraphDef(id, meta_graph, cfg);
}
}
} | #include "tensorflow/core/grappler/grappler_item_builder.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerItemBuilderTest : public ::testing::Test {};
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node =
ops::Const(s.WithOpName("filename"), string("model"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore = ops::Restore(s.WithOpName("restore"), filename_node,
tensor_name, DataType::DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), var, restore);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename");
env->DeleteFile(filename).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename));
LOG(INFO) << filename;
AssetFileDef asset_file_def;
*asset_file_def.mutable_tensor_info()->mutable_name() = "filename";
*asset_file_def.mutable_filename() = "grappler_item_builder_test_filename";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
for (const NodeDef &node : item->graph.node()) {
if (node.name() == "filename") {
const auto iter = node.attr().find("value");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_tensor());
ASSERT_EQ(1, iter->second.tensor().string_val_size());
string tensor_string_val = iter->second.tensor().string_val(0);
EXPECT_EQ(tensor_string_val, filename);
}
}
}
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest_FileNotAccessible) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node1 =
ops::Const(s.WithOpName("filename1"), string("model1"), TensorShape());
Output filename_node2 =
ops::Const(s.WithOpName("filename2"), string("model2"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore1 = ops::Restore(s.WithOpName("restore1"), filename_node1,
tensor_name, DataType::DT_FLOAT);
Output restore2 = ops::Restore(s.WithOpName("restore2"), filename_node2,
tensor_name, DataType::DT_FLOAT);
Output assign1 = ops::Assign(s.WithOpName("assign1"), var, restore1);
Output assign2 = ops::Assign(s.WithOpName("assign2"), var, restore2);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename1 =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename1");
env->DeleteFile(filename1).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename1, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename1));
AssetFileDef asset_file_def1;
*asset_file_def1.mutable_tensor_info()->mutable_name() = "filename1";
*asset_file_def1.mutable_filename() = "grappler_item_builder_test_filename1";
string filename2 =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename2");
env->DeleteFile(filename2).IgnoreError();
EXPECT_FALSE(env->FileExists(filename2).ok());
AssetFileDef asset_file_def2;
*asset_file_def2.mutable_tensor_info()->mutable_name() = "filename2";
*asset_file_def2.mutable_filename() = "grappler_item_builder_test_filename2";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def1);
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def2);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign1";
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign2";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithFunctions) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}},
device)},
{
test::function::XTimesTwo(),
});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithCustomOps) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "CustomOp", {"x"}, {{"T", DT_FLOAT}}, device)},
{});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
TensorInfo input2, output2;
input2.set_name("x");
input2.set_dtype(DT_FLOAT);
output2.set_name("z");
SignatureDef serving_signature2;
(*serving_signature2.mutable_inputs())["input2"] = input2;
(*serving_signature2.mutable_outputs())["output2"] = output2;
(*meta_graph.mutable_signature_def())["serving2"] = serving_signature2;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithIncompleteSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.mutable_coo_sparse()->set_values_tensor_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithUnknownDimInSignatureInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto shape_1d = PartialTensorShape({-1});
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape(shape_1d));
auto y = ops::Const(s.WithOpName("y"), static_cast<float>(1.0));
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
shape_1d.AsProto(input.mutable_tensor_shape());
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item1 =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item1 != nullptr);
ASSERT_EQ(item1->feed.size(), 1);
EXPECT_EQ(item1->feed[0].second.NumElements(), 64);
std::unique_ptr<GrapplerItem> item2 =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item2 != nullptr);
ASSERT_EQ(item2->feed.size(), 1);
EXPECT_EQ(item2->feed[0].second.NumElements(), 1);
}
TEST_F(GrapplerItemBuilderTest, ExplicitFeedAndFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
ItemConfig config;
config.feed_nodes.insert("x");
config.fetch_nodes.insert("z");
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, config);
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, UnknownRankPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_TRUE(shape.unknown_rank());
}
TEST_F(GrapplerItemBuilderTest, ConfigPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value {
shape {
dim {
size: -1
}
dim {
size: -1
}
}
} }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 64);
}
TEST_F(GrapplerItemBuilderTest, OutputShapePlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
attr { key: "_output_shapes" value { list {
shape {
dim {
size: -1
}
dim {
size: 32
}
}
} } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 32);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
442c8e38-f2cc-4d7b-8e1c-cf95d427ea3c | cpp | tensorflow/tensorflow | cost_estimator | tensorflow/core/grappler/costs/cost_estimator.cc | tensorflow/core/grappler/costs/cost_estimator_test.cc | #include "tensorflow/core/grappler/costs/cost_estimator.h"
namespace tensorflow {
namespace grappler {
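// Returns the sequential composition of two cost estimates: times, op counts,
// and max_memory add up, while per-op buffer/streaming peaks take the maximum
// of the two sides.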
Costs CombineCosts(const Costs& left, const Costs& right) {
CHECK_NE(left.max_memory, kMemoryUnknown);
CHECK_NE(left.max_per_op_buffers, kMemoryUnknown);
CHECK_NE(left.max_per_op_streaming, kMemoryUnknown);
Costs result = left;
result.execution_time += right.execution_time;
result.compute_time += right.compute_time;
result.memory_time += right.memory_time;
result.network_time += right.network_time;
result.intermediate_memory_time += right.intermediate_memory_time;
result.intermediate_memory_read_time += right.intermediate_memory_read_time;
result.intermediate_memory_write_time += right.intermediate_memory_write_time;
if (right.max_per_op_buffers != kMemoryUnknown) {
result.max_per_op_buffers =
std::max(left.max_per_op_buffers, right.max_per_op_buffers);
}
if (right.max_per_op_streaming != kMemoryUnknown) {
result.max_per_op_streaming =
std::max(left.max_per_op_streaming, right.max_per_op_streaming);
}
result.num_ops_total += right.num_ops_total;
if (right.inaccurate) {
result.inaccurate = true;
}
result.num_ops_with_unknown_shapes += right.num_ops_with_unknown_shapes;
if (right.max_memory != kMemoryUnknown) {
result.max_memory += right.max_memory;
}
return result;
}
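// Scales every time component of `costs` by `multiplier`; memory statistics
// and op counts are left unchanged. A multiplier of 0 yields zero costs.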
Costs MultiplyCosts(const Costs& costs, int multiplier) {
CHECK_GE(multiplier, 0);
if (multiplier == 0) {
return Costs::ZeroCosts();
}
if (multiplier == 1) {
return costs;
}
Costs result = costs;
result.execution_time *= multiplier;
result.compute_time *= multiplier;
result.memory_time *= multiplier;
result.network_time *= multiplier;
result.intermediate_memory_time *= multiplier;
result.intermediate_memory_read_time *= multiplier;
result.intermediate_memory_write_time *= multiplier;
return result;
}
}
} | #include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(CostEstimatorTest, CombineCosts) {
Costs c = Costs::ZeroCosts();
c.execution_time = Costs::NanoSeconds(1);
c.compute_time = Costs::NanoSeconds(2);
c.memory_time = Costs::NanoSeconds(3);
c.intermediate_memory_time = Costs::NanoSeconds(4);
c.intermediate_memory_read_time = Costs::NanoSeconds(5);
c.intermediate_memory_write_time = Costs::NanoSeconds(6);
c.max_memory = 1;
c.max_per_op_buffers = 2;
c.max_per_op_streaming = 3;
c.num_ops_total = 1;
c.inaccurate = false;
c.num_ops_with_unknown_shapes = 0;
Costs sum = CombineCosts(c, c);
EXPECT_EQ(sum.execution_time, Costs::NanoSeconds(2));
EXPECT_EQ(sum.compute_time, Costs::NanoSeconds(4));
EXPECT_EQ(sum.memory_time, Costs::NanoSeconds(6));
EXPECT_EQ(sum.intermediate_memory_time, Costs::NanoSeconds(8));
EXPECT_EQ(sum.intermediate_memory_read_time, Costs::NanoSeconds(10));
EXPECT_EQ(sum.intermediate_memory_write_time, Costs::NanoSeconds(12));
EXPECT_EQ(sum.max_memory, 2);
EXPECT_EQ(sum.max_per_op_buffers, 2);
EXPECT_EQ(sum.max_per_op_streaming, 3);
EXPECT_EQ(sum.num_ops_total, 2);
EXPECT_FALSE(sum.inaccurate);
EXPECT_EQ(sum.num_ops_with_unknown_shapes, 0);
}
TEST(CostEstimatorTest, MultiplyCosts) {
Costs c = Costs::ZeroCosts();
c.execution_time = Costs::NanoSeconds(1);
c.compute_time = Costs::NanoSeconds(2);
c.memory_time = Costs::NanoSeconds(3);
c.intermediate_memory_time = Costs::NanoSeconds(4);
c.intermediate_memory_read_time = Costs::NanoSeconds(5);
c.intermediate_memory_write_time = Costs::NanoSeconds(6);
c.max_memory = 1;
c.max_per_op_buffers = 2;
c.max_per_op_streaming = 3;
c.num_ops_total = 1;
c.inaccurate = false;
c.num_ops_with_unknown_shapes = 0;
Costs product = MultiplyCosts(c, 10);
EXPECT_EQ(product.execution_time, Costs::NanoSeconds(10));
EXPECT_EQ(product.compute_time, Costs::NanoSeconds(20));
EXPECT_EQ(product.memory_time, Costs::NanoSeconds(30));
EXPECT_EQ(product.intermediate_memory_time, Costs::NanoSeconds(40));
EXPECT_EQ(product.intermediate_memory_read_time, Costs::NanoSeconds(50));
EXPECT_EQ(product.intermediate_memory_write_time, Costs::NanoSeconds(60));
EXPECT_EQ(product.max_memory, 1);
EXPECT_EQ(product.max_per_op_buffers, 2);
EXPECT_EQ(product.max_per_op_streaming, 3);
EXPECT_EQ(product.num_ops_total, 1);
EXPECT_FALSE(product.inaccurate);
EXPECT_EQ(product.num_ops_with_unknown_shapes, 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/cost_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/cost_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab750f69-b902-438c-9461-e3c414849229 | cpp | tensorflow/tensorflow | analytical_cost_estimator | tensorflow/core/grappler/costs/analytical_cost_estimator.cc | tensorflow/core/grappler/costs/analytical_cost_estimator_test.cc | #include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include <limits>
#include <unordered_map>
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
namespace grappler {
namespace {
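// Adds (or updates) a CostGraphDef node for the op that just executed,
// recording predicted compute/memory times, input edges resolved through the
// name-to-id map, and per-output dtype/shape/size information.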
Status AddCostNode(ReadyNodeManager* node_manager, const OpContext& op_context,
int node_id, const Costs& node_costs,
gtl::FlatMap<string, CostGraphDef::Node*>* name_to_cost_node,
gtl::FlatMap<string, int>* name_to_id,
CostGraphDef* cost_graph) {
const string& op_name = op_context.name;
auto it = name_to_cost_node->find(op_name);
CostGraphDef::Node* node;
if (it != name_to_cost_node->end()) {
node = it->second;
node->clear_input_info();
node->clear_output_info();
} else {
node = cost_graph->add_node();
(*name_to_cost_node)[op_name] = node;
node->set_name(op_name);
node->set_id(node_id);
(*name_to_id)[node->name()] = node->id();
}
node->set_device(op_context.device_name);
node->set_compute_cost(node_costs.execution_time.asMicroSeconds().count());
node->set_compute_time(node_costs.compute_time.asMicroSeconds().count());
node->set_memory_time(node_costs.memory_time.asMicroSeconds().count());
node->set_temporary_memory_size(node_costs.temporary_memory);
node->set_persistent_memory_size(node_costs.persistent_memory);
node->set_inaccurate(node_costs.inaccurate);
for (const string& input : node_manager->GetCurrNode()->input()) {
int input_port;
string input_name = ParseNodeName(input, &input_port);
if (name_to_id->find(input_name) == name_to_id->end()) {
if (!IsMerge(*node_manager->GetCurrNode()))
VLOG(1) << "input: " << input
<< " not found for non-Merge node: " << op_name;
continue;
}
if (IsControlInput(input)) {
node->add_control_input(name_to_id->at(input_name));
} else {
auto* input_info = node->add_input_info();
input_info->set_preceding_node(name_to_id->at(input_name));
input_info->set_preceding_port(input_port);
}
}
for (const auto& output : op_context.op_info.outputs()) {
auto output_info = node->add_output_info();
output_info->set_alias_input_port(-1);
output_info->set_dtype(output.dtype());
*output_info->mutable_shape() = output.shape();
int64_t size = DataTypeSize(output.dtype());
for (const auto& dim : output.shape().dim()) {
size = MultiplyWithoutOverflow(size, std::max<int64_t>(1, dim.size()));
if (size < 0) {
return errors::InvalidArgument(
"Integer overflow encountered in dimension size.");
}
}
output_info->set_size(size);
}
return absl::OkStatus();
}
}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, bool use_static_shapes,
bool use_aggressive_shape_inference)
: AnalyticalCostEstimator(
cluster, std::make_unique<OpLevelCostEstimator>(),
ReadyNodeManagerFactory("FirstReady"), use_static_shapes,
use_aggressive_shape_inference) {}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager, bool use_static_shapes,
bool use_aggressive_shape_inference)
: node_estimator_(std::move(node_estimator)),
node_manager_(std::move(node_manager)),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference) {
scheduler_ = std::make_unique<VirtualScheduler>(
use_static_shapes_, use_aggressive_shape_inference_, cluster,
node_manager_.get(),
std::make_unique<VirtualPlacer>(cluster->GetDevices()));
}
AnalyticalCostEstimator::AnalyticalCostEstimator(
Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager,
std::unique_ptr<VirtualPlacer> placer, bool use_static_shapes,
bool use_aggressive_shape_inference)
: node_estimator_(std::move(node_estimator)),
node_manager_(std::move(node_manager)),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference) {
scheduler_ = std::make_unique<VirtualScheduler>(
use_static_shapes_, use_aggressive_shape_inference_, cluster,
node_manager_.get(), std::move(placer));
}
Status AnalyticalCostEstimator::Initialize(const GrapplerItem& item) {
item_ = &item;
return absl::OkStatus();
}
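// Runs the virtual scheduler over the optimized graph, accumulating per-node
// cost predictions into `costs` and, if requested, a cost graph and step
// stats in `run_metadata`.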
Status AnalyticalCostEstimator::PredictCosts(const GraphDef& optimized_graph,
RunMetadata* run_metadata,
Costs* costs) const {
std::unique_ptr<GrapplerItem> item_storage;
const GrapplerItem* item;
if (&optimized_graph == &item_->graph) {
item = item_;
} else {
GraphDef graph_copy = optimized_graph;
item_storage = std::make_unique<GrapplerItem>(
item_->WithGraph(std::move(graph_copy)));
item = item_storage.get();
}
auto status = scheduler_->Init(item);
if (!status.ok()) {
if (costs) {
costs->execution_time = Costs::Duration::max();
}
return status;
}
gtl::FlatMap<string, CostGraphDef::Node*> name_to_cost_node;
CostGraphDef* cost_graph = nullptr;
if (run_metadata) {
cost_graph = run_metadata->mutable_cost_graph();
for (auto& node : *cost_graph->mutable_node()) {
name_to_cost_node[node.name()] = &node;
}
}
std::vector<string> inaccurate_nodes;
int nodes_executed = 0;
int node_id = 0;
gtl::FlatMap<string, int> name_to_id;
Costs node_costs;
do {
++nodes_executed;
OpContext op_context = scheduler_->GetCurrNode();
node_costs = node_estimator_->PredictCosts(op_context);
if (node_costs.inaccurate) {
inaccurate_nodes.push_back(op_context.name);
if (node_costs.num_ops_with_unknown_shapes > 0)
VLOG(4) << op_context.name << " has "
<< node_costs.num_ops_with_unknown_shapes << " unknown shapes";
}
if (cost_graph) {
Status s =
AddCostNode(node_manager_.get(), op_context, node_id++, node_costs,
&name_to_cost_node, &name_to_id, cost_graph);
if (!s.ok()) {
return s;
}
}
} while (scheduler_->MarkCurrNodeExecuted(node_costs));
VLOG(1) << inaccurate_nodes.size() << " out of " << nodes_executed
<< " nodes have inaccurate time estimation";
if (VLOG_IS_ON(3)) {
for (const auto& node : inaccurate_nodes) {
VLOG(4) << "Node with inaccurate time estimation: " << node;
}
}
if (costs) {
*costs = scheduler_->Summary(run_metadata);
} else if (run_metadata) {
scheduler_->GenerateRunMetadata(run_metadata);
}
if (VLOG_IS_ON(1)) {
bool verbose = VLOG_IS_ON(2);
if (run_metadata) {
VLOG(1) << GetStatsStringFromRunMetadata(*run_metadata, verbose);
} else {
RunMetadata run_metadata;
scheduler_->GenerateRunMetadata(&run_metadata);
VLOG(1) << GetStatsStringFromRunMetadata(run_metadata, verbose);
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class AnalyticalCostEstimatorTest : public ::testing::Test {
protected:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_num_cores(4);
cpu_device.set_frequency(2600);
cpu_device.set_bandwidth(24 * 1024 * 1024);
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.set_num_cores(12);
gpu_device.set_frequency(1100);
gpu_device.set_bandwidth(180 * 1024 * 1024);
(*gpu_device.mutable_environment())["architecture"] = "6";
devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
cluster_.reset(new VirtualCluster(devices));
}
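// Builds a small conv + bias + relu + matmul + softmax graph (28x28x1 inputs,
// 10 labels) whose costs are easy to estimate deterministically.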
GrapplerItem CreateMiniGraph() {
const int batch = 1;
const int width = 28;
const int height = 28;
const int num_channels = 1;
const int num_labels = 10;
const int kernel_size = 3;
const int conv_filters = 32;
Scope s = Scope::NewRootScope();
auto images = ops::RandomUniform(
s.WithOpName("image"), {batch, width, height, num_channels}, DT_FLOAT);
auto labels = ops::RandomUniform(s.WithOpName("label"), {batch, num_labels},
DT_FLOAT);
auto w = ops::Variable(
s.WithOpName("W"),
{kernel_size, kernel_size, num_channels, conv_filters}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("B"), {conv_filters}, DT_FLOAT);
auto conv =
ops::Conv2D(s.WithOpName("conv"), images, w, {1, 1, 1, 1}, "SAME");
auto bias = ops::Add(s.WithOpName("bias"), conv, b);
auto relu = ops::Relu(s.WithOpName("relu"), bias);
auto flat_shape = ops::Const(s.WithOpName("flat_shape"),
{batch, width * height * conv_filters});
auto flat = ops::Reshape(s.WithOpName("flat"), relu, flat_shape);
auto w2 =
ops::Variable(s.WithOpName("W2"),
{width * height * conv_filters, num_labels}, DT_FLOAT);
auto b2 = ops::Variable(s.WithOpName("B2"), {num_labels}, DT_FLOAT);
auto matmul = ops::MatMul(s.WithOpName("matmul"), flat, w2);
auto logits = ops::Add(s.WithOpName("logits"), matmul, b2);
auto softmax = ops::Softmax(s.WithOpName("softmax"), logits);
auto lsm = ops::Log(s.WithOpName("lsm"), softmax);
GrapplerItem item;
item.fetch.push_back("lsm");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
return item;
}
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(AnalyticalCostEstimatorTest, SimpleTest) {
GrapplerItem item = CreateMiniGraph();
AnalyticalCostEstimator estimator(cluster_.get(), true,
true);
TF_ASSERT_OK(estimator.Initialize(item));
RunMetadata run_metadata;
Costs summary;
TF_ASSERT_OK(estimator.PredictCosts(item.graph, &run_metadata, &summary));
EXPECT_EQ(Costs::NanoSeconds(9158), summary.execution_time);
EXPECT_EQ(15, summary.num_ops_total);
EXPECT_TRUE(summary.inaccurate);
EXPECT_EQ(0, summary.num_ops_with_unknown_shapes);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/analytical_cost_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/analytical_cost_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b74e393-915d-4eaa-9c6c-9893a7f52c8c | cpp | tensorflow/tensorflow | virtual_scheduler | tensorflow/core/grappler/costs/virtual_scheduler.cc | tensorflow/core/grappler/costs/virtual_scheduler_test.cc | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
const char kAttrInputSrc[] = "input_source_";
const char kAttrSrcDevice[] = "send_device";
const char kAttrDstDevice[] = "recv_device";
const char kAttrTensorName[] = "tensor_name";
const char kChannelDevice[] = "Channel";
const char kStreaming[] = "_streaming";
namespace {
using ::tensorflow::strings::HumanReadableNumBytes;
float Round2(const float x) {
return ::round(100.0 * x) / 100.0;
}
Costs& FindOrCreateZero(const string& op_name,
std::map<string, Costs>* op_cost) {
auto it = op_cost->find(op_name);
if (it == op_cost->end()) {
it = op_cost->emplace(op_name, Costs::ZeroCosts()).first;
}
return it->second;
}
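// Key type used to deduplicate _Recv nodes: one Recv is shared by all
// consumers of the same (source node, output port, destination device).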
struct RecvNodeDescriptor {
const NodeDef* node;
const int port_num;
const string device;
RecvNodeDescriptor(const NodeDef* node_, const int port_num_,
const string& device_)
: node(node_), port_num(port_num_), device(device_) {}
};
struct RecvNodeDescriptorHash {
std::size_t operator()(const RecvNodeDescriptor& recv_node) const {
return std::hash<const NodeDef*>()(recv_node.node) ^
std::hash<int>()(recv_node.port_num) ^
std::hash<string>()(recv_node.device);
}
};
struct RecvNodeDescriptorEqual {
bool operator()(const RecvNodeDescriptor& a,
const RecvNodeDescriptor& b) const {
return a.node == b.node && a.port_num == b.port_num && a.device == b.device;
}
};
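// Updates per-device statistics about shape-annotated ops (execution counts,
// dynamic shapes, and incompatible shapes) after a node executes.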
void UpdateDeviceAnnotationState(const NodeDef* node,
const NodeState& node_state,
DeviceState* device) {
if (node->attr().count(kOutputShapes) == 0) return;
int64_t execution_count = node->attr().count(kExecutionCount) == 0
? 1
: node->attr().at(kExecutionCount).i();
auto& shape_annotation_stats = device->shape_annotation_stats;
shape_annotation_stats.num_ops_annotated += 1;
shape_annotation_stats.num_ops_executed += execution_count;
shape_annotation_stats.num_ops_executed_more_than_once +=
execution_count > 1 ? 1 : 0;
shape_annotation_stats.num_ops_with_incompatible_shapes +=
node_state.shape_incompatible ? 1 : 0;
shape_annotation_stats.num_ops_with_dynamic_shapes +=
(execution_count > 1 && node->attr().count(kOutputSame) == 0) ? 1 : 0;
}
bool IsStreamingPort(const NodeDef& node, const int port) {
if (!node.attr().contains(kStreaming)) return false;
auto& attr_list = node.attr().at(kStreaming).list();
bool is_streaming_port = false;
if (port >= 0 && port < attr_list.b().size()) {
is_streaming_port = attr_list.b(port);
}
return is_streaming_port;
}
}
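// Merge nodes get the lowest priority: they are placed at the front of the
// list so that every ready input of a Merge is scheduled before the Merge
// itself.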
void LIFOManager::AddNode(const NodeDef* node) {
if (IsMerge(*node)) {
nodes_.push_front(node);
} else {
nodes_.push_back(node);
}
}
const NodeDef* LIFOManager::GetCurrNode() {
CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
if (curr_pos_ == nodes_.end()) {
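    // rbegin().base() is one past the last element; decrementing it yields an
    // iterator to the last node in the list.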
curr_pos_ = --(nodes_.rbegin().base());
}
return *curr_pos_;
}
void LIFOManager::RemoveCurrNode() {
GetCurrNode();
nodes_.erase(curr_pos_);
curr_pos_ = nodes_.end();
}
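// HeapReadyManager keeps ready nodes in a heap ordered by the comparator
// returned from Greater(), so GetCurrNode() pops the highest-priority node
// and caches it until RemoveCurrNode() is called.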
HeapReadyManager::HeapReadyManager() : ReadyNodeManager() {
std::make_heap(nodes_.begin(), nodes_.end());
}
Status HeapReadyManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
nodes_.clear();
curr_node_ = nullptr;
greater_ = Greater();
return absl::OkStatus();
}
void HeapReadyManager::AddNode(const NodeDef* node) {
nodes_.push_back(node);
std::push_heap(nodes_.begin(), nodes_.end(), greater_);
}
const NodeDef* HeapReadyManager::GetCurrNode() {
if (curr_node_) return curr_node_;
  CHECK(!nodes_.empty()) << "GetCurrNode(), but there's no ready node";
curr_node_ = nodes_.front();
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
return curr_node_;
}
void HeapReadyManager::RemoveCurrNode() {
if (curr_node_) {
curr_node_ = nullptr;
} else {
std::pop_heap(nodes_.begin(), nodes_.end(), greater_);
nodes_.pop_back();
}
}
bool HeapReadyManager::Empty() const {
return nodes_.empty() && curr_node_ == nullptr;
}
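// Returns true if node `a` should be scheduled after node `b`: earlier
// time_ready wins, with the node name as a deterministic tie-breaker.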
bool FirstReadyCmp(
const std::unordered_map<const NodeDef*, NodeState>* node_map,
const NodeDef* a, const NodeDef* b) {
if (node_map->at(a).time_ready == node_map->at(b).time_ready) {
return a->name().compare(b->name()) > 0;
} else {
return node_map->at(a).time_ready > node_map->at(b).time_ready;
}
}
std::function<bool(const NodeDef*, const NodeDef*)>
FirstReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
return FirstReadyCmp(node_map_, a, b);
};
return greater;
}
std::function<bool(const NodeDef*, const NodeDef*)>
PriorityReadyManager::Greater() {
auto greater = [this](const NodeDef* a, const NodeDef* b) -> bool {
auto pri_a = node_priority_.at(a->name());
auto pri_b = node_priority_.at(b->name());
if (pri_a == pri_b) {
return FirstReadyCmp(node_map_, a, b);
}
return pri_a > pri_b;
};
return greater;
}
void PriorityReadyManager::AddNode(const NodeDef* node) {
if (node_priority_.count(node->name()) == 0) {
VLOG(3) << "Priority of node " << node->name() << " not found.";
node_priority_[node->name()] = 0;
}
HeapReadyManager::AddNode(node);
}
Status PriorityReadyManager::SetPriority(
const std::unordered_map<string, int>& node_priority) {
node_priority_ = node_priority;
return absl::OkStatus();
}
CompositeNodeManager::CompositeNodeManager()
: ReadyNodeManager(), send_manager_(), recv_manager_() {}
Status CompositeNodeManager::Init(
const std::unordered_map<const NodeDef*, NodeState>* node_map) {
node_map_ = node_map;
TF_RETURN_IF_ERROR(send_manager_.Init(node_map));
TF_RETURN_IF_ERROR(recv_manager_.Init(node_map));
curr_node_ = nullptr;
return absl::OkStatus();
}
void CompositeNodeManager::AddNode(const NodeDef* node) {
if (IsSend(*node)) {
send_manager_.AddNode(node);
} else if (IsRecv(*node)) {
recv_manager_.AddNode(node);
} else {
const auto& device = node_map_->at(node).device_name;
ops_lifo_map_[device].AddNode(node);
}
}
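// Picks the earliest-ready candidate across the per-device LIFOs and the
// Send/Recv managers; ties prefer Send over Recv over other ops, then fall
// back to name order for determinism.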
const NodeDef* CompositeNodeManager::GetCurrNode() {
if (curr_node_) return curr_node_;
std::vector<std::pair<const NodeDef*, Costs::Duration>> candidates;
for (auto& ops_lifo : ops_lifo_map_) {
if (!ops_lifo.second.Empty()) {
const auto* op = ops_lifo.second.GetCurrNode();
candidates.emplace_back(op, node_map_->at(op).time_ready);
}
}
if (!send_manager_.Empty()) {
const auto* send = send_manager_.GetCurrNode();
candidates.emplace_back(send, node_map_->at(send).time_ready);
}
if (!recv_manager_.Empty()) {
const auto* recv = recv_manager_.GetCurrNode();
candidates.emplace_back(recv, node_map_->at(recv).time_ready);
}
CHECK(!candidates.empty());
auto first_ready = std::min_element(
candidates.begin(), candidates.end(),
[](const std::pair<const NodeDef*, Costs::Duration>& a,
const std::pair<const NodeDef*, Costs::Duration>& b) {
if (a.second == b.second) {
int a_score = 2 * IsSend(*a.first) + IsRecv(*a.first);
int b_score = 2 * IsSend(*b.first) + IsRecv(*b.first);
if (a_score == b_score) {
return a.first->name().compare(b.first->name()) < 0;
} else {
return a_score > b_score;
}
} else {
return a.second < b.second;
}
});
curr_node_ = first_ready->first;
return curr_node_;
}
void CompositeNodeManager::RemoveCurrNode() {
const auto* node = GetCurrNode();
if (IsSend(*node)) {
send_manager_.RemoveCurrNode();
} else if (IsRecv(*node)) {
recv_manager_.RemoveCurrNode();
} else {
const auto device = node_map_->at(node).device_name;
ops_lifo_map_[device].RemoveCurrNode();
}
curr_node_ = nullptr;
}
bool CompositeNodeManager::Empty() const {
bool empty = true;
for (const auto& ops_lifo : ops_lifo_map_) {
empty &= ops_lifo.second.Empty();
}
return empty && send_manager_.Empty() && recv_manager_.Empty();
}
std::unique_ptr<ReadyNodeManager> ReadyNodeManagerFactory(
const string& ready_node_manager) {
if (ready_node_manager == "FIFO") {
return std::make_unique<FIFOManager>();
} else if (ready_node_manager == "LIFO") {
return std::make_unique<LIFOManager>();
} else if (ready_node_manager == "FirstReady") {
return std::make_unique<FirstReadyManager>();
} else if (ready_node_manager == "Composite") {
return std::make_unique<CompositeNodeManager>();
}
LOG(FATAL) << "Not a valid ready node manager: " << ready_node_manager;
return nullptr;
}
SchedulerState::~SchedulerState() {}
SchedulerState::SchedulerState(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
Cluster* cluster,
std::unique_ptr<VirtualPlacer> placer)
: graph_costs_(Costs::ZeroCosts()),
cluster_(cluster),
use_static_shapes_(use_static_shapes),
use_aggressive_shape_inference_(use_aggressive_shape_inference),
placer_(std::move(placer)) {
DCHECK(placer_);
graph_costs_.num_ops_total = 0;
initialized_ = false;
track_mem_usage_snapshot_ = VLOG_IS_ON(1);
}
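// Runs shape inference, expands cross-device edges into explicit _Send/_Recv
// pairs, initializes per-node state, and collects the initially-ready nodes
// (feed nodes and nodes with no inputs) into `initial_nodes`.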
Status SchedulerState::Init(const GrapplerItem* item,
std::vector<const NodeDef*>* initial_nodes,
bool create_explicit_channel_device) {
initialized_ = false;
node_map_.clear();
device_.clear();
additional_nodes_.clear();
graph_costs_ = Costs::ZeroCosts();
graph_costs_.num_ops_total = 0;
op_to_cost_.clear();
op_counts_.clear();
op_costs_.clear();
initial_nodes->clear();
graph_properties_ = std::make_unique<GraphProperties>(*item);
if (use_static_shapes_) {
TF_RETURN_IF_ERROR(graph_properties_->InferStatically(
true, use_aggressive_shape_inference_, true));
} else {
TF_RETURN_IF_ERROR(graph_properties_->InferDynamically(cluster_));
}
grappler_item_ = item;
const auto& graph = grappler_item_->graph;
const auto& fetch_nodes = grappler_item_->fetch;
std::set<string> feed_nodes;
for (const auto& f : grappler_item_->feed) {
auto iter_and_inserted_flag = feed_nodes.insert(f.first);
QCHECK(iter_and_inserted_flag.second)
<< "Duplicate feed node found: " << f.first;
}
std::unordered_map<string, const NodeDef*> name_to_node;
std::vector<const NodeDef*> fetch_fanin_nodes;
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph, fetch_nodes, &name_to_node,
&fetch_fanin_nodes));
std::unordered_map<string, const NodeDef*> name_to_send;
for (const auto& node : graph.node()) {
if (IsSend(node)) {
const auto& attr = node.attr();
name_to_send[attr.at("tensor_name").s()] = &node;
}
}
std::unordered_map<RecvNodeDescriptor, const NodeDef*, RecvNodeDescriptorHash,
RecvNodeDescriptorEqual>
cached_recv_nodes;
for (const auto* curr_node : fetch_fanin_nodes) {
auto& curr_node_state = GetNodeStateOrCreateIt(curr_node);
const string curr_node_device = DeviceName(curr_node);
std::vector<string> inputs;
if (IsRecv(*curr_node)) {
const auto& attr = curr_node->attr();
if (attr.count("tensor_name")) {
const auto& send_node_name = attr.at("tensor_name").s();
auto it = name_to_send.find(send_node_name);
if (it != name_to_send.end()) {
const NodeDef* send = it->second;
inputs = {send->name()};
}
}
} else {
for (const string& input : curr_node->input()) {
inputs.push_back(input);
}
}
for (const string& input_node_name : inputs) {
const string node_name = NodeName(input_node_name);
const NodeDef* input_node = name_to_node[node_name];
if (input_node == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("Unknown node: ", node_name));
}
const string in_device = DeviceName(input_node);
const auto input_node_port_num = NodePosition(input_node_name);
if (curr_node_device == in_device || IsControlInput(input_node_name)) {
curr_node_state.inputs.push_back(
std::make_pair(input_node, input_node_port_num));
auto& input_node_state = GetNodeStateOrCreateIt(input_node);
input_node_state.outputs[input_node_port_num].push_back(curr_node);
} else {
RecvNodeDescriptor recv_node(input_node, input_node_port_num,
curr_node_device);
auto it = cached_recv_nodes.find(recv_node);
if (it != cached_recv_nodes.end()) {
const NodeDef* recv_op = it->second;
curr_node_state.inputs.push_back(std::make_pair(recv_op, 0));
auto& input_node_state = node_map_.at(recv_op);
input_node_state.outputs[0].push_back(curr_node);
} else {
auto send_and_recv =
CreateSendRecv(input_node, curr_node, input_node, input_node_name,
create_explicit_channel_device);
const auto* send = send_and_recv.first;
const auto* recv = send_and_recv.second;
curr_node_state.inputs.push_back(std::make_pair(recv, 0));
auto& input_node_state = GetNodeStateOrCreateIt(input_node);
input_node_state.outputs[input_node_port_num].push_back(send);
cached_recv_nodes[recv_node] = recv;
}
}
}
const bool given_as_feed =
feed_nodes.find(curr_node->name()) != feed_nodes.end();
const bool has_no_inputs = inputs.empty();
if (given_as_feed || has_no_inputs) {
curr_node_state.time_ready = Costs::Duration();
initial_nodes->push_back(curr_node);
VLOG(3) << "Added ready node: " << curr_node->name();
}
feed_nodes.erase(curr_node->name());
if (IsPersistent(*curr_node)) {
auto& device_state = device_[curr_node_device];
for (int port_num = 0,
port_num_end = curr_node_state.output_properties.size();
port_num < port_num_end; ++port_num) {
device_state.persistent_nodes.insert(
std::make_pair(curr_node, port_num));
}
}
}
if (initial_nodes->empty()) {
return errors::InvalidArgument("No ready nodes in the graph.");
}
if (!feed_nodes.empty()) {
VLOG(1) << "Some feed nodes were not consumed by the fetch fanin: "
<< absl::StrJoin(feed_nodes, ",");
}
initialized_ = true;
return absl::OkStatus();
}
void SchedulerState::MaybeUpdateInputOutput(const NodeDef* node) {
CHECK(!initialized_) << "MaybeUpdateInputOutput is called after Init().";
if ((IsSend(*node) || IsRecv(*node)) && node->attr().count(kAttrInputSrc)) {
auto& node_state = node_map_[node];
auto& inputs = node_state.input_properties;
auto& outputs = node_state.output_properties;
CHECK(inputs.empty());
CHECK(outputs.empty());
const auto& attr = node->attr();
const auto& input_source_name = attr.at(kAttrInputSrc).s();
if (IsControlInput(input_source_name)) {
OpInfo::TensorProperties control_message;
control_message.set_dtype(DT_FLOAT);
control_message.mutable_shape()->add_dim()->set_size(1);
auto* value = control_message.mutable_value();
value->add_float_val(1);
inputs.push_back(control_message);
outputs.push_back(control_message);
} else {
const auto& output_properties =
graph_properties_->GetOutputProperties(NodeName(input_source_name));
if (!output_properties.empty()) {
const auto input_node_port_num = NodePosition(input_source_name);
CHECK_GT(output_properties.size(), input_node_port_num);
inputs.push_back(output_properties[input_node_port_num]);
outputs.push_back(output_properties[input_node_port_num]);
}
}
}
}
string SchedulerState::DeviceName(const NodeDef* node) const {
return placer_->get_canonical_device_name(*node);
}
string SchedulerState::SanitizedDeviceName(const NodeDef* node) const {
return absl::StrReplaceAll(placer_->get_canonical_device_name(*node),
{{":", "_"}});
}
string SchedulerState::ChannelDeviceName(const NodeDef* from,
const NodeDef* to) const {
CHECK(!initialized_) << "ChannelDeviceName is called after Init().";
return absl::StrCat(kChannelDevice, "_from_", SanitizedDeviceName(from),
"_to_", SanitizedDeviceName(to));
}
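// Creates a _Send/_Recv pair to model the transfer of `input_name` from
// `from`'s device to `to`'s device. The Send is optionally placed on an
// explicit Channel device so transfer time is accounted for separately.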
std::pair<const NodeDef*, const NodeDef*> SchedulerState::CreateSendRecv(
const NodeDef* from, const NodeDef* to, const NodeDef* input_node,
const string& input_name, bool create_channel_device) {
CHECK(!initialized_) << "CreateSendRecv is called after Init().";
auto input_node_port_num = NodePosition(input_name);
string src_name;
bool control_input = false;
if (input_node_port_num >= 0) {
src_name = absl::StrCat(from->name(), "_", input_node_port_num);
} else {
src_name = absl::StrCat(from->name(), "_minus1");
control_input = true;
}
auto* send = new NodeDef();
send->set_name("Send_" + src_name + "_from_" + SanitizedDeviceName(from) +
"_to_" + SanitizedDeviceName(to));
send->set_op("_Send");
send->add_input(from->name());
auto send_device =
create_channel_device ? ChannelDeviceName(from, to) : DeviceName(from);
send->set_device(send_device);
auto& send_attr = *(send->mutable_attr());
send_attr[kAttrInputSrc].set_s(input_name);
send_attr[kAttrSrcDevice].set_s(DeviceName(from));
send_attr[kAttrDstDevice].set_s(DeviceName(to));
if (input_node->attr().count(kAttrTensorName)) {
send_attr[kAttrTensorName].set_s(
input_node->attr().at(kAttrTensorName).s());
}
auto* recv = new NodeDef();
recv->set_name("Recv_" + src_name + "_on_" + SanitizedDeviceName(to));
recv->set_op("_Recv");
recv->add_input(send->name());
recv->set_device(DeviceName(to));
auto& recv_attr = *(recv->mutable_attr());
recv_attr[kAttrInputSrc].set_s(input_name);
if (input_node->attr().count(kAttrTensorName)) {
recv_attr[kAttrTensorName].set_s(
input_node->attr().at(kAttrTensorName).s());
}
if (from->attr().contains(kStreaming) && !control_input) {
if (input_node_port_num >= from->attr().at(kStreaming).list().b_size()) {
LOG(ERROR)
<< from->name()
<< " port index larger than length of _streaming attribute list.";
} else if (from->attr().at(kStreaming).list().b(input_node_port_num)) {
send_attr[kStreaming].mutable_list()->add_b(true);
recv_attr[kStreaming].mutable_list()->add_b(true);
}
}
auto& send_node_state = GetNodeStateOrCreateIt(send);
send_node_state.device_name = send->device();
send_node_state.inputs.push_back(std::make_pair(from, input_node_port_num));
send_node_state.outputs[0].push_back(recv);
auto& recv_node_state = GetNodeStateOrCreateIt(recv);
recv_node_state.inputs.push_back(std::make_pair(send, 0));
recv_node_state.outputs[0].push_back(to);
additional_nodes_.emplace_back(std::unique_ptr<NodeDef>(send));
additional_nodes_.emplace_back(std::unique_ptr<NodeDef>(recv));
return std::make_pair(send, recv);
}
OpContext SchedulerState::CreateOpContext(const NodeDef* node) const {
DeviceProperties device;
device = placer_->get_device(*node);
if (IsSend(*node)) {
device.set_type(kChannelDevice);
}
OpContext op_context;
const auto& node_state = node_map_.at(node);
op_context.name = node->name();
op_context.device_name = node_state.device_name;
auto& op_info = op_context.op_info;
op_info.set_op(node->op());
*op_info.mutable_attr() = node->attr();
for (auto& input : node_state.input_properties) {
*op_info.add_inputs() = input;
}
for (auto& output : node_state.output_properties) {
*op_info.add_outputs() = output;
}
op_info.mutable_device()->Swap(&device);
if (grappler_item_->graph.has_library()) {
op_context.function_library = &grappler_item_->graph.library();
}
return op_context;
}
NodeState& SchedulerState::GetNodeStateOrCreateIt(const NodeDef* node) {
CHECK(!initialized_) << "GetNodeStateOrCreateIt is called after Init().";
auto it = node_map_.find(node);
if (it != node_map_.end()) {
return it->second;
}
it = node_map_.emplace(node, NodeState()).first;
auto& node_state = it->second;
node_state.input_properties =
graph_properties_->GetInputProperties(node->name());
node_state.output_properties =
graph_properties_->GetOutputProperties(node->name());
node_state.shape_incompatible =
graph_properties_->CheckShapeIncompatible(node->name());
MaybeUpdateInputOutput(node);
if (!IsSend(*node)) {
node_state.device_name = DeviceName(node);
}
for (size_t i = 0; i < node_state.output_properties.size(); ++i) {
node_state.time_no_references[i] = Costs::Duration::max();
node_state.num_outputs_executed[i] = 0;
node_state.outputs[i] = {};
}
node_state.time_no_references[-1] = Costs::Duration::max();
node_state.num_outputs_executed[-1] = 0;
node_state.outputs[-1] = {};
node_state.time_scheduled = Costs::Duration().infinity();
return it->second;
}
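// Appends to `output_nodes` every consumer that becomes ready once `node`
// finishes at `curr_time`. For Switch nodes with a consistently annotated
// output slot, only consumers on that slot are considered.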
void SchedulerState::GetOutputNodes(const NodeDef* node,
const Costs::Duration& curr_time,
std::vector<const NodeDef*>* output_nodes) {
int slot = -1;
if (IsSwitch(*node) && node->attr().count(kOutputSlots) > 0 &&
node->attr().at(kOutputSlots).list().i_size() > 0) {
slot = node->attr().at(kOutputSlots).list().i(0);
for (int i = 1; i < node->attr().at(kOutputSlots).list().i_size(); ++i) {
if (slot != node->attr().at(kOutputSlots).list().i(i)) {
slot = -1;
break;
}
}
}
auto& node_state = node_map_[node];
for (const auto& port_num_output_pair : node_state.outputs) {
if (slot >= 0 && port_num_output_pair.first != slot) continue;
for (auto* output_node : port_num_output_pair.second) {
auto& output_state = node_map_[output_node];
output_state.num_inputs_ready++;
int output_state_inputs_size = output_state.inputs.size();
if (output_state.num_inputs_ready == output_state_inputs_size ||
IsMerge(*output_node)) {
output_state.time_ready = curr_time;
output_nodes->push_back(output_node);
VLOG(3) << " Add output: " << output_node->name();
}
}
}
}
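// Accounts for `node`'s execution: updates graph/op/device cost totals,
// tracks memory allocation for its outputs and deallocation of inputs whose
// last consumer has run, and returns the list of newly-ready nodes.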
std::vector<const NodeDef*> SchedulerState::MarkNodeExecuted(
const NodeDef* node, const Costs& node_costs, const OpContext& op_context,
bool extract_execution_count_attr,
const std::string& override_device_name) {
auto& node_state = node_map_[node];
bool previously_executed_merge =
IsMerge(*node) && (node_state.time_finished != Costs::Duration::max());
node_state.execution_count = 1;
if (extract_execution_count_attr && node->attr().count(kExecutionCount) > 0) {
node_state.execution_count = node->attr().at(kExecutionCount).i();
}
node_state.node_costs = node_costs;
Costs total_node_costs = node_state.TotalNodeCosts();
graph_costs_ = CombineCosts(graph_costs_, total_node_costs);
const string& op_name = node->op();
auto& op_cost = FindOrCreateZero(op_name, &op_to_cost_);
op_cost = CombineCosts(op_cost, total_node_costs);
if (VLOG_IS_ON(2)) {
string node_description = GetOpDescription(op_context.op_info);
op_counts_[node_description] += 1;
op_costs_[node_description] =
std::make_pair(total_node_costs.execution_time.asMicroSeconds().count(),
!node_costs.inaccurate);
}
std::string device_name = node_state.device_name;
if (!override_device_name.empty()) {
device_name = override_device_name;
}
auto& device = device_[device_name];
device.nodes_executed.push_back(node);
if (node_state.time_scheduled == Costs::Duration().infinity()) {
node_state.time_scheduled =
std::max(device.GetCurrTime(), node_state.time_ready);
device.device_costs.execution_time = node_state.time_scheduled;
}
device.device_costs = CombineCosts(device.device_costs, total_node_costs);
auto curr_time = device.GetCurrTime();
node_state.time_finished = curr_time;
UpdateDeviceAnnotationState(node, node_state, &device);
if (!IsPersistent(*node)) {
for (const auto& port_num_output_pair : node_state.outputs) {
int port_num = port_num_output_pair.first;
if (node_state.outputs[port_num].empty()) {
node_state.time_no_references[port_num] = curr_time;
} else {
if (node_state.node_costs.persistent_output_ports.contains(port_num)) {
continue;
}
if (!IsStreamingPort(*node, port_num)) {
device.memory_usage += GetOrCalculateOutputSize(node_state, port_num);
}
device.nodes_in_memory.insert(std::make_pair(node, port_num));
}
}
}
for (const auto& port : node_costs.persistent_output_ports) {
device.persistent_nodes.insert({node, port});
}
auto& device_op_cost = FindOrCreateZero(op_name, &device.op_to_cost);
device_op_cost = CombineCosts(device_op_cost, total_node_costs);
VLOG(3) << "Op scheduled -- name: " << node->name() << ", op: " << node->op()
<< ", device: " << node->device()
<< ", execution_count: " << node_state.execution_count
<< ", ready: " << node_state.time_ready.count()
<< ", scheduled: " << node_state.time_scheduled.count()
<< ", finished: " << node_state.time_finished.count();
VLOG(5) << " Current device memory usage (before deallocation): "
<< device.memory_usage;
std::vector<const NodeDef*> new_nodes;
if (previously_executed_merge) {
VLOG(1) << "node [ " << node->name() << ", " << node->op() << " ] "
<< "is executed more than once. "
<< "Skip scheduling its output nodes.";
} else {
GetOutputNodes(node, curr_time, &new_nodes);
}
if (!IsPersistent(*node)) {
if (device.memory_usage > device.max_memory_usage) {
device.max_memory_usage = device.memory_usage;
if (track_mem_usage_snapshot_) {
device.mem_usage_snapshot_at_peak = device.nodes_in_memory;
}
}
}
if (track_mem_usage_snapshot_) {
device.temporary_memory_usage_trace.push_back(
{node->name(), device.memory_usage});
}
for (const auto& input_port : node_state.inputs) {
auto* input = input_port.first;
auto port = input_port.second;
auto& input_state = node_map_[input];
input_state.num_outputs_executed[port]++;
    int input_state_outputs_size = input_state.outputs[port].size();
    if (input_state.node_costs.persistent_output_ports.contains(port)) continue;
    if (input_state.num_outputs_executed[port] == input_state_outputs_size &&
!IsPersistent(*input)) {
input_state.time_no_references[port] = curr_time;
auto& input_device = device_[input_state.device_name];
if (!IsStreamingPort(*input, port)) {
input_device.memory_usage -=
GetOrCalculateOutputSize(input_state, port);
}
input_device.nodes_in_memory.erase(std::make_pair(input, port));
}
}
return new_nodes;
}
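// Logs per-op and per-device cost and memory statistics, then returns the
// costs of the critical-path (slowest) device.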
Costs SchedulerState::Summary() const {
VLOG(1) << graph_costs_.num_ops_total << " ops processed in total, with "
<< graph_costs_.num_ops_with_unknown_shapes
<< " having unknown shapes";
VLOG(1) << "Expected execution time: " << graph_costs_.execution_time.count();
VLOG(1) << "Expected compute time: " << graph_costs_.compute_time.count();
VLOG(1) << "Expected memory time: " << graph_costs_.memory_time.count();
VLOG(1) << "Expected intermediate memory time: "
<< graph_costs_.intermediate_memory_time.count();
VLOG(1) << "Expected max memory: " << graph_costs_.max_memory;
VLOG(1) << "Expected max per-op buffers: " << graph_costs_.max_per_op_buffers;
VLOG(1) << "Expected max per-op streaming buffers: "
<< graph_costs_.max_per_op_streaming;
VLOG(1) << "Per-op execution time / compute time / memory time"
<< " / intermediate memory time:";
for (const auto& op_cost_pair : op_to_cost_) {
const auto& op = op_cost_pair.first;
const auto& cost = op_cost_pair.second.execution_time.count();
const auto& compute_cost = op_cost_pair.second.compute_time.count();
const auto& memory_cost = op_cost_pair.second.memory_time.count();
const auto& intermediate_memory_cost =
op_cost_pair.second.intermediate_memory_time.count();
const bool is_op_cost_accurate = !op_cost_pair.second.inaccurate;
if (cost) {
VLOG(1) << absl::StrFormat(" + %30s : %c %10d / %10d / %10d / %10d", op,
(is_op_cost_accurate ? ' ' : '~'), cost,
compute_cost, memory_cost,
intermediate_memory_cost);
}
}
VLOG(1) << "Devices:";
Costs critical_path_costs = Costs::ZeroCosts();
std::vector<string> device_names;
device_names.reserve(device_.size());
for (auto& it : device_) {
device_names.push_back(it.first);
}
std::sort(device_names.begin(), device_names.end());
for (const auto& name : device_names) {
const auto& state = device_.at(name);
std::map<string, int64_t> op_to_memory;
int64_t persistent_memory_usage = 0;
std::set<string> persistent_ops;
for (const auto& node_port : state.persistent_nodes) {
const auto* node = node_port.first;
const auto port = node_port.second;
int64_t output_size = 0;
auto it = node_map_.find(node);
if (it != node_map_.end()) {
output_size = GetOrCalculateOutputSize(it->second, port);
}
persistent_memory_usage += output_size;
op_to_memory[node->op()] += output_size;
persistent_ops.insert(node->op());
}
int64_t max_memory_usage = persistent_memory_usage + state.max_memory_usage;
critical_path_costs.estimated_max_memory_per_device[name] =
max_memory_usage;
const Costs::NanoSeconds wall_time_ns = state.GetCurrTime();
VLOG(1) << "Device = " << name
<< ", num_nodes = " << state.nodes_executed.size()
<< ", wall_time_ns = " << wall_time_ns.count() << ", memory usage: "
<< "persistent = " << HumanReadableNumBytes(persistent_memory_usage)
<< ", peak = " << HumanReadableNumBytes(state.max_memory_usage)
<< ", total = " << HumanReadableNumBytes(max_memory_usage)
<< ", at the end: " << HumanReadableNumBytes(state.memory_usage);
VLOG(1) << state.device_costs.num_ops_total
<< " ops processed in total, with "
<< state.device_costs.num_ops_with_unknown_shapes
<< " having unknown shapes";
const auto& device_annotation_stats = state.shape_annotation_stats;
if (device_annotation_stats.num_ops_annotated > 0) {
VLOG(1) << device_annotation_stats.num_ops_annotated
<< " ops with shape annotation, with "
<< device_annotation_stats.num_ops_executed_more_than_once
<< " executed more than once, "
<< device_annotation_stats.num_ops_with_dynamic_shapes
<< " with dynamic shapes, "
<< device_annotation_stats.num_ops_with_incompatible_shapes
<< " with incompatible shapes, "
<< device_annotation_stats.num_ops_executed
<< " ops executed in total.";
}
VLOG(1) << "Per-op execution time / compute time / memory time "
<< " / intermediate memory time"
<< " (and memory usage at peak memory usage):";
for (const auto& node_port : state.mem_usage_snapshot_at_peak) {
const auto* node = node_port.first;
const auto port = node_port.second;
auto it = node_map_.find(node);
if (it != node_map_.end()) {
op_to_memory[node->op()] += GetOrCalculateOutputSize(it->second, port);
}
}
Costs::NanoSeconds total_compute_time_ns;
bool is_total_cost_accurate = true;
for (const auto& op_cost_pair : state.op_to_cost) {
const auto& op = op_cost_pair.first;
const auto& cost = op_cost_pair.second.execution_time.count();
const auto& compute_cost = op_cost_pair.second.compute_time.count();
const auto& memory_cost = op_cost_pair.second.memory_time.count();
const auto& intermediate_memory_cost =
op_cost_pair.second.intermediate_memory_time.count();
total_compute_time_ns += op_cost_pair.second.execution_time;
const bool is_op_cost_accurate = !op_cost_pair.second.inaccurate;
if (!is_op_cost_accurate) {
is_total_cost_accurate = false;
}
int64_t op_mem_usage = 0;
auto it = op_to_memory.find(op);
if (it != op_to_memory.end()) {
op_mem_usage = it->second;
}
const float mem_usage_percent =
max_memory_usage > 0 ? Round2(100.0 * op_mem_usage / max_memory_usage)
: 0.0;
if (cost || mem_usage_percent > 1.0) {
VLOG(1) << absl::StrFormat(
" + %30s : %c %10d / %10d / %10d / %10d", op.c_str(),
(is_op_cost_accurate ? ' ' : '~'), cost, compute_cost,
memory_cost, intermediate_memory_cost)
<< " (" << HumanReadableNumBytes(op_mem_usage) << " ["
<< mem_usage_percent << "%] "
<< (persistent_ops.count(op) > 0 ? ": persistent op)" : ")");
}
}
int utilization = 0;
if (wall_time_ns.count() > 0) {
utilization = total_compute_time_ns.count() * 100 / wall_time_ns.count();
}
VLOG(1) << "Device = " << name << ", total_compute_time_ns = "
<< (is_total_cost_accurate ? "" : "~")
<< total_compute_time_ns.count()
<< ", utilization = " << utilization << "%";
if (critical_path_costs.execution_time <= state.GetCurrTime()) {
critical_path_costs = state.device_costs;
critical_path_costs.persistent_memory = persistent_memory_usage;
critical_path_costs.temporary_memory = state.max_memory_usage;
critical_path_costs.max_memory = max_memory_usage;
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Node description, counts, cost:";
for (const auto& item : op_counts_) {
int cost;
bool is_cost_accurate;
std::tie(cost, is_cost_accurate) = op_costs_.at(item.first);
VLOG(2) << "Node: " << item.first << ", Count: " << item.second
<< ", Individual Cost: " << (is_cost_accurate ? "" : "~") << cost
<< " us";
}
}
VLOG(1) << "Critical path execution time: "
<< critical_path_costs.execution_time.count();
return critical_path_costs;
}
Costs SchedulerState::Summary(RunMetadata* metadata) {
if (metadata) GenerateRunMetadata(metadata);
return Summary();
}
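// Fills `metadata` with simulated step stats (per-device node timings,
// output tensor descriptions, and memory stats) plus partition graphs.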
void SchedulerState::GenerateRunMetadata(RunMetadata* metadata) {
StepStats* stepstats = metadata->mutable_step_stats();
for (const auto& device : device_) {
GraphDef* device_partition_graph = metadata->add_partition_graphs();
DeviceStepStats* device_stepstats = stepstats->add_dev_stats();
device_stepstats->set_device(device.first);
for (const auto& node_def : device.second.nodes_executed) {
if (node_map_.find(node_def) == node_map_.end()) {
continue;
}
const NodeState& nodestate = node_map_.at(node_def);
NodeExecStats* node_stats = device_stepstats->add_node_stats();
uint64 total_output_size = 0;
uint64_t persistent_output_size = 0;
for (int slot = 0, slot_end = nodestate.output_properties.size();
slot < slot_end; slot++) {
const auto& properties = nodestate.output_properties[slot];
NodeOutput* no = node_stats->add_output();
no->set_slot(slot);
TensorDescription* tensor_descr = no->mutable_tensor_description();
tensor_descr->set_dtype(properties.dtype());
*tensor_descr->mutable_shape() = properties.shape();
const int64_t tensor_size_requested =
CalculateOutputSize(nodestate.output_properties, slot);
const int64_t tensor_size_allocated =
GetOrCalculateOutputSize(nodestate, slot);
total_output_size += tensor_size_allocated;
if (nodestate.node_costs.persistent_output_ports.contains(slot)) {
persistent_output_size += tensor_size_allocated;
}
tensor_descr->mutable_allocation_description()->set_requested_bytes(
tensor_size_requested);
tensor_descr->mutable_allocation_description()->set_allocated_bytes(
tensor_size_allocated);
}
if (node_def->op() != "HloGenericOp") {
node_stats->set_timeline_label(node_def->op());
} else {
string timeline_label;
if (node_def->attr().count("hlo_opcode") > 0) {
absl::StrAppend(&timeline_label,
node_def->attr().at("hlo_opcode").s());
}
if (node_def->attr().count("_hlo_metadata_op_type") > 0) {
absl::StrAppend(&timeline_label, "/",
node_def->attr().at("_hlo_metadata_op_type").s());
}
node_stats->set_timeline_label(timeline_label);
}
node_stats->set_node_name(node_def->name());
node_stats->set_op_start_rel_micros(0);
node_stats->set_all_start_micros(
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_op_end_rel_micros(
nodestate.time_finished.asMicroSeconds().count() -
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_all_end_rel_micros(
nodestate.time_finished.asMicroSeconds().count() -
nodestate.time_scheduled.asMicroSeconds().count());
node_stats->set_op_start_rel_nanos(0);
node_stats->set_all_start_nanos(nodestate.time_scheduled.count());
node_stats->set_op_end_rel_nanos(nodestate.time_finished.count() -
nodestate.time_scheduled.count());
node_stats->set_all_end_rel_nanos(nodestate.time_finished.count() -
nodestate.time_scheduled.count());
auto* mem_stats = node_stats->mutable_memory_stats();
mem_stats->set_temp_memory_size(0);
int64_t persistent_memory_size = 0;
if (IsPersistent(*node_def)) {
persistent_memory_size = total_output_size;
} else {
persistent_memory_size = persistent_output_size;
}
mem_stats->set_persistent_memory_size(persistent_memory_size);
*device_partition_graph->add_node() = *node_def;
}
}
}
const std::unordered_map<string, int64_t> SchedulerState::GetPeakMemoryUsage()
const {
std::unordered_map<string, int64_t> result;
for (const auto& device : device_) {
const string& name = device.first;
const DeviceState& state = device.second;
result[name] = state.max_memory_usage;
}
return result;
}
const std::unordered_map<string, int64_t>
SchedulerState::GetPersistentMemoryUsage() const {
std::unordered_map<string, int64_t> result;
for (const auto& device : device_) {
const string& name = device.first;
const DeviceState& state = device.second;
int64_t persistent_memory_usage = 0;
for (const auto& node_port : state.persistent_nodes) {
const auto* node = node_port.first;
const auto port = node_port.second;
const auto& node_state = node_map_.at(node);
persistent_memory_usage += GetOrCalculateOutputSize(node_state, port);
}
result[name] = persistent_memory_usage;
}
return result;
}
void SchedulerState::SetNodeStateTimeScheduled(const NodeDef* node) {
auto& node_state = node_map_.at(node);
auto& device = device_[node_state.device_name];
node_state.time_scheduled = device.GetCurrTime();
}
int64_t SchedulerState::GetOrCalculateOutputSize(const NodeState& node_state,
int port_num) const {
auto& node_costs = node_state.node_costs;
auto it = node_costs.output_tensor_size_bytes.find(port_num);
if (it != node_costs.output_tensor_size_bytes.end()) {
return it->second;
}
return CalculateOutputSize(node_state.output_properties, port_num);
}
VirtualScheduler::~VirtualScheduler() {}
VirtualScheduler::VirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
Cluster* cluster,
ReadyNodeManager* ready_nodes,
std::unique_ptr<VirtualPlacer> placer)
: scheduler_state_(std::make_unique<SchedulerState>(
use_static_shapes, use_aggressive_shape_inference, cluster,
std::move(placer))),
ready_nodes_(ready_nodes) {}
VirtualScheduler::VirtualScheduler(
ReadyNodeManager* ready_nodes,
std::unique_ptr<SchedulerState> scheduler_state)
: scheduler_state_(std::move(scheduler_state)), ready_nodes_(ready_nodes) {}
Status VirtualScheduler::Init(const GrapplerItem* item) {
TF_RETURN_IF_ERROR(ready_nodes_->Init(GetNodeStates()));
std::vector<const NodeDef*> initial_nodes;
auto status = scheduler_state_->Init(item, &initial_nodes);
if (status.ok()) {
for (auto node : initial_nodes) {
ready_nodes_->AddNode(node);
}
}
return status;
}
OpContext VirtualScheduler::GetCurrNode() {
const NodeDef* node = ready_nodes_->GetCurrNode();
return scheduler_state_->CreateOpContext(node);
}
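// Records the cost of the current node, enqueues any nodes it made ready,
// and returns true while there is more work left to schedule.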
bool VirtualScheduler::MarkCurrNodeExecuted(const Costs& node_costs) {
const NodeDef* node = ready_nodes_->GetCurrNode();
auto new_nodes = scheduler_state_->MarkNodeExecuted(
node, node_costs,
scheduler_state_->CreateOpContext(ready_nodes_->GetCurrNode()));
for (auto node : new_nodes) {
ready_nodes_->AddNode(node);
}
ready_nodes_->RemoveCurrNode();
return !ready_nodes_->Empty();
}
}
} | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kCPU0[] = "/job:localhost/replica:0/task:0/cpu:0";
constexpr char kCPU1[] = "/job:localhost/replica:0/task:0/cpu:1";
constexpr char kChannelFrom0To1[] = "Channel from CPU0 to CPU1";
constexpr char kChannelFrom1To0[] = "Channel from CPU1 to CPU0";
constexpr char kConv2D[] = "Conv2D";
constexpr char kSend[] = "_Send";
constexpr char kRecv[] = "_Recv";
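// Exercises the ReadyNodeManager implementations with a fixed set of nodes
// whose time_ready values decrease from Node1 (6000) down to Node6 (1000).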
class ReadyNodeManagerTest : public ::testing::Test {
protected:
ReadyNodeManagerTest() {
NodeSetUp("Node1", kConv2D, kCPU0, 6000, &node1_);
NodeSetUp("Node2", kConv2D, kCPU0, 5000, &node2_);
NodeSetUp("Node3", kConv2D, kCPU0, 4000, &node3_);
NodeSetUp("Node4", kConv2D, kCPU0, 3000, &node4_);
NodeSetUp("Node5", kConv2D, kCPU0, 2000, &node5_);
NodeSetUp("Node6", kConv2D, kCPU0, 1000, &node6_);
}
void NodeSetUp(const string& name, const string& op_name,
const string& device_name, const uint64 time_ready,
NodeDef* node) {
node->set_name(name);
node->set_op(op_name);
node->set_device(device_name);
node_states_[node] = NodeState();
node_states_[node].time_ready = time_ready;
node_states_[node].device_name = device_name;
}
NodeDef node1_, node2_, node3_, node4_, node5_, node6_;
std::unordered_map<const NodeDef*, NodeState> node_states_;
};
TEST_F(ReadyNodeManagerTest, GetSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleFIFOManager) {
FIFOManager manager = FIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, AddAndRemoveMultipleLIFOManager) {
LIFOManager manager = LIFOManager();
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MergeOrderInLIFOManager) {
LIFOManager manager = LIFOManager();
node3_.set_op("Merge");
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
}
TEST_F(ReadyNodeManagerTest, GetSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetCurrNodeFirstReadyManager) {
FirstReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node2_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node3_);
manager.AddNode(&node6_);
EXPECT_EQ("Node6", manager.GetCurrNode()->name());
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU0, 5, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 4, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 3, &node9);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
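// Nodes with identical time_ready should come out in the same order
// regardless of insertion order, thanks to the name-based tie-breaker.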
TEST_F(ReadyNodeManagerTest, DeterminismInFirstReadyManager) {
FirstReadyManager manager1;
TF_EXPECT_OK(manager1.Init(&node_states_));
FirstReadyManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kConv2D, kCPU0, 1000, &node8);
NodeSetUp("Node9", kConv2D, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 1000, &node10);
NodeSetUp("Node11", kConv2D, kCPU0, 1000, &node11);
NodeSetUp("Node12", kConv2D, kCPU0, 1000, &node12);
manager1.AddNode(&node7);
manager1.AddNode(&node8);
manager1.AddNode(&node9);
manager1.AddNode(&node10);
manager1.AddNode(&node11);
manager1.AddNode(&node12);
manager2.AddNode(&node8);
manager2.AddNode(&node11);
manager2.AddNode(&node9);
manager2.AddNode(&node10);
manager2.AddNode(&node7);
manager2.AddNode(&node12);
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager1.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager1.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager1.Empty());
EXPECT_TRUE(manager2.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultiplePriorityReadyManager) {
PriorityReadyManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
std::unordered_map<string, int> node_priority = {
{"Node1", 1}, {"Node2", 2}, {"Node3", 2}, {"Node4", 4}, {"Node5", 5}};
TF_EXPECT_OK(manager.SetPriority(node_priority));
manager.AddNode(&node3_);
manager.AddNode(&node1_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node2_);
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, RemoveSingleNodeCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, GetAndRemoveMultipleCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.AddNode(&node5_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.AddNode(&node6_);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, MultiDeviceSendRecvCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeSetUp("Node7", kConv2D, kCPU1, 1001, &node7);
NodeSetUp("Node8", kConv2D, kCPU1, 2001, &node8);
NodeSetUp("Node9", kConv2D, kCPU1, 3001, &node9);
NodeDef send1;
NodeDef send2;
NodeDef recv1;
NodeDef recv2;
NodeSetUp("Send1", kSend, kChannelFrom0To1, 2002, &send1);
NodeSetUp("Send2", kSend, kChannelFrom1To0, 2005, &send2);
NodeSetUp("Recv1", kRecv, kCPU0, 2003, &recv1);
NodeSetUp("Recv2", kRecv, kCPU1, 2004, &recv2);
manager.AddNode(&node1_);
manager.AddNode(&node2_);
manager.AddNode(&node3_);
manager.AddNode(&node4_);
manager.AddNode(&node5_);
manager.AddNode(&node6_);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
manager.AddNode(&send1);
manager.AddNode(&send2);
manager.AddNode(&recv1);
manager.AddNode(&recv2);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node6");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node5");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv1");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Recv2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Send2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node4");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node3");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node2");
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node1");
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
TEST_F(ReadyNodeManagerTest, DeterminismInCompositeNodeManager) {
CompositeNodeManager manager;
TF_EXPECT_OK(manager.Init(&node_states_));
CompositeNodeManager manager2;
TF_EXPECT_OK(manager2.Init(&node_states_));
NodeDef node7;
NodeDef node8;
NodeDef node9;
NodeDef node10;
NodeDef node11;
NodeDef node12;
NodeSetUp("Node7", kConv2D, kCPU0, 1000, &node7);
NodeSetUp("Node8", kSend, kCPU0, 1000, &node8);
NodeSetUp("Node9", kRecv, kCPU0, 1000, &node9);
NodeSetUp("Node10", kConv2D, kCPU0, 999, &node10);
NodeSetUp("Node11", kRecv, kCPU0, 999, &node11);
NodeSetUp("Node12", kConv2D, kCPU1, 1000, &node12);
manager.AddNode(&node7);
manager.AddNode(&node8);
manager.AddNode(&node9);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node9);
manager.AddNode(&node8);
manager.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node9");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node7");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node8);
manager.AddNode(&node10);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node10");
EXPECT_EQ(manager.GetCurrNode()->op(), kConv2D);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node11);
manager.AddNode(&node8);
EXPECT_EQ(manager.GetCurrNode()->name(), "Node11");
EXPECT_EQ(manager.GetCurrNode()->op(), kRecv);
manager.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), "Node8");
EXPECT_EQ(manager.GetCurrNode()->op(), kSend);
manager.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
manager.AddNode(&node7);
manager.AddNode(&node12);
manager2.AddNode(&node12);
manager2.AddNode(&node7);
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_EQ(manager.GetCurrNode()->name(), manager2.GetCurrNode()->name());
manager.RemoveCurrNode();
manager2.RemoveCurrNode();
EXPECT_TRUE(manager.Empty());
}
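// Scheduler subclass used by the tests below: it enables memory-usage
// tracking and befriends the tests that inspect internal device/node state.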
class TestVirtualScheduler : public VirtualScheduler {
public:
TestVirtualScheduler(const bool use_static_shapes,
const bool use_aggressive_shape_inference,
ReadyNodeManager* ready_node_manager, Cluster* cluster)
: VirtualScheduler(
use_static_shapes, use_aggressive_shape_inference, cluster,
ready_node_manager,
std::make_unique<VirtualPlacer>(cluster->GetDevices())) {
enable_mem_usage_tracking();
}
FRIEND_TEST(VirtualSchedulerTest, MemoryUsage);
FRIEND_TEST(VirtualSchedulerTest, ControlDependency);
FRIEND_TEST(VirtualSchedulerTest, ComplexDependency);
FRIEND_TEST(VirtualSchedulerTest, Variable);
FRIEND_TEST(VirtualSchedulerTest, InterDeviceTransfer);
};
class VirtualSchedulerTest : public ::testing::Test {
protected:
VirtualSchedulerTest() {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device = GetDummyCPUDevice();
devices[kCPU0] = cpu_device;
devices[kCPU1] = cpu_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
scheduler_ = std::make_unique<TestVirtualScheduler>(
/*use_static_shapes=*/true,
/*use_aggressive_shape_inference=*/true, &first_ready_manager_,
cluster_.get());
}
DeviceProperties GetDummyCPUDevice() {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(4000);
cpu_device.set_num_cores(2);
cpu_device.set_bandwidth(2000000);
return cpu_device;
}
void CreateGrapplerItemWithConv2Ds() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::RandomUniform(
s.WithOpName("y"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::RandomUniform(
s.WithOpName("f"), {kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto c0 = ops::Conv2D(s.WithOpName("c0"), x, f, strides, "SAME");
auto c1 = ops::Conv2D(s.WithOpName("c1"), y, f, strides, "SAME");
auto c2 = ops::Conv2D(s.WithOpName("c2"), z, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_graph";
grappler_item_->fetch = {"c0", "c1"};
dependency_["c0"] = {"x", "f"};
dependency_["c1"] = {"y", "f"};
}
void CreateGrapplerItemWithConv2DAndVariable() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto f = ops::Variable(s.WithOpName("f"),
{kernel_, kernel_, depth_in_, depth_out_}, DT_FLOAT);
std::vector<int> strides = {1, 1, 1, 1};
auto y = ops::Conv2D(s.WithOpName("y"), x, f, strides, "SAME");
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_var_graph";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"x", "f"};
}
void CreateGrapplerItemWithMatmulChain() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto a = ops::RandomUniform(s.WithOpName("a"), {3200, 3200}, DT_FLOAT);
auto b = ops::RandomUniform(s.WithOpName("b").WithControlDependencies(a),
{3200, 3200}, DT_FLOAT);
auto c = ops::RandomUniform(s.WithOpName("c").WithControlDependencies(b),
{3200, 3200}, DT_FLOAT);
auto d = ops::RandomUniform(s.WithOpName("d").WithControlDependencies(c),
{3200, 3200}, DT_FLOAT);
auto e = ops::RandomUniform(s.WithOpName("e").WithControlDependencies(d),
{3200, 3200}, DT_FLOAT);
auto ab = ops::MatMul(s.WithOpName("ab").WithControlDependencies(e), a, b);
auto abc = ops::MatMul(s.WithOpName("abc"), ab, c);
auto abcd = ops::MatMul(s.WithOpName("abcd"), abc, d);
auto abcde = ops::MatMul(s.WithOpName("abcde"), abcd, e);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_matmul_sequence_graph";
grappler_item_->fetch = {"abcde"};
dependency_["ab"] = {"a", "b"};
dependency_["abc"] = {"ab", "c"};
dependency_["abcd"] = {"abc", "d"};
dependency_["abcde"] = {"abcd", "e"};
}
void CreateGrapplerItemWithAddN() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(s.WithOpName("x"), {10, 10, 10, 10}, DT_FLOAT);
auto y = ops::RandomUniform(s.WithOpName("y"), {10, 10, 10, 10}, DT_FLOAT);
auto z = ops::RandomUniform(s.WithOpName("z"), {10, 10, 10, 10}, DT_FLOAT);
auto w = ops::RandomUniform(s.WithOpName("w"), {10, 10, 10, 10}, DT_FLOAT);
OutputList input_tensors = {x, y, z, w};
auto add = ops::AddN(s.WithOpName("add"), input_tensors);
auto out = ops::Identity(s.WithOpName("out"), add);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_addn_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = {"x", "y", "z", "w", "add"};
}
void CreateGrapplerItemWithUnnecessaryPlaceholderNodes() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto unnecessary = ops::Placeholder(s.WithOpName("unnecessary"), DT_FLOAT);
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_extra_placeholders";
grappler_item_->fetch = {"x"};
grappler_item_->feed = {{"x", Tensor()}, {"unnecessary", Tensor()}};
}
void CreateGrapplerItemWithControlDependency() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
std::vector<string> input_noop_names = {"x", "y", "z", "w", "u", "v", "t"};
std::vector<Operation> input_tensors;
for (const auto& input : input_noop_names) {
auto x = ops::NoOp(s.WithOpName(input));
input_tensors.push_back(x.operation);
}
auto out =
ops::NoOp(s.WithControlDependencies(input_tensors).WithOpName("out"));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_control_dependency_graph";
grappler_item_->fetch = {"out"};
dependency_["out"] = input_noop_names;
}
void CreateGrapplerItemWithAddFromOneTensor() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = tensorflow::ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = tensorflow::ops::Add(s.WithOpName("y"), x, x);
Output fetch = ops::Identity(s.WithOpName("fetch"), y);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_from_one_tensor";
grappler_item_->fetch = {"fetch"};
dependency_["fetch"] = {"y"};
dependency_["y"] = {"x"};
}
void CreateGrapplerItemWithSwitchMergeInput() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto pred = ops::Const(s.WithOpName("pred"), false, {});
auto sw = ops::Switch(s.WithOpName("switch"), x, pred);
auto b = ops::RandomUniform(
s.WithOpName("b"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto a = ops::Add(s.WithOpName("a"), sw.output_true, b);
auto m = ops::Merge(s.WithOpName("m"), {sw.output_false, a.z});
auto z = ops::RandomUniform(
s.WithOpName("z"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto y = ops::Add(s.WithOpName("y"), m.output, z);
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_add_merge_switch";
grappler_item_->fetch = {"y"};
dependency_["y"] = {"m", "z"};
}
void CreateGrapplerItemWithBatchNorm() {
Scope s = Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto scale =
ops::RandomUniform(s.WithOpName("scale"), {depth_in_}, DT_FLOAT);
auto offset =
ops::RandomUniform(s.WithOpName("offset"), {depth_in_}, DT_FLOAT);
auto mean = ops::RandomUniform(s.WithOpName("mean"), {0}, DT_FLOAT);
auto var = ops::RandomUniform(s.WithOpName("var"), {0}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNorm(
s.WithOpName("bn"), x, scale, offset, mean, var,
ops::FusedBatchNorm::IsTraining(true).Epsilon(0.1f));
auto y = batch_norm.y;
auto batch_mean = batch_norm.batch_mean;
auto batch_var = batch_norm.batch_variance;
auto z1 = ops::Add(s.WithOpName("z1"), x, y);
auto z2 = ops::Add(s.WithOpName("z2"), batch_var, batch_var);
auto z3 = ops::Add(s.WithOpName("z3"), batch_var, batch_var);
std::vector<Operation> input_tensors = {
batch_mean.op(),
z1.z.op(),
z2.z.op(),
z3.z.op(),
};
auto z4 = ops::NoOp(s.WithControlDependencies(batch_var).WithOpName("z4"));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_complex_dependency_graph";
grappler_item_->fetch = {"z1", "z2", "z3", "z4"};
dependency_["bn"] = {"x", "scale", "offset", "mean", "var"};
dependency_["z1"] = {"x", "bn"};
dependency_["z2"] = {"bn"};
dependency_["z3"] = {"bn"};
dependency_["z4"] = {"bn"};
}
void CreateGrapplerItemWithSendRecv() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim { size: 128 }
dim { size: 32 }
}
float_val: 3.1415
}
}
}
}
node {
name: "Send"
op: "_Send"
input: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
}
node {
name: "Recv"
op: "_Recv"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "_output_shapes"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "shape"
value {
list { shape {
dim { size: 128 }
dim { size: 32 }
}}}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
library {
}
versions {
producer: 24
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Recv"};
}
void CreateGrapplerItemWithRecvWithoutSend() {
const string gdef_ascii = R"EOF(
node {
name: "Recv"
op: "_Recv"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "client_terminated"
value {
b: false
}
}
attr {
key: "recv_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device"
value {
s: "/job:localhost/replica:0/task:0/device:CPU:0"
}
}
attr {
key: "send_device_incarnation"
value {
i: 0
}
}
attr {
key: "tensor_name"
value {
s: "test"
}
}
attr {
key: "tensor_type"
value {
type: DT_FLOAT
}
}
}
library {
}
versions {
producer: 24
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Recv"};
}
void CreateGrapplerItemWithLoop() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "ones"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
float_val: 1.0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "ones"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/add"
op: "Add"
input: "while/Identity"
input: "while/add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/concat/axis"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/concat"
op: "ConcatV2"
input: "while/Identity_1"
input: "while/Identity_1"
input: "while/concat/axis"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/concat"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 21
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"while/Exit", "while/Exit_1"};
}
void CreateGrapplerItemWithLoopAnnotated() {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "ones"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
float_val: 1.0
}
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Enter_1"
op: "Enter"
input: "ones"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Merge_1"
op: "Merge"
input: "while/Enter_1"
input: "while/NextIteration_1"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
attr {
key: "_execution_count"
value {
i: 11
}
}
attr {
key: "_output_slot_vector"
value {
list {
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 0
}
}
}
}
node {
name: "while/Switch_1"
op: "Switch"
input: "while/Merge_1"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge_1"
}
}
}
attr {
key: "_execution_count"
value {
i: 11
}
}
attr {
key: "_output_slot_vector"
value {
list {
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 1
i: 0
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Identity_1"
op: "Identity"
input: "while/Switch_1:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/add"
op: "Add"
input: "while/Identity"
input: "while/add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/concat/axis"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/concat"
op: "ConcatV2"
input: "while/Identity_1"
input: "while/Identity_1"
input: "while/concat/axis"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "Tidx"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/add"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/NextIteration_1"
op: "NextIteration"
input: "while/concat"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 10
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
node {
name: "while/Exit_1"
op: "Exit"
input: "while/Switch_1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
attr {
key: "_execution_count"
value {
i: 1
}
}
}
versions {
producer: 21
}
)EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"while/Exit", "while/Exit_1"};
}
void CreateGrapplerItemWithCondition() {
const string gdef_ascii = R"EOF(
node {
name: "a"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 2.0
}
}
}
}
node {
name: "Less"
op: "Const"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
tensor_content: "\001"
}
}
}
}
node {
name: "Switch"
op: "Switch"
input: "a"
input: "Less"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "First"
op: "Identity"
input: "Switch"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Second"
op: "Identity"
input: "Switch:1"
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
node {
name: "Merge"
op: "Merge"
input: "First"
input: "Second"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_FLOAT
}
}
}
versions {
producer: 27
})EOF";
grappler_item_ = std::make_unique<GrapplerItem>();
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii,
&grappler_item_->graph));
grappler_item_->id = "test_graph";
grappler_item_->fetch = {"Merge"};
}
void CreateGrapplerItemWithInterDeviceTransfers() {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(kCPU0);
auto x = ops::RandomUniform(
s.WithOpName("x"), {batch_size_, width_, height_, depth_in_}, DT_FLOAT);
auto scale =
ops::RandomUniform(s.WithOpName("scale"), {depth_in_}, DT_FLOAT);
auto offset =
ops::RandomUniform(s.WithOpName("offset"), {depth_in_}, DT_FLOAT);
auto mean = ops::RandomUniform(s.WithOpName("mean"), {0}, DT_FLOAT);
auto var = ops::RandomUniform(s.WithOpName("var"), {0}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNorm(
s.WithOpName("bn"), x, scale, offset, mean, var,
ops::FusedBatchNorm::IsTraining(true).Epsilon(0.1f));
auto y = batch_norm.y;
auto batch_mean = batch_norm.batch_mean;
auto batch_var = batch_norm.batch_variance;
auto y1 = ops::Identity(s.WithOpName("y1").WithDevice(kCPU1), y);
auto y2 = ops::Identity(s.WithOpName("y2").WithDevice(kCPU1), y);
auto batch_mean1 = ops::Identity(
s.WithOpName("batch_mean1").WithDevice(kCPU1), batch_mean);
auto batch_var1 =
ops::Identity(s.WithOpName("batch_var1").WithDevice(kCPU1), batch_var);
auto control_dep = ops::NoOp(s.WithOpName("control_dep")
.WithControlDependencies(y)
.WithDevice(kCPU1));
grappler_item_ = std::make_unique<GrapplerItem>();
TF_CHECK_OK(s.ToGraphDef(&grappler_item_->graph));
grappler_item_->id = "test_conv2d_graph";
grappler_item_->fetch = {"y1", "y2", "batch_mean1", "batch_var1",
"control_dep"};
dependency_["bn"] = {"x", "mean", "var"};
dependency_["y1"] = {"bn"};
dependency_["y2"] = {"bn"};
dependency_["batch_mean1"] = {"bn"};
dependency_["batch_var1"] = {"bn"};
dependency_["control_dep"] = {"bn"};
}
void InitScheduler() { TF_ASSERT_OK(scheduler_->Init(grappler_item_.get())); }
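// Deterministic per-op cost model for these tests: 2s for MatMul, 1s for
// RandomUniform, and 1us for everything else.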
Costs SimplePredictCosts(const OpContext& op_context) const {
Costs c;
int64_t exec_cost = 0;
if (op_context.op_info.op() == "MatMul") {
exec_cost = 2000000000;
} else if (op_context.op_info.op() == "RandomUniform") {
exec_cost = 1000000000;
} else {
exec_cost = 1000;
}
c.execution_time = Costs::NanoSeconds(exec_cost);
return c;
}
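// Drives the scheduler until `target_node` executes (or the graph is
// exhausted), checking that each node runs only after all of its recorded
// dependencies have executed.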
std::unordered_map<string, OpContext> RunScheduler(
const string& target_node) {
std::unordered_map<string, OpContext> ops_executed;
bool more_nodes = true;
do {
OpContext op_context = scheduler_->GetCurrNode();
ops_executed[op_context.name] = op_context;
std::cout << op_context.name << std::endl;
Costs node_costs = SimplePredictCosts(op_context);
auto it = dependency_.find(op_context.name);
if (it != dependency_.end()) {
for (const auto& preceding_node : it->second) {
EXPECT_GT(ops_executed.count(preceding_node), 0);
}
}
more_nodes = scheduler_->MarkCurrNodeExecuted(node_costs);
if (op_context.name == target_node) {
break;
}
} while (more_nodes);
return ops_executed;
}
template <typename T>
void ExpectVectorEq(const std::vector<T>& expected,
const std::vector<T>& test_elements) {
std::set<T> expected_set(expected.begin(), expected.end());
for (const auto& element : test_elements) {
EXPECT_GT(expected_set.count(element), 0);
}
EXPECT_EQ(expected.size(), test_elements.size());
}
void ValidateNodeDefs(const std::vector<string>& expected,
const std::vector<const NodeDef*>& node_defs) {
std::vector<string> node_names;
std::transform(node_defs.begin(), node_defs.end(),
std::back_inserter(node_names),
[](const NodeDef* node) { return node->name(); });
ExpectVectorEq(expected, node_names);
}
template <typename T>
void ExpectSetEq(const std::set<T>& expected,
const std::set<T>& test_elements) {
for (const auto& element : test_elements) {
EXPECT_GT(expected.count(element), 0);
}
EXPECT_EQ(expected.size(), test_elements.size());
}
template <typename T, typename U>
void ExpectUnorderedMapEq(const std::unordered_map<T, U>& expected,
const std::unordered_map<T, U>& test_map) {
EXPECT_EQ(expected.size(), test_map.size());
for (const auto& key_val : expected) {
EXPECT_GT(test_map.count(key_val.first), 0);
EXPECT_EQ(test_map.at(key_val.first), key_val.second);
}
}
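// Checks that the (node name, port) pairs captured in the memory snapshot
// are exactly `expected_names`, all on port `port_num_expected`.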
void ValidateMemoryUsageSnapshot(
const std::vector<string>& expected_names, const int port_num_expected,
const std::unordered_set<std::pair<const NodeDef*, int>,
DeviceState::NodePairHash>& mem_usage_snapshot) {
std::set<std::pair<string, int>> nodes_at_peak_mem_usage;
std::transform(
mem_usage_snapshot.begin(), mem_usage_snapshot.end(),
std::inserter(nodes_at_peak_mem_usage, nodes_at_peak_mem_usage.begin()),
[](const std::pair<const NodeDef*, int>& node_port) {
return std::make_pair(node_port.first->name(), node_port.second);
});
std::set<std::pair<string, int>> expected;
std::transform(expected_names.begin(), expected_names.end(),
std::inserter(expected, expected.begin()),
[port_num_expected](const string& name) {
return std::make_pair(name, port_num_expected);
});
ExpectSetEq(expected, nodes_at_peak_mem_usage);
}
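// Verifies that start times are non-decreasing along the given chain of
// dependent nodes.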
void ValidateDependencyChain(
const std::unordered_map<string, int64_t>& start_times,
const std::vector<string>& nodes_in_dependency_order) {
int64_t prev_node_time = -1;
for (const auto& node : nodes_in_dependency_order) {
int64_t curr_node_time = start_times.at(node);
EXPECT_GE(curr_node_time, prev_node_time);
prev_node_time = curr_node_time;
}
}
std::unique_ptr<VirtualCluster> cluster_;
std::unique_ptr<TestVirtualScheduler> scheduler_;
FirstReadyManager first_ready_manager_;
CompositeNodeManager composite_node_manager_;
std::unique_ptr<GrapplerItem> grappler_item_;
std::unordered_map<string, std::vector<string>> dependency_;
const int batch_size_ = 4;
const int width_ = 10;
const int height_ = 10;
const int depth_in_ = 8;
const int kernel_ = 3;
const int depth_out_ = 16;
};
TEST_F(VirtualSchedulerTest, SummaryCostTest) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
auto ops_executed = RunScheduler("");
Costs c = scheduler_->Summary();
EXPECT_EQ(13000005, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size(), c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
TEST_F(VirtualSchedulerTest, SummaryCostStepStatsTest) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
auto ops_executed = RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
StepStats stepstats = metadata.step_stats();
EXPECT_EQ(13000005, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size(), c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
EXPECT_EQ(1, stepstats.dev_stats().size());
std::map<string, std::pair<int64_t, int64_t>> start_end_times;
for (const auto& device_step_stats : stepstats.dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
int64_t start = stats.all_start_micros();
int64_t end = start + stats.all_end_rel_micros();
start_end_times[stats.node_name()] =
std::pair<int64_t, int64_t>(start, end);
if (stats.timeline_label() == "MatMul" ||
stats.timeline_label() == "RandomUniform") {
EXPECT_EQ(1, stats.output().size());
for (const auto& output : stats.output()) {
EXPECT_EQ(DT_FLOAT, output.tensor_description().dtype());
EXPECT_EQ(2, output.tensor_description().shape().dim().size());
for (const auto& dim : output.tensor_description().shape().dim()) {
EXPECT_EQ(3200, dim.size());
}
}
}
}
}
int64_t cur_time = static_cast<int64_t>(5000005);
int64_t increment = static_cast<int64_t>(2000000);
auto op_names = {"ab", "abc", "abcd", "abcde"};
for (const auto& op_name : op_names) {
int64_t actual_start = start_end_times[op_name].first;
int64_t actual_end = start_end_times[op_name].second;
int64_t expected_start = cur_time;
int64_t expected_end = cur_time + increment;
EXPECT_EQ(expected_start, actual_start);
EXPECT_EQ(expected_end, actual_end);
cur_time += increment;
}
}
TEST_F(VirtualSchedulerTest, InitAndBasicScheduling) {
CreateGrapplerItemWithConv2Ds();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_EQ(8, ops_executed.size());
EXPECT_GT(ops_executed.count("x"), 0);
EXPECT_GT(ops_executed.count("y"), 0);
EXPECT_GT(ops_executed.count("f"), 0);
EXPECT_GT(ops_executed.count("c0"), 0);
EXPECT_GT(ops_executed.count("c1"), 0);
EXPECT_EQ(ops_executed.count("z"), 0);
EXPECT_EQ(ops_executed.count("c2"), 0);
EXPECT_EQ(1, ops_executed["x"].op_info.outputs_size());
EXPECT_EQ(1, ops_executed["y"].op_info.outputs_size());
EXPECT_EQ(1, ops_executed["f"].op_info.outputs_size());
EXPECT_EQ(2, ops_executed["c0"].op_info.inputs_size());
EXPECT_EQ(2, ops_executed["c1"].op_info.inputs_size());
}
TEST_F(VirtualSchedulerTest, MemoryUsage) {
CreateGrapplerItemWithAddN();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
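// x, y, z, w, and add each produce a 10x10x10x10 float tensor:
// 4 bytes * 10^4 elements = 40000 bytes.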
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> expected_names = {"x", "y", "z", "w", "add"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state.max_memory_usage);
ValidateMemoryUsageSnapshot(expected_names, /*port_num_expected=*/0,
cpu_state.mem_usage_snapshot_at_peak);
ASSERT_EQ(cpu_state.temporary_memory_usage_trace.size(), 10);
const std::pair<std::string, int64_t>& x_usage =
cpu_state.temporary_memory_usage_trace.at(4);
EXPECT_EQ(x_usage.first, "x");
EXPECT_EQ(x_usage.second, one_input_node_size);
const std::pair<std::string, int64_t>& add_usage =
cpu_state.temporary_memory_usage_trace.at(8);
EXPECT_EQ(add_usage.first, "add");
EXPECT_EQ(add_usage.second, 5 * one_input_node_size);
const std::pair<std::string, int64_t>& out_usage =
cpu_state.temporary_memory_usage_trace.at(9);
EXPECT_EQ(out_usage.first, "out");
EXPECT_EQ(out_usage.second, one_input_node_size);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 64)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 200000)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, MemoryUsageForStreamingOps) {
CreateGrapplerItemWithAddN();
auto& graph = grappler_item_->graph;
for (auto& node : *graph.mutable_node()) {
if (node.name() == "out" || node.name() == "add") {
node.set_device(kCPU1);
}
if (node.name() == "z" || node.name() == "w")
(*node.mutable_attr())[kStreaming].mutable_list()->add_b(true);
}
InitScheduler();
auto ops_executed = RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
const auto& cpu_state_1 = device_states->at(kCPU1);
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> cpu_0_expected_tensors = {"x", "y"};
const std::vector<string> cpu_1_expected_tensors = {"x", "y", "add"};
EXPECT_EQ(cpu_0_expected_tensors.size() * one_input_node_size,
cpu_state_0.max_memory_usage);
EXPECT_EQ(cpu_1_expected_tensors.size() * one_input_node_size,
cpu_state_1.max_memory_usage);
EXPECT_EQ(cpu_state_0.memory_usage, 0);
EXPECT_EQ(cpu_state_1.memory_usage, 0);
}
TEST_F(VirtualSchedulerTest, MemoryUsageWithExecutionCount) {
CreateGrapplerItemWithAddN();
auto& graph = grappler_item_->graph;
for (auto& node : *graph.mutable_node()) {
(*node.mutable_attr())[kExecutionCount].set_i(10000);
}
InitScheduler();
auto ops_executed = RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
int64_t one_input_node_size = 4 * 10 * 10 * 10 * 10;
const std::vector<string> expected_names = {"x", "y", "z", "w", "add"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state_0.max_memory_usage);
EXPECT_EQ(cpu_state_0.memory_usage, 0);
Costs c = scheduler_->Summary();
EXPECT_EQ(64, c.persistent_memory);
EXPECT_EQ(200000, c.temporary_memory);
EXPECT_EQ(200064, c.max_memory);
}
TEST_F(VirtualSchedulerTest, UnnecessaryFeedNodes) {
CreateGrapplerItemWithUnnecessaryPlaceholderNodes();
InitScheduler();
auto ops_executed = RunScheduler("");
ASSERT_EQ(1, ops_executed.size());
ASSERT_EQ(ops_executed.count("x"), 1);
}
TEST_F(VirtualSchedulerTest, ControlDependency) {
CreateGrapplerItemWithControlDependency();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
int64_t one_input_node_size = 4;
const std::vector<string> expected_names = {"x", "y", "z", "w",
"u", "v", "t"};
EXPECT_EQ(expected_names.size() * one_input_node_size,
cpu_state.max_memory_usage);
ValidateMemoryUsageSnapshot(expected_names, /*port_num_expected=*/-1,
cpu_state.mem_usage_snapshot_at_peak);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 0)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 28)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, ComplexDependency) {
CreateGrapplerItemWithBatchNorm();
InitScheduler();
RunScheduler("bn");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
const int x_size = batch_size_ * width_ * height_ * depth_in_;
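// Expected residents while bn runs: x and bn.y (x_size floats each), bn's
// port 2 (batch_variance, depth_in_ floats), and one element for bn's
// control output (port -1); see the nodes_in_memory set checked below.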
int64_t expected_size =
4 * (2 * x_size + depth_in_ + 1);
EXPECT_EQ(expected_size, cpu_state.memory_usage);
std::set<std::pair<string, int>> nodes_in_memory;
std::transform(
cpu_state.nodes_in_memory.begin(), cpu_state.nodes_in_memory.end(),
std::inserter(nodes_in_memory, nodes_in_memory.begin()),
[](const std::pair<const NodeDef*, int>& node_port) {
return std::make_pair(node_port.first->name(), node_port.second);
});
std::set<std::pair<string, int>> expected = {
std::make_pair("bn", -1),
std::make_pair("bn", 0),
std::make_pair("bn", 2),
std::make_pair("x", 0),
};
ExpectSetEq(expected, nodes_in_memory);
const auto* node_states = scheduler_->GetNodeStates();
const NodeState* bn_node = nullptr;
const NodeState* x_node = nullptr;
for (const auto& nodedef_node_state : *node_states) {
const NodeDef* node = nodedef_node_state.first;
const NodeState& node_state = nodedef_node_state.second;
if (node->name() == "bn") {
bn_node = &node_state;
}
if (node->name() == "x") {
x_node = &node_state;
}
}
CHECK_NOTNULL(bn_node);
CHECK_NOTNULL(x_node);
ValidateNodeDefs({"bn", "z1"}, x_node->outputs.at(0));
ValidateNodeDefs({"z4"}, bn_node->outputs.at(-1));
ValidateNodeDefs({"z1"}, bn_node->outputs.at(0));
ValidateNodeDefs({"z2", "z3", "z2", "z3"}, bn_node->outputs.at(2));
}
TEST_F(VirtualSchedulerTest, Variable) {
CreateGrapplerItemWithConv2DAndVariable();
InitScheduler();
RunScheduler("");
const auto* device_states = scheduler_->GetDeviceStates();
const auto& cpu_state = device_states->at(kCPU0);
ValidateMemoryUsageSnapshot({"f", "Const/Const"}, 0,
cpu_state.persistent_nodes);
ValidateMemoryUsageSnapshot({"x"}, 0,
cpu_state.mem_usage_snapshot_at_peak);
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 4624)},
scheduler_->GetPersistentMemoryUsage());
ExpectUnorderedMapEq(
{std::make_pair("/job:localhost/replica:0/task:0/cpu:0", 12800)},
scheduler_->GetPeakMemoryUsage());
}
TEST_F(VirtualSchedulerTest, WhileLoop) {
CreateGrapplerItemWithLoop();
InitScheduler();
RunScheduler("");
RunMetadata metadata;
scheduler_->Summary(&metadata);
int num_next_iteration = 0;
int num_next_iteration_1 = 0;
int num_exit = 0;
int num_exit_1 = 0;
int64_t next_iter_start_micro;
int64_t next_iter_1_start_micro;
int64_t exit_start_micro;
int64_t exit_1_start_micro;
std::unordered_map<string, int64_t> start_times;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
start_times[stats.node_name()] = stats.all_start_micros();
if (stats.node_name() == "while/NextIteration") {
++num_next_iteration;
next_iter_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/NextIteration_1") {
++num_next_iteration_1;
next_iter_1_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/Exit") {
++num_exit;
exit_start_micro = stats.all_start_micros();
} else if (stats.node_name() == "while/Exit_1") {
++num_exit_1;
exit_1_start_micro = stats.all_start_micros();
}
}
}
EXPECT_EQ(1, num_next_iteration);
EXPECT_EQ(1, num_next_iteration_1);
EXPECT_EQ(1, num_exit);
EXPECT_EQ(1, num_exit_1);
EXPECT_NE(next_iter_start_micro, next_iter_1_start_micro);
EXPECT_NE(exit_start_micro, exit_1_start_micro);
ValidateDependencyChain(
start_times,
{"Const", "while/Enter",
"while/Less/y", "while/Less", "while/LoopCond", "while/Switch",
"while/Identity", "while/add/y", "while/add", "while/NextIteration"});
ValidateDependencyChain(start_times,
{"ones", "while/Enter_1",
"while/Switch_1", "while/Identity_1", "while/concat",
"while/NextIteration_1"});
ValidateDependencyChain(start_times, {"while/Switch", "while/Exit"});
ValidateDependencyChain(
start_times, {"while/Identity", "while/concat/axis", "while/concat"});
ValidateDependencyChain(start_times, {"while/Identity", "while/add"});
ValidateDependencyChain(start_times, {"while/Switch_1", "while/Exit_1"});
}
TEST_F(VirtualSchedulerTest, AnnotatedWhileLoop) {
{
CreateGrapplerItemWithLoop();
InitScheduler();
RunScheduler("");
Costs c = scheduler_->Summary();
EXPECT_EQ(23, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 2, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
{
CreateGrapplerItemWithLoopAnnotated();
InitScheduler();
RunScheduler("");
Costs c = scheduler_->Summary();
EXPECT_EQ(178, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 2, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
}
TEST_F(VirtualSchedulerTest, Condition) {
{
CreateGrapplerItemWithCondition();
InitScheduler();
RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
int num_a = 0;
int num_less = 0;
int num_switch = 0;
int num_first = 0;
int num_second = 0;
int num_merge = 0;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
if (stats.node_name() == "a") {
++num_a;
} else if (stats.node_name() == "Less") {
++num_less;
} else if (stats.node_name() == "Switch") {
++num_switch;
} else if (stats.node_name() == "First") {
++num_first;
} else if (stats.node_name() == "Second") {
++num_second;
} else if (stats.node_name() == "Merge") {
++num_merge;
}
}
}
EXPECT_EQ(1, num_a);
EXPECT_EQ(1, num_less);
EXPECT_EQ(1, num_switch);
EXPECT_EQ(1, num_first);
EXPECT_EQ(1, num_second);
EXPECT_EQ(2, num_merge);
EXPECT_EQ(7, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() + 1, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
{
CreateGrapplerItemWithCondition();
for (auto& node : *grappler_item_->graph.mutable_node()) {
if (node.name() == "Switch") {
AttrValue attr_output_info;
(*attr_output_info.mutable_list()).add_i(0);
AddNodeAttr(kOutputSlots, attr_output_info, &node);
}
}
InitScheduler();
RunScheduler("");
RunMetadata metadata;
Costs c = scheduler_->Summary(&metadata);
int num_a = 0;
int num_less = 0;
int num_switch = 0;
int num_first = 0;
int num_second = 0;
int num_merge = 0;
for (const auto& device_step_stats : metadata.step_stats().dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
if (stats.node_name() == "a") {
++num_a;
} else if (stats.node_name() == "Less") {
++num_less;
} else if (stats.node_name() == "Switch") {
++num_switch;
} else if (stats.node_name() == "First") {
++num_first;
} else if (stats.node_name() == "Second") {
++num_second;
} else if (stats.node_name() == "Merge") {
++num_merge;
}
}
}
EXPECT_EQ(1, num_a);
EXPECT_EQ(1, num_less);
EXPECT_EQ(1, num_switch);
EXPECT_EQ(1, num_first);
EXPECT_EQ(0, num_second);
EXPECT_EQ(1, num_merge);
EXPECT_EQ(5, c.execution_time.asMicroSeconds().count());
EXPECT_EQ(grappler_item_->graph.node_size() - 1, c.num_ops_total);
EXPECT_FALSE(c.inaccurate);
EXPECT_EQ(0, c.num_ops_with_unknown_shapes);
}
}
TEST_F(VirtualSchedulerTest, InterDeviceTransfer) {
CreateGrapplerItemWithInterDeviceTransfers();
InitScheduler();
auto ops_executed = RunScheduler("");
auto get_port_num = [](const string& name) -> int {
if (absl::StrContains(name, "bn_0")) {
return 0;
} else if (absl::StrContains(name, "bn_1")) {
return 1;
} else if (absl::StrContains(name, "bn_2")) {
return 2;
} else if (absl::StrContains(name, "bn_minus1")) {
return -1;
}
return -999;
};
std::unordered_map<string, int> op_count;
std::unordered_map<int, string> recv_op_names;
std::unordered_map<int, string> send_op_names;
for (const auto& x : ops_executed) {
const auto& name = x.first;
const auto& node_info = x.second;
const auto& op = node_info.op_info.op();
if (op == kRecv) {
recv_op_names[get_port_num(name)] = name;
} else if (op == kSend) {
send_op_names[get_port_num(name)] = name;
}
op_count[op]++;
}
EXPECT_EQ(op_count.at(kSend), op_count.at(kRecv));
EXPECT_EQ(op_count.at(kRecv), 3);
EXPECT_EQ(op_count.at(kSend), 3);
auto get_output_size = [this, ops_executed](const string& name) -> int64 {
const auto& output_properties_ = ops_executed.at(name).op_info.outputs();
std::vector<OpInfo::TensorProperties> output_properties;
for (const auto& output_property : output_properties_) {
output_properties.push_back(output_property);
}
return CalculateOutputSize(output_properties, 0);
};
int input_size = 4 * batch_size_ * width_ * height_ * depth_in_;
EXPECT_EQ(get_output_size(recv_op_names[0]), input_size);
EXPECT_EQ(get_output_size(send_op_names[0]), input_size);
EXPECT_EQ(get_output_size(recv_op_names[1]), 4 * depth_in_);
EXPECT_EQ(get_output_size(send_op_names[1]), 4 * depth_in_);
EXPECT_EQ(get_output_size(recv_op_names[2]), 4 * depth_in_);
EXPECT_EQ(get_output_size(send_op_names[2]), 4 * depth_in_);
}
TEST_F(VirtualSchedulerTest, GraphWithSendRecv) {
CreateGrapplerItemWithSendRecv();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Const"), 0);
EXPECT_GT(ops_executed.count("Send"), 0);
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, GraphWithSendRecvDifferentDevice) {
CreateGrapplerItemWithSendRecv();
auto& graph = grappler_item_->graph;
const string recv_device = kCPU1;
for (int i = 0; i < graph.node_size(); i++) {
auto* node = graph.mutable_node(i);
if (node->name() == "Recv") {
node->set_device(recv_device);
auto* attr = node->mutable_attr();
(*attr)["recv_device"].set_s(recv_device);
} else if (node->name() == "Send") {
auto* attr = node->mutable_attr();
(*attr)["recv_device"].set_s(recv_device);
}
}
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Const"), 0);
EXPECT_GT(ops_executed.count("Send"), 0);
EXPECT_GT(ops_executed.count("Send_Send_0_from_/job_localhost/replica_0/"
"task_0/cpu_0_to_/job_localhost"
"/replica_0/task_0/cpu_1"),
0);
EXPECT_GT(ops_executed.count(
"Recv_Send_0_on_/job_localhost/replica_0/task_0/cpu_1"),
0);
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, GraphWithOnlyRecv) {
CreateGrapplerItemWithRecvWithoutSend();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("Recv"), 0);
}
TEST_F(VirtualSchedulerTest, AddMergeSwitch) {
scheduler_ = std::make_unique<TestVirtualScheduler>(
/*use_static_shapes=*/true,
/*use_aggressive_shape_inference=*/true, &composite_node_manager_,
cluster_.get());
CreateGrapplerItemWithSwitchMergeInput();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("z"), 0);
}
TEST_F(VirtualSchedulerTest, AddFromOneTensor) {
CreateGrapplerItemWithAddFromOneTensor();
InitScheduler();
auto ops_executed = RunScheduler("");
EXPECT_GT(ops_executed.count("y"), 0);
EXPECT_GT(ops_executed.count("x"), 0);
}
TEST_F(VirtualSchedulerTest, TestNodeCostOutputTensorSize) {
CreateGrapplerItemWithMatmulChain();
InitScheduler();
RunScheduler("ab");
int32_t persistent_memory_before =
scheduler_->GetPersistentMemoryUsage().at(kCPU0);
auto* device_states = scheduler_->GetDeviceStates();
int32_t memory_usage = device_states->at(kCPU0).memory_usage;
Costs node_costs = Costs::ZeroCosts(false);
const int32_t node_one_cost = 12345;
const int32_t node_two_cost = 98765;
const int32_t input_size = 4 * 3200 * 3200;
node_costs.persistent_memory = node_one_cost;
node_costs.temporary_memory = 0;
node_costs.output_tensor_size_bytes = {{0, node_one_cost}};
node_costs.persistent_output_ports = {0};
scheduler_->MarkCurrNodeExecuted(node_costs);
device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_0 = device_states->at(kCPU0);
memory_usage -= 2 * input_size;
EXPECT_EQ(cpu_state_0.memory_usage, memory_usage);
int64_t persistent_memory = node_one_cost + persistent_memory_before;
EXPECT_EQ(scheduler_->GetPersistentMemoryUsage().at(kCPU0),
persistent_memory);
node_costs = Costs::ZeroCosts(false);
node_costs.persistent_memory = 0;
node_costs.temporary_memory = node_two_cost;
node_costs.output_tensor_size_bytes = {{0, node_two_cost}};
scheduler_->MarkCurrNodeExecuted(node_costs);
device_states = scheduler_->GetDeviceStates();
const auto& cpu_state_1 = device_states->at(kCPU0);
memory_usage += node_two_cost - input_size;
EXPECT_EQ(cpu_state_1.memory_usage, memory_usage);
EXPECT_EQ(scheduler_->GetPersistentMemoryUsage().at(kCPU0),
persistent_memory);
bool more_nodes = true;
do {
OpContext op_context = scheduler_->GetCurrNode();
node_costs = SimplePredictCosts(op_context);
more_nodes = scheduler_->MarkCurrNodeExecuted(node_costs);
} while (more_nodes);
RunMetadata metadata;
Costs final_cost = scheduler_->Summary(&metadata);
EXPECT_EQ(final_cost.persistent_memory, persistent_memory);
StepStats stepstats = metadata.step_stats();
for (const auto& device_step_stats : stepstats.dev_stats()) {
for (const auto& stats : device_step_stats.node_stats()) {
const auto& allocation_description =
stats.output().at(0).tensor_description().allocation_description();
if (stats.node_name() == "abc") {
EXPECT_NE(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
const auto& mem_stats = stats.memory_stats();
EXPECT_EQ(mem_stats.persistent_memory_size(), node_one_cost);
} else if (stats.node_name() == "abcd") {
EXPECT_NE(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
} else {
EXPECT_EQ(allocation_description.allocated_bytes(),
allocation_description.requested_bytes());
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_scheduler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1e67b253-9a27-4432-91a1-3609c13e9dfd | cpp | tensorflow/tensorflow | graph_memory | tensorflow/core/grappler/costs/graph_memory.cc | tensorflow/core/grappler/costs/graph_memory_test.cc | #include "tensorflow/core/grappler/costs/graph_memory.h"
#include <deque>
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
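// Estimates memory usage statically by simulating the item on a virtual
// cluster and replaying the step stats that the simulation produces.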
Status GraphMemory::InferStatically(
const std::unordered_map<string, DeviceProperties>& devices) {
VirtualCluster cluster(devices);
TF_RETURN_IF_ERROR(cluster.Provision());
TF_RETURN_IF_ERROR(cluster.Initialize(item_));
RunMetadata metadata;
Status s = cluster.Run(item_, &metadata);
if (!s.ok() && s.code() != error::RESOURCE_EXHAUSTED) {
return s;
}
InferFromTrace(metadata.step_stats());
return absl::OkStatus();
}
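// Measures memory usage by actually running the item on the given cluster;
// requires detailed stats collection to be enabled.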
Status GraphMemory::InferDynamically(Cluster* cluster) {
if (!cluster->DetailedStatsEnabled()) {
return errors::Unavailable("Detailed stats collection must be enabled");
}
TF_RETURN_IF_ERROR(cluster->Initialize(item_));
RunMetadata metadata;
TF_RETURN_IF_ERROR(cluster->Run(item_, &metadata));
InferFromTrace(metadata.step_stats());
return absl::OkStatus();
}
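// Largest peak usage observed across all devices, or -1 if none recorded.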
int64_t GraphMemory::GetWorstCaseMemoryUsage() const {
int64_t worst_case = -1;
for (const auto& peak_usage : peak_usage_) {
worst_case = std::max(worst_case, peak_usage.second.used_memory);
}
return worst_case;
}
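// Accumulates the worst case (every node's outputs alive at once) and the
// best case (the largest single node's inputs plus outputs) memory usage.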
void GraphMemory::InferMemUsageForNodes(
const std::vector<const NodeDef*>& nodes, GraphProperties* properties,
int64_t* worst_case_memory_usage, int64_t* best_case_memory_usage) const {
*worst_case_memory_usage = 0;
*best_case_memory_usage = 0;
for (const auto& node : item_.graph.node()) {
std::vector<OpInfo::TensorProperties> outputs =
properties->GetOutputProperties(node.name());
int64_t node_memory_usage = InferMemUsageForNeighbors(outputs);
*worst_case_memory_usage += node_memory_usage;
std::vector<OpInfo::TensorProperties> inputs =
properties->GetInputProperties(node.name());
node_memory_usage += InferMemUsageForNeighbors(inputs);
*best_case_memory_usage =
std::max(*best_case_memory_usage, node_memory_usage);
}
}
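// Sums the in-memory sizes of the given tensors, treating unknown
// dimensions as size 1 and skipping tensors of unknown rank.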
int64_t GraphMemory::InferMemUsageForNeighbors(
const std::vector<OpInfo::TensorProperties>& props) const {
int64_t neighbors_memory_usage = 0;
for (const auto& prop : props) {
DataType dtype = prop.dtype();
int size = DataTypeSize(dtype);
TensorShapeProto shape = prop.shape();
if (shape.unknown_rank()) {
continue;
}
for (int i = 0; i < shape.dim_size(); ++i) {
if (shape.dim(i).size() < 0) {
shape.mutable_dim(i)->set_size(1);
}
}
int num_elems = TensorShape(shape).num_elements();
neighbors_memory_usage += num_elems * size;
}
return neighbors_memory_usage;
}
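// Live tensors are keyed by "node:output_id". New entries are stored in the
// per-device std::deque (presumably chosen over std::vector because
// push_front never invalidates pointers to existing elements) and indexed in
// the lookup map.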
static GraphMemory::LiveTensor* FindOrCreateLiveTensor(
const string& node_name, int output_id,
std::unordered_map<string, GraphMemory::LiveTensor*>* live_tensors,
std::deque<GraphMemory::LiveTensor>* device_tensors) {
string name = strings::StrCat(node_name, ":", output_id);
GraphMemory::LiveTensor* live;
auto it = live_tensors->find(name);
if (it == live_tensors->end()) {
GraphMemory::LiveTensor temp;
temp.node = node_name;
temp.output_id = output_id;
temp.allocation_time = 0;
temp.deallocation_time = 0;
device_tensors->push_front(temp);
live = &device_tensors->front();
(*live_tensors)[name] = live;
} else {
live = it->second;
}
return live;
}
namespace {
struct Event {
Event(int64_t _timestamp, bool _allocated,
const GraphMemory::LiveTensor* _tensor)
: timestamp(_timestamp), allocated(_allocated), tensor(_tensor) {}
int64_t timestamp;
bool allocated;
const GraphMemory::LiveTensor* tensor;
bool operator<(const Event& other) const {
return timestamp < other.timestamp;
}
};
}
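// Peak-memory inference from an execution trace: for each device, build an
// allocation event at the producer's start time and a deallocation event 1ns
// after the last consumer (or the producer itself) finishes, stable-sort the
// events by timestamp, then sweep while maintaining a running byte total and
// the set of live tensors. The peak is only sampled at timestamp boundaries,
// so a same-time allocation/deallocation pair nets out instead of being
// double counted.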
void GraphMemory::InferFromTrace(const StepStats& timeline) {
std::unordered_map<string, string> node_placement;
for (const auto& dev_stats : timeline.dev_stats()) {
for (const auto& node_stats : dev_stats.node_stats()) {
node_placement[node_stats.node_name()] = dev_stats.device();
}
}
std::unordered_map<string, LiveTensor*> live_tensors;
std::unordered_map<string, std::deque<LiveTensor>> live_tensors_per_device;
std::unordered_map<string, const NodeDef*> node_map;
for (const NodeDef& node : item_.graph.node()) {
node_map[node.name()] = &node;
}
for (const auto& dev_stats : timeline.dev_stats()) {
const string& device_name = dev_stats.device();
    // string::find() returns npos (a non-zero value) on a miss, so the
    // results must be compared against npos explicitly.
    const bool is_gpu = (device_name.find("GPU:") != string::npos ||
                         device_name.find("gpu:") != string::npos);
std::deque<LiveTensor>& device_tensors =
live_tensors_per_device[dev_stats.device()];
for (const auto& node_stats : dev_stats.node_stats()) {
for (int i = 0; i < node_stats.output_size(); ++i) {
const auto& output = node_stats.output(i);
LiveTensor* live = FindOrCreateLiveTensor(
node_stats.node_name(), i, &live_tensors, &device_tensors);
live->memory_used = output.tensor_description()
.allocation_description()
.allocated_bytes();
live->allocation_time =
Costs::MicroSeconds(node_stats.all_start_micros());
live->deallocation_time = std::max<Costs::Duration>(
live->deallocation_time,
Costs::NanoSeconds(1) +
Costs::MicroSeconds(node_stats.all_start_micros() +
node_stats.op_end_rel_micros()));
}
auto it = node_map.find(node_stats.node_name());
if (it == node_map.end()) {
continue;
}
const NodeDef* node = it->second;
std::unordered_set<int> swapped_inputs;
if (is_gpu) {
auto it = node->attr().find("_swap_to_host");
if (it != node->attr().end()) {
const AttrValue& val = it->second;
for (int port_id : val.list().i()) {
swapped_inputs.insert(port_id);
}
}
}
for (int i = 0; i < node->input_size(); ++i) {
if (swapped_inputs.find(i) != swapped_inputs.end()) {
continue;
}
const string& input = node->input(i);
int position;
string input_node = ParseNodeName(input, &position);
if (position < 0) {
continue;
}
LiveTensor* live = FindOrCreateLiveTensor(
input_node, position, &live_tensors,
&live_tensors_per_device[node_placement[input_node]]);
live->deallocation_time = std::max<Costs::Duration>(
live->deallocation_time,
Costs::NanoSeconds(1) +
Costs::MicroSeconds(node_stats.all_start_micros() +
node_stats.op_end_rel_micros()));
}
}
}
for (const auto& live_per_device : live_tensors_per_device) {
std::vector<Event> events;
events.reserve(2 * live_per_device.second.size());
for (const auto& live : live_per_device.second) {
events.emplace_back(static_cast<int64_t>(live.allocation_time.count()),
true, &live);
events.emplace_back(static_cast<int64_t>(live.deallocation_time.count()),
false, &live);
}
std::stable_sort(events.begin(), events.end());
size_t peak = 0;
std::unordered_set<const LiveTensor*> live_at_peak;
size_t current = 0;
std::unordered_set<const LiveTensor*> currently_live;
int events_size = events.size();
for (int i = 0; i < events_size; ++i) {
const auto& event = events[i];
if (event.allocated) {
VLOG(1) << "At time " << event.timestamp << " allocated "
<< event.tensor->memory_used << " for tensor "
<< event.tensor->node << ":" << event.tensor->output_id;
current += event.tensor->memory_used;
currently_live.insert(event.tensor);
} else {
VLOG(1) << "At time " << event.timestamp << " deallocated "
<< event.tensor->memory_used << " for tensor "
<< event.tensor->node << ":" << event.tensor->output_id;
current -= event.tensor->memory_used;
currently_live.erase(event.tensor);
}
if (i + 1 == events_size || event.timestamp != events[i + 1].timestamp) {
if (current > peak) {
peak = current;
live_at_peak = currently_live;
}
}
}
MemoryUsage& peak_mem_usage = peak_usage_[live_per_device.first];
peak_mem_usage.used_memory = peak;
peak_mem_usage.live_tensors.clear();
peak_mem_usage.live_tensors.reserve(live_at_peak.size());
for (const auto& live : live_at_peak) {
peak_mem_usage.live_tensors.push_back(*live);
}
}
}
}
} | #include "tensorflow/core/grappler/costs/graph_memory.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GraphMemoryTest : public ::testing::Test {
protected:
std::unordered_map<string, DeviceProperties> devices_;
public:
GraphMemoryTest() {
devices_["/CPU:0"].set_type("CPU");
devices_["/CPU:0"].set_num_cores(1);
devices_["/CPU:0"].set_frequency(1);
devices_["/CPU:0"].set_bandwidth(1);
devices_["/GPU:0"].set_type("GPU");
devices_["/GPU:0"].set_num_cores(1);
devices_["/GPU:0"].set_frequency(1);
    devices_["/GPU:0"].set_bandwidth(1);
(*devices_["/GPU:0"].mutable_environment())["architecture"] = "3";
}
};
TEST_F(GraphMemoryTest, Basic) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"/CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& mem_usage =
memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(120, mem_usage.used_memory);
std::set<string> tensors;
for (const auto& t : mem_usage.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("Sign:0");
expected.insert("Sign_1:0");
expected.insert("x:0");
EXPECT_EQ(expected, tensors);
}
TEST_F(GraphMemoryTest, UnknownBatchSize) {
TrivialTestGraphInputYielder fake_input(4, 1, -1, false, {"/CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& mem_usage =
memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(16, mem_usage.used_memory);
std::set<string> tensors;
for (const auto& t : mem_usage.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("Const/Const:0");
expected.insert("Sign:0");
expected.insert("x:0");
EXPECT_EQ(expected, tensors);
}
TEST_F(GraphMemoryTest, MultiDevice) {
TrivialTestGraphInputYielder fake_input(4, 2, 1024 * 1024, false,
{"/CPU:0", "/GPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& cpu_mem = memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(16777216, cpu_mem.used_memory);
std::set<string> cpu_tensors;
for (const auto& t : cpu_mem.live_tensors) {
cpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> cpu_expected;
cpu_expected.insert("Recv_Sign_1_0_on_/CPU_0:0");
cpu_expected.insert("Sign:0");
cpu_expected.insert("x:0");
cpu_expected.insert("AddN:0");
EXPECT_EQ(cpu_expected, cpu_tensors);
const GraphMemory::MemoryUsage& gpu_mem = memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(16777216, gpu_mem.used_memory);
std::set<string> gpu_tensors;
for (const auto& t : gpu_mem.live_tensors) {
gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> gpu_expected;
gpu_expected.insert("Recv_AddN_0_on_/GPU_0:0");
gpu_expected.insert("Sign_1:0");
gpu_expected.insert("AddN_1:0");
gpu_expected.insert("AddN_3:0");
EXPECT_EQ(gpu_expected, gpu_tensors);
}
TEST_F(GraphMemoryTest, GpuSwapping) {
TrivialTestGraphInputYielder fake_input(4, 2, 1024 * 1024, false, {"/GPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
item.feed.clear();
{
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& gpu_mem =
memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(20971520, gpu_mem.used_memory);
std::set<string> gpu_tensors;
for (const auto& t : gpu_mem.live_tensors) {
gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> gpu_expected;
gpu_expected.insert("Sign:0");
gpu_expected.insert("Sign_1:0");
gpu_expected.insert("AddN:0");
gpu_expected.insert("AddN_1:0");
gpu_expected.insert("AddN_2:0");
EXPECT_EQ(gpu_expected, gpu_tensors);
}
{
for (auto& node : *item.graph.mutable_node()) {
if (node.name() == "AddN_1") {
(*node.mutable_attr())["_swap_to_host"].mutable_list()->add_i(0);
}
}
GraphMemory memory(item);
Status s = memory.InferStatically(devices_);
TF_CHECK_OK(s);
const GraphMemory::MemoryUsage& new_gpu_mem =
memory.GetPeakMemoryUsage("/GPU:0");
EXPECT_EQ(20971520, new_gpu_mem.used_memory);
std::set<string> new_gpu_tensors;
for (const auto& t : new_gpu_mem.live_tensors) {
new_gpu_tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> new_gpu_expected;
new_gpu_expected.insert("AddN:0");
new_gpu_expected.insert("AddN_1:0");
new_gpu_expected.insert("AddN_2:0");
new_gpu_expected.insert("AddN_3:0");
new_gpu_expected.insert("AddN_4:0");
EXPECT_EQ(new_gpu_expected, new_gpu_tensors);
}
}
TEST_F(GraphMemoryTest, CtrlDependencies) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a").WithDevice("/CPU:0"), 10.0f, {3});
Output v =
ops::Variable(s.WithOpName("v").WithDevice("/CPU:0"), {3}, DT_FLOAT);
Output assign =
ops::Assign(s.WithOpName("assign").WithDevice("/CPU:0"), v, a);
ops::NoOp init(
s.WithOpName("init").WithDevice("/CPU:0").WithControlDependencies(
assign));
GrapplerItem item;
item.fetch.push_back("init");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphMemory memory(item);
Status status = memory.InferStatically(devices_);
TF_CHECK_OK(status);
const GraphMemory::MemoryUsage& mem = memory.GetPeakMemoryUsage("/CPU:0");
EXPECT_EQ(36, mem.used_memory);
std::set<string> tensors;
for (const auto& t : mem.live_tensors) {
tensors.insert(strings::StrCat(t.node, ":", t.output_id));
}
std::set<string> expected;
expected.insert("a:0");
expected.insert("v:0");
expected.insert("assign:0");
EXPECT_EQ(expected, tensors);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/graph_memory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/graph_memory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7015e367-82f3-4d91-a11d-8439c8d9ae65 | cpp | tensorflow/tensorflow | op_level_cost_estimator | tensorflow/core/grappler/costs/op_level_cost_estimator.cc | tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc | #include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_context.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/util/overflow.h"
#include "tensorflow/core/util/padding.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
constexpr int kOpsPerMac = 2;
constexpr char kGuaranteeConst[] = "GuaranteeConst";
constexpr char kAddN[] = "AddN";
constexpr char kBitCast[] = "BitCast";
constexpr char kConcatV2[] = "ConcatV2";
constexpr char kConv2d[] = "Conv2D";
constexpr char kConv2dBackpropFilter[] = "Conv2DBackpropFilter";
constexpr char kConv2dBackpropInput[] = "Conv2DBackpropInput";
constexpr char kFusedConv2dBiasActivation[] = "FusedConv2DBiasActivation";
constexpr char kDataFormatVecPermute[] = "DataFormatVecPermute";
constexpr char kDepthToSpace[] = "DepthToSpace";
constexpr char kDepthwiseConv2dNative[] = "DepthwiseConv2dNative";
constexpr char kDepthwiseConv2dNativeBackpropFilter[] =
"DepthwiseConv2dNativeBackpropFilter";
constexpr char kDepthwiseConv2dNativeBackpropInput[] =
"DepthwiseConv2dNativeBackpropInput";
constexpr char kMatMul[] = "MatMul";
constexpr char kXlaEinsum[] = "XlaEinsum";
constexpr char kEinsum[] = "Einsum";
constexpr char kExpandDims[] = "ExpandDims";
constexpr char kFill[] = "Fill";
constexpr char kSparseMatMul[] = "SparseMatMul";
constexpr char kSparseTensorDenseMatMul[] = "SparseTensorDenseMatMul";
constexpr char kPlaceholder[] = "Placeholder";
constexpr char kIdentity[] = "Identity";
constexpr char kIdentityN[] = "IdentityN";
constexpr char kRefIdentity[] = "RefIdentity";
constexpr char kNoOp[] = "NoOp";
constexpr char kReshape[] = "Reshape";
constexpr char kSplit[] = "Split";
constexpr char kSqueeze[] = "Squeeze";
constexpr char kRecv[] = "_Recv";
constexpr char kSend[] = "_Send";
constexpr char kBatchMatMul[] = "BatchMatMul";
constexpr char kBatchMatMulV2[] = "BatchMatMulV2";
constexpr char kOneHot[] = "OneHot";
constexpr char kPack[] = "Pack";
constexpr char kRank[] = "Rank";
constexpr char kRange[] = "Range";
constexpr char kShape[] = "Shape";
constexpr char kShapeN[] = "ShapeN";
constexpr char kSize[] = "Size";
constexpr char kStopGradient[] = "StopGradient";
constexpr char kPreventGradient[] = "PreventGradient";
constexpr char kGather[] = "Gather";
constexpr char kGatherNd[] = "GatherNd";
constexpr char kGatherV2[] = "GatherV2";
constexpr char kScatterAdd[] = "ScatterAdd";
constexpr char kScatterDiv[] = "ScatterDiv";
constexpr char kScatterMax[] = "ScatterMax";
constexpr char kScatterMin[] = "ScatterMin";
constexpr char kScatterMul[] = "ScatterMul";
constexpr char kScatterSub[] = "ScatterSub";
constexpr char kScatterUpdate[] = "ScatterUpdate";
constexpr char kSlice[] = "Slice";
constexpr char kStridedSlice[] = "StridedSlice";
constexpr char kSpaceToDepth[] = "SpaceToDepth";
constexpr char kTranspose[] = "Transpose";
constexpr char kTile[] = "Tile";
constexpr char kMaxPool[] = "MaxPool";
constexpr char kMaxPoolGrad[] = "MaxPoolGrad";
constexpr char kAvgPool[] = "AvgPool";
constexpr char kAvgPoolGrad[] = "AvgPoolGrad";
constexpr char kFusedBatchNorm[] = "FusedBatchNorm";
constexpr char kFusedBatchNormGrad[] = "FusedBatchNormGrad";
constexpr char kQuantizedMatMul[] = "QuantizedMatMul";
constexpr char kQuantizedMatMulV2[] = "QuantizedMatMulV2";
constexpr char kUnpack[] = "Unpack";
constexpr char kSoftmax[] = "Softmax";
constexpr char kResizeBilinear[] = "ResizeBilinear";
constexpr char kCropAndResize[] = "CropAndResize";
constexpr char kSwitch[] = "Switch";
constexpr char kMerge[] = "Merge";
constexpr char kEnter[] = "Enter";
constexpr char kExit[] = "Exit";
constexpr char kNextIteration[] = "NextIteration";
constexpr char kConst[] = "Const";
constexpr char kVariable[] = "Variable";
constexpr char kVariableV2[] = "VariableV2";
constexpr char kAutoReloadVariable[] = "AutoReloadVariable";
constexpr char kVarHandleOp[] = "VarHandleOp";
constexpr char kVarHandlesOp[] = "_VarHandlesOp";
constexpr char kReadVariableOp[] = "ReadVariableOp";
constexpr char kReadVariablesOp[] = "_ReadVariablesOp";
constexpr char kAssignVariableOp[] = "AssignVariableOp";
constexpr char kAssignAddVariableOp[] = "AssignAddVariableOp";
constexpr char kAssignSubVariableOp[] = "AssignSubVariableOp";
static const Costs::Duration kMinComputeTime(1);
static const int64_t kMinComputeOp = 1;
namespace {
std::string GetDataFormat(const OpInfo& op_info) {
std::string data_format = "NHWC";
if (op_info.attr().find("data_format") != op_info.attr().end()) {
data_format = op_info.attr().at("data_format").s();
}
return data_format;
}
std::string GetFilterFormat(const OpInfo& op_info) {
std::string filter_format = "HWIO";
if (op_info.attr().find("filter_format") != op_info.attr().end()) {
filter_format = op_info.attr().at("filter_format").s();
}
return filter_format;
}
Padding GetPadding(const OpInfo& op_info) {
if (op_info.attr().find("padding") != op_info.attr().end() &&
op_info.attr().at("padding").s() == "VALID") {
return Padding::VALID;
}
return Padding::SAME;
}
bool IsTraining(const OpInfo& op_info) {
if (op_info.attr().find("is_training") != op_info.attr().end() &&
op_info.attr().at("is_training").b()) {
return true;
}
return false;
}
std::vector<int64_t> GetStrides(const OpInfo& op_info) {
if (op_info.attr().find("strides") != op_info.attr().end()) {
const auto strides = op_info.attr().at("strides").list().i();
DCHECK(strides.size() == 4)
<< "Attr strides is not a length-4 vector: " << op_info.DebugString();
if (strides.size() != 4) return {1, 1, 1, 1};
return {strides[0], strides[1], strides[2], strides[3]};
}
return {1, 1, 1, 1};
}
std::vector<int64_t> GetKernelSize(const OpInfo& op_info) {
if (op_info.attr().find("ksize") != op_info.attr().end()) {
const auto ksize = op_info.attr().at("ksize").list().i();
DCHECK(ksize.size() == 4)
<< "Attr ksize is not a length-4 vector: " << op_info.DebugString();
if (ksize.size() != 4) return {1, 1, 1, 1};
return {ksize[0], ksize[1], ksize[2], ksize[3]};
}
return {1, 1, 1, 1};
}
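// Closed-form output sizes for the two padding modes:
//   VALID: ceil((input - filter + 1) / stride) == (input - filter + stride) / stride
//   SAME:  ceil(input / stride)                == (input + stride - 1) / stride
// e.g. input=7, filter=3, stride=2 yields 3 (VALID) and 4 (SAME).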
int64_t GetOutputSize(const int64_t input, const int64_t filter,
const int64_t stride, const Padding& padding) {
if (padding == Padding::VALID) {
return (input - filter + stride) / stride;
} else {
return (input + stride - 1) / stride;
}
}
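// Element count of a numpy-style broadcast result: shapes are right-aligned
// and each output dimension is the max of the aligned input dimensions.
// e.g. inputs [3,1,5] and [4,1] broadcast to [3,4,5], i.e. 60 elements.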
int64_t CwiseOutputElementCount(const OpInfo& op_info) {
int max_rank = 1;
for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
max_rank = std::max(max_rank, input_properties.shape().dim_size());
}
TensorShapeProto output_shape;
output_shape.mutable_dim()->Reserve(max_rank);
for (int i = 0; i < max_rank; ++i) {
output_shape.add_dim();
}
for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
const TensorShapeProto& input_shape = input_properties.shape();
for (int i = input_shape.dim_size() - 1; i >= 0; --i) {
int output_shape_dim_index =
i + output_shape.dim_size() - input_shape.dim_size();
output_shape.mutable_dim(output_shape_dim_index)
->set_size(std::max(output_shape.dim(output_shape_dim_index).size(),
input_shape.dim(i).size()));
}
}
int64_t count = 1;
for (int i = 0; i < output_shape.dim_size(); i++) {
count *= output_shape.dim(i).size();
}
return count;
}
bool CheckRepeatedDimensions(const absl::string_view dim_str) {
int str_size = dim_str.size();
for (int idx = 0; idx < str_size - 1; idx++) {
if (dim_str.find(dim_str[idx], idx + 1) != std::string::npos) {
return true;
}
}
return false;
}
bool IsEinsumCorrectlyFormed(const OpContext& einsum_context) {
const auto& op_info = einsum_context.op_info;
auto it = op_info.attr().find("equation");
if (it == op_info.attr().end()) return false;
const absl::string_view equation = it->second.s();
std::vector<std::string> equation_split = absl::StrSplit(equation, "->");
if (equation_split.empty()) {
LOG(WARNING) << "Einsum with malformed equation";
return false;
}
std::vector<absl::string_view> input_split =
absl::StrSplit(equation_split[0], ',');
if (op_info.inputs_size() != 2 || equation_split.size() != 2) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
return false;
}
const auto& a_input = op_info.inputs(0);
const auto& b_input = op_info.inputs(1);
absl::string_view rhs_str = equation_split[1];
absl::string_view a_input_str = input_split[0];
absl::string_view b_input_str = input_split[1];
if (absl::StrContains(a_input_str, "...") ||
absl::StrContains(b_input_str, "...")) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", ellipsis not supported";
return false;
}
constexpr int kMatrixRank = 2;
bool a_input_shape_unknown = false;
bool b_input_shape_unknown = false;
std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
a_input.shape(), std::max(kMatrixRank, a_input.shape().dim_size()),
&a_input_shape_unknown);
std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
b_input.shape(), std::max(kMatrixRank, b_input.shape().dim_size()),
&b_input_shape_unknown);
if (a_input_str.size() != a_input_shape.size() ||
b_input_str.size() != b_input_shape.size()) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", equation subscripts don't match tensor rank.";
return false;
}
if (CheckRepeatedDimensions(a_input_str) ||
CheckRepeatedDimensions(b_input_str) ||
CheckRepeatedDimensions(rhs_str)) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", Subscripts where axis appears more than once for a single "
"input are not yet supported";
return false;
}
return true;
}
}
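// Returns a shape of exactly `rank` dimensions, substituting 1 for any
// dimension that is unknown (negative) or missing, as a conservative lower
// bound. *found_unknown_shapes is set whenever a substitution or rank
// mismatch occurs, so callers can flag their estimate as inaccurate.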
std::vector<int64_t> MaybeGetMinimumShape(
const TensorShapeProto& original_shape, int rank,
bool* found_unknown_shapes) {
std::vector<int64_t> minimal_shape(rank, 1L);
if (original_shape.dim_size() == 0) {
*found_unknown_shapes |= original_shape.unknown_rank();
return minimal_shape;
}
*found_unknown_shapes |= original_shape.dim_size() != rank;
for (int i = 0; i < std::min(rank, original_shape.dim_size()); ++i) {
if (original_shape.dim(i).size() < 0) {
*found_unknown_shapes = true;
} else {
minimal_shape[i] = original_shape.dim(i).size();
}
}
*found_unknown_shapes |= original_shape.unknown_rank();
return minimal_shape;
}
OpLevelCostEstimator::OpLevelCostEstimator() {
typedef absl::Status (OpLevelCostEstimator::*CostImpl)(
const OpContext& op_context, NodeCosts*) const;
auto wrap = [this](CostImpl impl)
-> std::function<absl::Status(const OpContext&, NodeCosts*)> {
return [this, impl](const OpContext& op_context, NodeCosts* node_costs) {
return (this->*impl)(op_context, node_costs);
};
};
device_cost_impl_.emplace(kConv2d,
wrap(&OpLevelCostEstimator::PredictConv2D));
device_cost_impl_.emplace(
kConv2dBackpropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
device_cost_impl_.emplace(
kConv2dBackpropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
device_cost_impl_.emplace(
kFusedConv2dBiasActivation,
wrap(&OpLevelCostEstimator::PredictFusedConv2DBiasActivation));
device_cost_impl_.emplace(kDepthwiseConv2dNative,
wrap(&OpLevelCostEstimator::PredictConv2D));
device_cost_impl_.emplace(
kDepthwiseConv2dNativeBackpropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
device_cost_impl_.emplace(
kDepthwiseConv2dNativeBackpropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
device_cost_impl_.emplace(kMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kSparseMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(
kSparseTensorDenseMatMul,
wrap(&OpLevelCostEstimator::PredictSparseTensorDenseMatMul));
device_cost_impl_.emplace(kBatchMatMul,
wrap(&OpLevelCostEstimator::PredictBatchMatMul));
device_cost_impl_.emplace(kBatchMatMulV2,
wrap(&OpLevelCostEstimator::PredictBatchMatMul));
device_cost_impl_.emplace(kQuantizedMatMul,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kQuantizedMatMulV2,
wrap(&OpLevelCostEstimator::PredictMatMul));
device_cost_impl_.emplace(kXlaEinsum,
wrap(&OpLevelCostEstimator::PredictEinsum));
device_cost_impl_.emplace(kEinsum,
wrap(&OpLevelCostEstimator::PredictEinsum));
device_cost_impl_.emplace(kNoOp, wrap(&OpLevelCostEstimator::PredictNoOp));
device_cost_impl_.emplace(kGuaranteeConst,
wrap(&OpLevelCostEstimator::PredictNoOp));
device_cost_impl_.emplace(kGather,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kGatherNd,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kGatherV2,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kScatterAdd,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterDiv,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterMax,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterMin,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterMul,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterSub,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kScatterUpdate,
wrap(&OpLevelCostEstimator::PredictScatter));
device_cost_impl_.emplace(kSlice,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kStridedSlice,
wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
device_cost_impl_.emplace(kPlaceholder,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kIdentity,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kIdentityN,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kRefIdentity,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kStopGradient,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kPreventGradient,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kReshape,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kRecv,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kSend,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kSwitch,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kMerge,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kEnter,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kExit,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kNextIteration,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kBitCast,
wrap(&OpLevelCostEstimator::PredictIdentity));
device_cost_impl_.emplace(kConcatV2,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kDataFormatVecPermute,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kDepthToSpace,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kExpandDims,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kFill,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kOneHot,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kPack,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kRange,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kSpaceToDepth,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kSplit,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kSqueeze,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kTranspose,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kTile,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kUnpack,
wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
device_cost_impl_.emplace(kRank,
wrap(&OpLevelCostEstimator::PredictMetadata));
device_cost_impl_.emplace(kShape,
wrap(&OpLevelCostEstimator::PredictMetadata));
device_cost_impl_.emplace(kShapeN,
wrap(&OpLevelCostEstimator::PredictMetadata));
device_cost_impl_.emplace(kSize,
wrap(&OpLevelCostEstimator::PredictMetadata));
device_cost_impl_.emplace(kMaxPool,
wrap(&OpLevelCostEstimator::PredictMaxPool));
device_cost_impl_.emplace(kMaxPoolGrad,
wrap(&OpLevelCostEstimator::PredictMaxPoolGrad));
device_cost_impl_.emplace(kAvgPool,
wrap(&OpLevelCostEstimator::PredictAvgPool));
device_cost_impl_.emplace(kAvgPoolGrad,
wrap(&OpLevelCostEstimator::PredictAvgPoolGrad));
device_cost_impl_.emplace(kFusedBatchNorm,
wrap(&OpLevelCostEstimator::PredictFusedBatchNorm));
device_cost_impl_.emplace(
kFusedBatchNormGrad,
wrap(&OpLevelCostEstimator::PredictFusedBatchNormGrad));
device_cost_impl_.emplace(kSoftmax,
wrap(&OpLevelCostEstimator::PredictSoftmax));
device_cost_impl_.emplace(kResizeBilinear,
wrap(&OpLevelCostEstimator::PredictResizeBilinear));
device_cost_impl_.emplace(kCropAndResize,
wrap(&OpLevelCostEstimator::PredictCropAndResize));
device_cost_impl_.emplace(
kAssignVariableOp, wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
device_cost_impl_.emplace(
kAssignAddVariableOp,
wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
device_cost_impl_.emplace(
kAssignSubVariableOp,
wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
device_cost_impl_.emplace(kAddN, wrap(&OpLevelCostEstimator::PredictNaryOp));
persistent_ops_ = {
kConst, kVariable, kVariableV2, kAutoReloadVariable,
kVarHandleOp, kReadVariableOp, kVarHandlesOp, kReadVariablesOp};
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
const int quantize_v2_cost =
EIGEN_COST(scalar_product_op<float>) + EIGEN_COST(scalar_max_op<float>) +
EIGEN_COST(scalar_min_op<float>) + EIGEN_COST(scalar_round_op<float>);
const int quantize_and_dequantize_v2_cost =
quantize_v2_cost + EIGEN_COST(scalar_product_op<float>);
elementwise_ops_.emplace("Acos", EIGEN_COST(scalar_acos_op<float>));
elementwise_ops_.emplace("All", EIGEN_COST(scalar_boolean_and_op<bool>));
elementwise_ops_.emplace("ArgMax", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Asin", EIGEN_COST(scalar_asin_op<float>));
elementwise_ops_.emplace("Atan", EIGEN_COST(scalar_atan_op<float>));
elementwise_ops_.emplace("Atan2", EIGEN_COST(scalar_quotient_op<float>) +
EIGEN_COST(scalar_atan_op<float>));
elementwise_ops_.emplace(
"Cast", Eigen::internal::functor_traits<
Eigen::internal::scalar_cast_op<float, int16>>::Cost);
elementwise_ops_.emplace("Ceil", EIGEN_COST(scalar_ceil_op<float>));
elementwise_ops_.emplace("Cos", EIGEN_COST(scalar_cos_op<float>));
elementwise_ops_.emplace("Dequantize", EIGEN_COST(scalar_product_op<float>));
elementwise_ops_.emplace("Erf", 1);
elementwise_ops_.emplace("Erfc", 1);
elementwise_ops_.emplace("Exp", EIGEN_COST(scalar_exp_op<float>));
elementwise_ops_.emplace("Expm1", EIGEN_COST(scalar_expm1_op<float>));
elementwise_ops_.emplace("Floor", EIGEN_COST(scalar_floor_op<float>));
elementwise_ops_.emplace("Inv", EIGEN_COST(scalar_inverse_op<float>));
elementwise_ops_.emplace("InvGrad", 1);
elementwise_ops_.emplace("Lgamma", 1);
elementwise_ops_.emplace("Log", EIGEN_COST(scalar_log_op<float>));
elementwise_ops_.emplace("Log1p", EIGEN_COST(scalar_log1p_op<float>));
elementwise_ops_.emplace("Max", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Min", EIGEN_COST(scalar_min_op<float>));
elementwise_ops_.emplace("Neg", EIGEN_COST(scalar_opposite_op<float>));
elementwise_ops_.emplace("Prod", EIGEN_COST(scalar_product_op<float>));
elementwise_ops_.emplace("QuantizeAndDequantizeV2",
quantize_and_dequantize_v2_cost);
elementwise_ops_.emplace("QuantizeAndDequantizeV4",
quantize_and_dequantize_v2_cost);
elementwise_ops_.emplace("QuantizedSigmoid",
EIGEN_COST(scalar_logistic_op<float>));
elementwise_ops_.emplace("QuantizeV2", quantize_v2_cost);
elementwise_ops_.emplace("Reciprocal", EIGEN_COST(scalar_inverse_op<float>));
elementwise_ops_.emplace("Relu", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Relu6", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Rint", 1);
elementwise_ops_.emplace("Round", EIGEN_COST(scalar_round_op<float>));
elementwise_ops_.emplace("Rsqrt", EIGEN_COST(scalar_rsqrt_op<float>));
elementwise_ops_.emplace("Sigmoid", EIGEN_COST(scalar_logistic_op<float>));
elementwise_ops_.emplace("Sign", EIGEN_COST(scalar_sign_op<float>));
elementwise_ops_.emplace("Sin", EIGEN_COST(scalar_sin_op<float>));
elementwise_ops_.emplace("Sqrt", EIGEN_COST(scalar_sqrt_op<float>));
elementwise_ops_.emplace("Square", EIGEN_COST(scalar_square_op<float>));
elementwise_ops_.emplace("Sum", EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("Tan", EIGEN_COST(scalar_tan_op<float>));
elementwise_ops_.emplace("Tanh", EIGEN_COST(scalar_tanh_op<float>));
elementwise_ops_.emplace("TopKV2", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Add", EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("AddV2", EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("ApproximateEqual", 1);
elementwise_ops_.emplace("BiasAdd", EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("QuantizedBiasAdd",
EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("Div", EIGEN_COST(scalar_quotient_op<float>));
elementwise_ops_.emplace("Equal", 1);
elementwise_ops_.emplace("FloorDiv", EIGEN_COST(scalar_quotient_op<float>));
elementwise_ops_.emplace("FloorMod", EIGEN_COST(scalar_mod_op<float>));
elementwise_ops_.emplace("Greater", 1);
elementwise_ops_.emplace("GreaterEqual", 1);
elementwise_ops_.emplace("Less", 1);
elementwise_ops_.emplace("LessEqual", 1);
elementwise_ops_.emplace("LogicalAnd",
EIGEN_COST(scalar_boolean_and_op<bool>));
elementwise_ops_.emplace("LogicalNot", 1);
elementwise_ops_.emplace("LogicalOr", EIGEN_COST(scalar_boolean_or_op<bool>));
elementwise_ops_.emplace("Maximum", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Minimum", EIGEN_COST(scalar_min_op<float>));
elementwise_ops_.emplace("Mod", EIGEN_COST(scalar_mod_op<float>));
elementwise_ops_.emplace("Mul", EIGEN_COST(scalar_product_op<float>));
elementwise_ops_.emplace("NotEqual", 1);
elementwise_ops_.emplace("QuantizedAdd", EIGEN_COST(scalar_sum_op<float>));
elementwise_ops_.emplace("QuantizedMul",
EIGEN_COST(scalar_product_op<float>));
elementwise_ops_.emplace("RealDiv", EIGEN_COST(scalar_quotient_op<float>));
elementwise_ops_.emplace("ReluGrad", EIGEN_COST(scalar_max_op<float>));
elementwise_ops_.emplace("Select", EIGEN_COST(scalar_boolean_or_op<bool>));
elementwise_ops_.emplace("SelectV2", EIGEN_COST(scalar_boolean_or_op<bool>));
elementwise_ops_.emplace("SquaredDifference",
EIGEN_COST(scalar_square_op<float>) +
EIGEN_COST(scalar_difference_op<float>));
elementwise_ops_.emplace("Sub", EIGEN_COST(scalar_difference_op<float>));
elementwise_ops_.emplace("TruncateDiv",
EIGEN_COST(scalar_quotient_op<float>));
elementwise_ops_.emplace("TruncateMod", EIGEN_COST(scalar_mod_op<float>));
elementwise_ops_.emplace("Where", 1);
#undef EIGEN_COST
compute_memory_overlap_ = false;
}
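// Entry point. A minimal usage sketch (hypothetical variable names; assumes
// an OpContext already populated from graph properties):
//   OpLevelCostEstimator estimator;
//   Costs costs = estimator.PredictCosts(op_context);
//   // inspect costs.execution_time, costs.memory_time, costs.inaccurate ...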
Costs OpLevelCostEstimator::PredictCosts(const OpContext& op_context) const {
Costs costs;
NodeCosts node_costs;
if (PredictNodeCosts(op_context, &node_costs).ok()) {
if (node_costs.has_costs) {
return node_costs.costs;
}
if (node_costs.minimum_cost_op) {
costs.compute_time = kMinComputeTime;
costs.execution_time = kMinComputeTime;
costs.memory_time = 0;
costs.intermediate_memory_time = 0;
costs.intermediate_memory_read_time = 0;
costs.intermediate_memory_write_time = 0;
} else {
costs = PredictOpCountBasedCost(
node_costs.num_compute_ops, node_costs.num_total_read_bytes(),
node_costs.num_total_write_bytes(), op_context.op_info);
}
VLOG(1) << "Operation " << op_context.op_info.op() << " takes "
<< costs.execution_time.count() << " ns.";
costs.max_memory = node_costs.max_memory;
costs.persistent_memory = node_costs.persistent_memory;
costs.temporary_memory = node_costs.temporary_memory;
costs.inaccurate = node_costs.inaccurate;
costs.num_ops_with_unknown_shapes =
node_costs.num_nodes_with_unknown_shapes;
costs.num_ops_total = node_costs.num_nodes;
return costs;
}
LOG(WARNING) << "Error in PredictCost() for the op: "
<< op_context.op_info.ShortDebugString();
costs = Costs::ZeroCosts(true);
costs.num_ops_with_unknown_shapes = node_costs.num_nodes_with_unknown_shapes;
return costs;
}
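// Dispatch order: a per-op estimator registered in device_cost_impl_, then
// the persistent (variable-like) ops, then the per-element Eigen cost table
// for cwise ops, and finally a catch-all that prices the op by its I/O bytes
// alone and marks the result inaccurate.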
absl::Status OpLevelCostEstimator::PredictNodeCosts(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
auto it = device_cost_impl_.find(op_info.op());
if (it != device_cost_impl_.end()) {
std::function<absl::Status(const OpContext&, NodeCosts*)> estimator =
it->second;
return estimator(op_context, node_costs);
}
if (persistent_ops_.find(op_info.op()) != persistent_ops_.end()) {
return PredictVariable(op_context, node_costs);
}
if (elementwise_ops_.find(op_info.op()) != elementwise_ops_.end()) {
return PredictCwiseOp(op_context, node_costs);
}
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
node_costs->num_nodes_with_unknown_op_type = 1;
return PredictCostOfAnUnknownOp(op_context, node_costs);
}
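// A coarse device model. CPU GFLOPS come from num_cores * frequency
// (presumably MHz); GPUs additionally scale by a cores-per-SM count looked
// up from the compute-capability string (hence the lexicographic comparisons
// against "3", "4" and "6") and by kOpsPerMac for fused multiply-adds.
// Missing bandwidths fall back to rough defaults (32 GB/s CPU, 100 GB/s GPU).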
DeviceInfo OpLevelCostEstimator::GetDeviceInfo(
const DeviceProperties& device) const {
double gflops = -1;
double gb_per_sec = -1;
if (device.type() == "CPU") {
gflops = device.num_cores() * device.frequency() * 1e-3;
if (gb_per_sec < 0) {
if (device.bandwidth() > 0) {
gb_per_sec = device.bandwidth() / 1e6;
} else {
gb_per_sec = 32;
}
}
} else if (device.type() == "GPU") {
const auto& device_env = device.environment();
auto it = device_env.find("architecture");
if (it != device_env.end()) {
const std::string architecture = device_env.at("architecture");
int cores_per_multiprocessor;
if (architecture < "3") {
cores_per_multiprocessor = 32;
} else if (architecture < "4") {
cores_per_multiprocessor = 192;
} else if (architecture < "6") {
cores_per_multiprocessor = 128;
} else {
cores_per_multiprocessor = 64;
}
gflops = device.num_cores() * device.frequency() * 1e-3 *
cores_per_multiprocessor * kOpsPerMac;
if (device.bandwidth() > 0) {
gb_per_sec = device.bandwidth() / 1e6;
} else {
gb_per_sec = 100;
}
} else {
gflops = 100;
gb_per_sec = 12;
}
} else {
LOG_EVERY_N(WARNING, 1000) << "Unknown device type: " << device.type()
<< ", assuming PCIe between CPU and GPU.";
gflops = 1;
gb_per_sec = 12;
}
VLOG(1) << "Device: " << device.type() << " gflops: " << gflops
<< " gb_per_sec: " << gb_per_sec;
return DeviceInfo(gflops, gb_per_sec);
}
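// Cwise cost = per-element Eigen functor cost * output element count, where
// the count is the max over the largest input, the first output, and the
// broadcast of all inputs; this covers both unary ops and broadcasting
// binary ops.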
absl::Status OpLevelCostEstimator::PredictCwiseOp(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
int64_t op_count = CalculateLargestInputCount(op_info, &found_unknown_shapes);
if (op_info.outputs_size() > 0) {
op_count = std::max(
op_count,
CalculateTensorElementCount(op_info.outputs(0), &found_unknown_shapes));
}
if (op_info.inputs_size() >= 2) {
op_count = std::max(op_count, CwiseOutputElementCount(op_info));
}
int op_cost = 1;
auto it = elementwise_ops_.find(op_info.op());
if (it != elementwise_ops_.end()) {
op_cost = it->second;
} else {
return errors::InvalidArgument("Not a cwise op: ", op_info.op());
}
return PredictDefaultNodeCosts(op_count * op_cost, op_context,
&found_unknown_shapes, node_costs);
}
absl::Status OpLevelCostEstimator::PredictCostOfAnUnknownOp(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
node_costs->inaccurate = true;
return PredictDefaultNodeCosts(0, op_context, &found_unknown_shapes,
node_costs);
}
Costs OpLevelCostEstimator::PredictOpCountBasedCost(
double operations, const OpInfo& op_info) const {
bool unknown_shapes = false;
const double input_size = CalculateInputSize(op_info, &unknown_shapes);
const double output_size = CalculateOutputSize(op_info, &unknown_shapes);
Costs costs =
PredictOpCountBasedCost(operations, input_size, output_size, op_info);
costs.inaccurate = unknown_shapes;
costs.num_ops_with_unknown_shapes = unknown_shapes;
costs.max_memory = output_size;
return costs;
}
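// Roofline-style arithmetic: gigaops is in 10^9 ops/sec, i.e. ops per
// nanosecond, so operations / gigaops yields nanoseconds directly, and
// likewise total_io_bytes / gb_per_sec. e.g. 2e9 ops on a 1000-GFLOP device
// -> 2e6 ns (2 ms). Compute and memory times are then combined as a max or a
// sum depending on whether the device overlaps them.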
Costs OpLevelCostEstimator::PredictOpCountBasedCost(
double operations, double input_io_bytes, double output_io_bytes,
const OpInfo& op_info) const {
double total_io_bytes = input_io_bytes + output_io_bytes;
const DeviceInfo device_info = GetDeviceInfo(op_info.device());
if (device_info.gigaops <= 0 || device_info.gb_per_sec <= 0 ||
device_info.intermediate_read_gb_per_sec <= 0 ||
device_info.intermediate_write_gb_per_sec <= 0) {
VLOG(1) << "BAD DEVICE. Op:" << op_info.op()
<< " device type:" << op_info.device().type()
<< " device model:" << op_info.device().model();
}
Costs::NanoSeconds compute_cost(std::ceil(operations / device_info.gigaops));
VLOG(1) << "Op:" << op_info.op() << " GOps:" << operations / 1e9
<< " Compute Time (ns):" << compute_cost.count();
Costs::NanoSeconds memory_cost(
std::ceil(total_io_bytes / device_info.gb_per_sec));
VLOG(1) << "Op:" << op_info.op() << " Size (KB):" << (total_io_bytes) / 1e3
<< " Memory Time (ns):" << memory_cost.count();
double intermediate_read_time =
(input_io_bytes > 0)
? std::ceil(input_io_bytes / device_info.intermediate_read_gb_per_sec)
: 0;
double intermediate_write_time =
(output_io_bytes > 0)
? std::ceil(output_io_bytes /
device_info.intermediate_write_gb_per_sec)
: 0;
Costs::NanoSeconds intermediate_memory_cost =
compute_memory_overlap_
? std::max(intermediate_read_time, intermediate_write_time)
: (intermediate_read_time + intermediate_write_time);
VLOG(1) << "Op:" << op_info.op() << " Size (KB):" << (total_io_bytes) / 1e3
<< " Intermediate Memory Time (ns):"
<< intermediate_memory_cost.count();
Costs costs = Costs::ZeroCosts();
costs.compute_time = compute_cost;
costs.memory_time = memory_cost;
costs.intermediate_memory_time = intermediate_memory_cost;
costs.intermediate_memory_read_time =
Costs::NanoSeconds(intermediate_read_time);
costs.intermediate_memory_write_time =
Costs::NanoSeconds(intermediate_write_time);
CombineCostsAndUpdateExecutionTime(compute_memory_overlap_, &costs);
return costs;
}
int64_t OpLevelCostEstimator::CountConv2DOperations(
const OpInfo& op_info, bool* found_unknown_shapes) {
return CountConv2DOperations(op_info, nullptr, found_unknown_shapes);
}
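// Normalizes NHWC / NCHW / NCHW_VECT_C image layouts and HWIO / OIHW /
// OIHW_VECT_I filter layouts into one ConvolutionDimensions record (batch,
// input/output spatial sizes, channel depths, kernel sizes, strides,
// padding). For the VECT_C / VECT_I formats the vectorized sub-channel is
// folded back into the channel count.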
OpLevelCostEstimator::ConvolutionDimensions
OpLevelCostEstimator::ConvolutionDimensionsFromInputs(
const TensorShapeProto& original_image_shape,
const TensorShapeProto& original_filter_shape, const OpInfo& op_info,
bool* found_unknown_shapes) {
VLOG(2) << "op features: " << op_info.DebugString();
VLOG(2) << "Original image shape: " << original_image_shape.DebugString();
VLOG(2) << "Original filter shape: " << original_filter_shape.DebugString();
int x_index, y_index, major_channel_index, minor_channel_index = -1;
const std::string& data_format = GetDataFormat(op_info);
if (data_format == "NCHW") {
major_channel_index = 1;
y_index = 2;
x_index = 3;
} else if (data_format == "NCHW_VECT_C") {
minor_channel_index = 1;
y_index = 2;
x_index = 3;
major_channel_index = 4;
} else {
y_index = 1;
x_index = 2;
major_channel_index = 3;
}
const std::string& filter_format = GetFilterFormat(op_info);
int filter_x_index, filter_y_index, in_major_channel_index, out_channel_index,
in_minor_channel_index = -1;
if (filter_format == "HWIO") {
filter_y_index = 0;
filter_x_index = 1;
in_major_channel_index = 2;
out_channel_index = 3;
} else if (filter_format == "OIHW_VECT_I") {
out_channel_index = 0;
in_minor_channel_index = 1;
filter_y_index = 2;
filter_x_index = 3;
in_major_channel_index = 4;
} else {
out_channel_index = 0;
in_major_channel_index = 1;
filter_y_index = 2;
filter_x_index = 3;
}
std::vector<int64_t> image_shape = MaybeGetMinimumShape(
original_image_shape, minor_channel_index >= 0 ? 5 : 4,
found_unknown_shapes);
std::vector<int64_t> filter_shape = MaybeGetMinimumShape(
original_filter_shape, in_minor_channel_index >= 0 ? 5 : 4,
found_unknown_shapes);
VLOG(2) << "Image shape: " << absl::StrJoin(image_shape, ", ");
VLOG(2) << "Filter shape: " << absl::StrJoin(filter_shape, ", ");
int64_t batch = image_shape[0];
int64_t ix = image_shape[x_index];
int64_t iy = image_shape[y_index];
int64_t iz = minor_channel_index >= 0 ? image_shape[minor_channel_index] *
image_shape[major_channel_index]
: image_shape[major_channel_index];
int64_t kx = filter_shape[filter_x_index];
int64_t ky = filter_shape[filter_y_index];
int64_t kz = in_minor_channel_index >= 0
? filter_shape[in_major_channel_index] *
filter_shape[in_minor_channel_index]
: filter_shape[in_major_channel_index];
std::vector<int64_t> strides = GetStrides(op_info);
const auto padding = GetPadding(op_info);
int64_t sx = strides[x_index];
int64_t sy = strides[y_index];
int64_t ox = GetOutputSize(ix, kx, sx, padding);
int64_t oy = GetOutputSize(iy, ky, sy, padding);
int64_t oz = filter_shape[out_channel_index];
if (iz != 1 && kz != 1) {
DCHECK_EQ(iz % kz, 0) << "Input channel " << iz
<< " is not a multiple of filter channel " << kz
<< ".";
if (iz % kz) {
*found_unknown_shapes = true;
}
} else {
iz = kz = std::max<int64_t>(iz, kz);
}
OpLevelCostEstimator::ConvolutionDimensions conv_dims = {
batch, ix, iy, iz, kx, ky, kz, oz, ox, oy, sx, sy, padding};
VLOG(1) << "Batch Size:" << batch;
VLOG(1) << "Image Dims:" << ix << "," << iy;
VLOG(1) << "Input Depth:" << iz;
VLOG(1) << "Kernel Dims:" << kx << "," << ky;
VLOG(1) << "Kernel Depth:" << kz;
VLOG(1) << "Output Dims:" << ox << "," << oy;
VLOG(1) << "Output Depth:" << oz;
VLOG(1) << "Strides:" << sx << "," << sy;
VLOG(1) << "Padding:" << (padding == Padding::VALID ? "VALID" : "SAME");
return conv_dims;
}
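// FLOP count for a (depthwise) convolution:
//   ops = batch * ox*oy * kx*ky * depth_term * kOpsPerMac,
// where depth_term is kz*oz for Conv2D and iz*channel_multiplier for
// DepthwiseConv2dNative. e.g. batch=1, 32x32 output, 3x3 kernel, 16 input
// and 64 output channels: 1 * 1024 * 9 * 16 * 64 * 2 = 18,874,368 ops.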
int64_t OpLevelCostEstimator::CountConv2DOperations(
const OpInfo& op_info, ConvolutionDimensions* conv_info,
bool* found_unknown_shapes) {
DCHECK(op_info.op() == kConv2d || op_info.op() == kDepthwiseConv2dNative)
<< "Invalid Operation: not Conv2D nor DepthwiseConv2dNative";
if (op_info.inputs_size() < 2) {
*found_unknown_shapes = true;
return 0;
}
ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
op_info.inputs(0).shape(), op_info.inputs(1).shape(), op_info,
found_unknown_shapes);
int64_t ops = conv_dims.batch;
ops *= conv_dims.ox * conv_dims.oy;
ops *= conv_dims.kx * conv_dims.ky;
if (op_info.op() == kConv2d) {
ops *= conv_dims.kz * conv_dims.oz;
} else {
conv_dims.oz *= conv_dims.iz;
ops *= conv_dims.oz;
}
ops *= kOpsPerMac;
if (conv_info != nullptr) {
*conv_info = conv_dims;
}
return ops;
}
int64_t OpLevelCostEstimator::CountMatMulOperations(
const OpInfo& op_info, bool* found_unknown_shapes) {
return CountMatMulOperations(op_info, nullptr, found_unknown_shapes);
}
int64_t OpLevelCostEstimator::CountMatMulOperations(
const OpInfo& op_info, MatMulDimensions* mat_mul,
bool* found_unknown_shapes) {
bool transpose_a = false;
if (auto it = op_info.attr().find("transpose_a");
it != op_info.attr().end()) {
if (it->second.b()) transpose_a = true;
}
bool transpose_b = false;
if (auto it = op_info.attr().find("transpose_b");
it != op_info.attr().end()) {
if (it->second.b()) transpose_b = true;
}
return CountMatMulOperations(op_info, transpose_a, transpose_b, mat_mul,
found_unknown_shapes);
}
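// MatMul FLOPs are 2*m*n*k (a multiply and an add per MAC); the transpose
// flags only change which stored dimension is read as m/k and k/n. If the
// two contraction dimensions disagree (and neither is a placeholder 1), the
// shapes are inconsistent and 0 is returned.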
int64_t OpLevelCostEstimator::CountMatMulOperations(
const OpInfo& op_info, bool transpose_a, bool transpose_b,
MatMulDimensions* mat_mul, bool* found_unknown_shapes) {
double ops = 0;
if (op_info.inputs_size() < 2) {
LOG(ERROR) << "Need 2 inputs but got " << op_info.inputs_size();
*found_unknown_shapes = true;
return 0;
}
auto& a_matrix = op_info.inputs(0);
auto& b_matrix = op_info.inputs(1);
VLOG(1) << "transpose_a:" << transpose_a;
VLOG(1) << "transpose_b:" << transpose_b;
std::vector<int64_t> a_matrix_shape =
MaybeGetMinimumShape(a_matrix.shape(), 2, found_unknown_shapes);
std::vector<int64_t> b_matrix_shape =
MaybeGetMinimumShape(b_matrix.shape(), 2, found_unknown_shapes);
double m_dim, n_dim, k_dim, k_dim_b = 0;
if (transpose_a) {
m_dim = a_matrix_shape[1];
k_dim = a_matrix_shape[0];
} else {
m_dim = a_matrix_shape[0];
k_dim = a_matrix_shape[1];
}
if (transpose_b) {
k_dim_b = b_matrix_shape[1];
n_dim = b_matrix_shape[0];
} else {
k_dim_b = b_matrix_shape[0];
n_dim = b_matrix_shape[1];
}
VLOG(1) << "M, N, K: " << m_dim << "," << n_dim << "," << k_dim;
if (k_dim_b != 1 && k_dim != 1 && k_dim_b != k_dim) {
LOG(ERROR) << "Incompatible Matrix dimensions";
return ops;
} else {
k_dim = std::max(k_dim, k_dim_b);
}
ops = m_dim * n_dim * k_dim * 2;
VLOG(1) << "Operations for Matmul: " << ops;
if (mat_mul != nullptr) {
mat_mul->m = m_dim;
mat_mul->n = n_dim;
mat_mul->k = k_dim;
}
return ops;
}
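// Rewrites a well-formed two-operand einsum as an equivalent BatchMatMul so
// the matmul cost model can be reused. Each subscript label is classified:
// batch (in both inputs and the output) stays a leading dim of both
// operands; m (only in input a and the output), k (in both inputs but not
// the output, i.e. contracted) and n (only in input b and the output) are
// folded into the trailing 2-D matmul shape. e.g. "bij,bjk->bik" becomes a
// BatchMatMul with batch=b, m=i, k=j, n=k.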
bool OpLevelCostEstimator::GenerateBatchMatmulContextFromEinsum(
const OpContext& einsum_context, OpContext* batch_matmul_context,
bool* found_unknown_shapes) const {
if (batch_matmul_context == nullptr) {
VLOG(1) << "Output context should not be a nullptr.";
return false;
}
if (!IsEinsumCorrectlyFormed(einsum_context)) return false;
const auto& op_info = einsum_context.op_info;
std::vector<std::string> equation_split =
absl::StrSplit(op_info.attr().find("equation")->second.s(), "->");
std::vector<absl::string_view> input_split =
absl::StrSplit(equation_split[0], ',');
const auto& a_input = op_info.inputs(0);
const auto& b_input = op_info.inputs(1);
absl::string_view rhs_str = equation_split[1];
absl::string_view a_input_str = input_split[0];
absl::string_view b_input_str = input_split[1];
constexpr int kMatrixRank = 2;
bool a_input_shape_unknown = false;
bool b_input_shape_unknown = false;
std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
a_input.shape(), std::max(kMatrixRank, a_input.shape().dim_size()),
&a_input_shape_unknown);
std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
b_input.shape(), std::max(kMatrixRank, b_input.shape().dim_size()),
&b_input_shape_unknown);
*found_unknown_shapes = a_input_shape_unknown || b_input_shape_unknown ||
(a_input.shape().dim_size() < kMatrixRank) ||
(b_input.shape().dim_size() < kMatrixRank);
OpInfo batch_matmul_op_info = op_info;
batch_matmul_op_info.mutable_inputs()->Clear();
batch_matmul_op_info.set_op("BatchMatMul");
AttrValue transpose_attribute;
transpose_attribute.set_b(false);
(*batch_matmul_op_info.mutable_attr())["transpose_a"] = transpose_attribute;
(*batch_matmul_op_info.mutable_attr())["transpose_b"] = transpose_attribute;
OpInfo::TensorProperties* a_matrix = batch_matmul_op_info.add_inputs();
TensorShapeProto* a_matrix_shape = a_matrix->mutable_shape();
a_matrix->set_dtype(a_input.dtype());
OpInfo::TensorProperties* b_matrix = batch_matmul_op_info.add_inputs();
b_matrix->set_dtype(b_input.dtype());
TensorShapeProto* b_matrix_shape = b_matrix->mutable_shape();
TensorShapeProto_Dim m_dim;
TensorShapeProto_Dim n_dim;
TensorShapeProto_Dim k_dim;
m_dim.set_size(1);
n_dim.set_size(1);
k_dim.set_size(1);
for (int i_idx = 0, a_input_str_size = a_input_str.size();
i_idx < a_input_str_size; ++i_idx) {
if (!absl::StrContains(b_input_str, a_input_str[i_idx])) {
if (!absl::StrContains(rhs_str, a_input_str[i_idx])) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
return false;
}
m_dim.set_size(m_dim.size() * a_input_shape[i_idx]);
continue;
} else if (!absl::StrContains(rhs_str, a_input_str[i_idx])) {
k_dim.set_size(k_dim.size() * a_input_shape[i_idx]);
continue;
}
a_matrix_shape->add_dim()->set_size(a_input_shape[i_idx]);
b_matrix_shape->add_dim()->set_size(a_input_shape[i_idx]);
}
for (int i_idx = 0, b_input_str_size = b_input_str.size();
i_idx < b_input_str_size; ++i_idx) {
if (!absl::StrContains(a_input_str, b_input_str[i_idx])) {
if (!absl::StrContains(rhs_str, b_input_str[i_idx])) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
return false;
}
n_dim.set_size(n_dim.size() * b_input_shape[i_idx]);
}
}
*(a_matrix_shape->add_dim()) = m_dim;
*(a_matrix_shape->add_dim()) = k_dim;
*(b_matrix_shape->add_dim()) = k_dim;
*(b_matrix_shape->add_dim()) = n_dim;
*batch_matmul_context = einsum_context;
batch_matmul_context->op_info = batch_matmul_op_info;
return true;
}
int64_t OpLevelCostEstimator::CountBatchMatMulOperations(
const OpInfo& op_info, bool* found_unknown_shapes) {
return CountBatchMatMulOperations(op_info, nullptr, found_unknown_shapes);
}
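// The number of component matmuls is the product of the broadcast batch
// dimensions (aligned from the right, missing dims treated as 1); total ops
// are num_matmuls * 2*m*n*k. e.g. a=[2,3,4,5] and b=[3,5,6] give
// max(2,1)*max(3,3) = 6 matmuls of shape (4x5)x(5x6).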
int64_t OpLevelCostEstimator::CountBatchMatMulOperations(
const OpInfo& op_info, BatchMatMulDimensions* batch_mat_mul,
bool* found_unknown_shapes) {
if (op_info.op() != kBatchMatMul && op_info.op() != kBatchMatMulV2) {
LOG(ERROR) << "Invalid Operation: " << op_info.op();
*found_unknown_shapes = true;
return 0;
}
if (op_info.inputs_size() != 2) {
LOG(ERROR) << "Expected 2 inputs but got " << op_info.inputs_size();
*found_unknown_shapes = true;
return 0;
}
double ops = 0;
const auto& a_input = op_info.inputs(0);
const auto& b_input = op_info.inputs(1);
const int matrix_rank = 2;
bool a_input_shape_unknown = false;
bool b_input_shape_unknown = false;
std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
a_input.shape(), std::max(matrix_rank, a_input.shape().dim_size()),
&a_input_shape_unknown);
std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
b_input.shape(), std::max(matrix_rank, b_input.shape().dim_size()),
&b_input_shape_unknown);
*found_unknown_shapes = a_input_shape_unknown || b_input_shape_unknown ||
(a_input.shape().dim_size() < matrix_rank) ||
(b_input.shape().dim_size() < matrix_rank);
std::vector<int64_t>* bigger_rank_shape = &a_input_shape;
std::vector<int64_t>* smaller_rank_shape = &b_input_shape;
if (b_input_shape.size() > a_input_shape.size()) {
bigger_rank_shape = &b_input_shape;
smaller_rank_shape = &a_input_shape;
}
int num_matmuls = 1;
for (int b_i = 0,
s_i = smaller_rank_shape->size() - bigger_rank_shape->size();
b_i < bigger_rank_shape->size() - matrix_rank; ++b_i, ++s_i) {
int b_dim = (*bigger_rank_shape)[b_i];
int s_dim = 1;
if (s_i >= 0) {
s_dim = (*smaller_rank_shape)[s_i];
}
if (batch_mat_mul != nullptr) {
batch_mat_mul->batch_dims.push_back(s_dim);
}
num_matmuls *= std::max(b_dim, s_dim);
}
OpInfo matmul_op_info;
matmul_op_info.set_op("MatMul");
bool transpose_a = false;
bool transpose_b = false;
if (auto it = op_info.attr().find("adj_x"); it != op_info.attr().end()) {
transpose_a = it->second.b();
} else if (auto it = op_info.attr().find("transpose_a");
it != op_info.attr().end()) {
transpose_a = it->second.b();
}
if (auto it = op_info.attr().find("adj_y"); it != op_info.attr().end()) {
transpose_b = it->second.b();
} else if (auto it = op_info.attr().find("transpose_b");
it != op_info.attr().end()) {
transpose_b = it->second.b();
}
OpInfo::TensorProperties* a_matrix = matmul_op_info.add_inputs();
a_matrix->set_dtype(a_input.dtype());
TensorShapeProto* a_matrix_shape = a_matrix->mutable_shape();
for (int i = std::max<int>(0, a_input_shape.size() - matrix_rank);
i < a_input_shape.size(); ++i) {
a_matrix_shape->add_dim()->set_size(a_input_shape[i]);
}
OpInfo::TensorProperties* b_matrix = matmul_op_info.add_inputs();
b_matrix->set_dtype(b_input.dtype());
TensorShapeProto* b_matrix_shape = b_matrix->mutable_shape();
for (int i = std::max<int>(0, b_input_shape.size() - matrix_rank);
i < b_input_shape.size(); ++i) {
b_matrix_shape->add_dim()->set_size(b_input_shape[i]);
}
if (batch_mat_mul != nullptr) {
batch_mat_mul->matmul_dims.m = (transpose_a)
? a_matrix_shape->dim(1).size()
: a_matrix_shape->dim(0).size();
batch_mat_mul->matmul_dims.k = (transpose_a)
? a_matrix_shape->dim(0).size()
: a_matrix_shape->dim(1).size();
batch_mat_mul->matmul_dims.n = (transpose_b)
? b_matrix_shape->dim(0).size()
: b_matrix_shape->dim(1).size();
}
ops += num_matmuls * CountMatMulOperations(matmul_op_info, transpose_a,
transpose_b, nullptr,
found_unknown_shapes);
return ops;
}
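// Converts a constant 1-D integer tensor (e.g. the input_sizes argument of
// Conv2DBackpropInput) into a TensorShapeProto, one dimension per element.
// Non-1-D tensors and non-integer dtypes are rejected with a warning.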
bool GetTensorShapeProtoFromTensorProto(const TensorProto& tensor_proto,
TensorShapeProto* tensor_shape_proto) {
tensor_shape_proto->Clear();
Tensor tensor(tensor_proto.dtype());
if (!tensor.FromProto(tensor_proto)) {
LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
<< "failed to parse TensorProto: "
<< tensor_proto.DebugString();
return false;
}
if (tensor.dims() != 1) {
LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
<< "tensor is not 1D: " << tensor.dims();
return false;
}
TensorProto temp_tensor;
tensor.AsProtoField(&temp_tensor);
#define TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(type) \
do { \
for (const auto& value : temp_tensor.type##_val()) { \
tensor_shape_proto->add_dim()->set_size(value); \
} \
} while (0)
if (tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT16 ||
tensor.dtype() == DT_INT8 || tensor.dtype() == DT_UINT8) {
TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(int);
} else if (tensor.dtype() == DT_INT64) {
TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(int64);
} else if (tensor.dtype() == DT_UINT32) {
TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(uint32);
} else if (tensor.dtype() == DT_UINT64) {
TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(uint64);
} else {
LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
<< "Unsupported dtype: " << tensor.dtype();
return false;
}
#undef TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO
return true;
}
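// Counts multiply-accumulate operations for Conv2DBackpropInput and
// DepthwiseConv2dNativeBackpropInput. The original input shape is recovered
// from the constant value of input 0 when available, otherwise from the op's
// single output shape, and as a last resort defaults to an all-ones 4D shape
// while flagging unknown shapes.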
int64_t OpLevelCostEstimator::CountConv2DBackpropInputOperations(
const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes) {
int64_t ops = 0;
DCHECK(op_info.op() == kConv2dBackpropInput ||
op_info.op() == kDepthwiseConv2dNativeBackpropInput)
<< "Invalid Operation: not kConv2dBackpropInput nor"
"kDepthwiseConv2dNativeBackpropInput";
if (op_info.inputs_size() < 2) {
*found_unknown_shapes = true;
return ops;
}
TensorShapeProto input_shape;
bool shape_found = false;
if (op_info.inputs(0).has_value()) {
const TensorProto& value = op_info.inputs(0).value();
shape_found = GetTensorShapeProtoFromTensorProto(value, &input_shape);
}
if (!shape_found && op_info.outputs_size() == 1) {
input_shape = op_info.outputs(0).shape();
shape_found = true;
}
if (!shape_found) {
input_shape.Clear();
for (int i = 0; i < 4; ++i) {
input_shape.add_dim()->set_size(1);
}
*found_unknown_shapes = true;
}
ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
input_shape, op_info.inputs(1).shape(), op_info, found_unknown_shapes);
ops = conv_dims.batch;
ops *= conv_dims.ox * conv_dims.oy;
ops *= conv_dims.kx * conv_dims.ky;
if (op_info.op() == kConv2dBackpropInput) {
ops *= conv_dims.kz * conv_dims.oz;
} else {
conv_dims.oz *= conv_dims.iz;
ops *= conv_dims.oz;
}
ops *= kOpsPerMac;
VLOG(1) << "Operations for" << op_info.op() << " " << ops;
if (returned_conv_dims != nullptr) {
*returned_conv_dims = conv_dims;
}
return ops;
}
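// Counts multiply-accumulate operations for Conv2DBackpropFilter and
// DepthwiseConv2dNativeBackpropFilter, recovering the filter shape from the
// constant value of input 1, then from the output shape, then from an
// all-ones 4D fallback.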
int64_t OpLevelCostEstimator::CountConv2DBackpropFilterOperations(
const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes) {
int64_t ops = 0;
DCHECK(op_info.op() == kConv2dBackpropFilter ||
op_info.op() == kDepthwiseConv2dNativeBackpropFilter)
<< "Invalid Operation: not kConv2dBackpropFilter nor"
"kDepthwiseConv2dNativeBackpropFilter";
TensorShapeProto filter_shape;
bool shape_found = false;
if (op_info.inputs_size() >= 2 && op_info.inputs(1).has_value()) {
const TensorProto& value = op_info.inputs(1).value();
shape_found = GetTensorShapeProtoFromTensorProto(value, &filter_shape);
}
if (!shape_found && op_info.outputs_size() == 1) {
filter_shape = op_info.outputs(0).shape();
shape_found = true;
}
if (!shape_found) {
filter_shape.Clear();
for (int i = 0; i < 4; ++i) {
filter_shape.add_dim()->set_size(1);
}
*found_unknown_shapes = true;
}
if (op_info.inputs_size() < 1) {
*found_unknown_shapes = true;
return ops;
}
ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
op_info.inputs(0).shape(), filter_shape, op_info, found_unknown_shapes);
ops = conv_dims.batch;
ops *= conv_dims.ox * conv_dims.oy;
ops *= conv_dims.kx * conv_dims.ky;
if (op_info.op() == kConv2dBackpropFilter) {
ops *= conv_dims.kz * conv_dims.oz;
} else {
conv_dims.oz *= conv_dims.iz;
ops *= conv_dims.oz;
}
ops *= kOpsPerMac;
VLOG(1) << "Operations for" << op_info.op() << " " << ops;
if (returned_conv_dims != nullptr) {
*returned_conv_dims = conv_dims;
}
return ops;
}
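// Returns the number of elements in the tensor, with the shape normalized
// through MaybeGetMinimumShape, or -1 if the product overflows int64.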
int64_t OpLevelCostEstimator::CalculateTensorElementCount(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
VLOG(2) << " with " << DataTypeString(tensor.dtype()) << " tensor of shape "
<< tensor.shape().DebugString();
int64_t tensor_size = 1;
int num_dims = std::max(1, tensor.shape().dim_size());
auto tensor_shape =
MaybeGetMinimumShape(tensor.shape(), num_dims, found_unknown_shapes);
for (int64_t dim : tensor_shape) {
int64_t new_tensor_size = MultiplyWithoutOverflow(tensor_size, dim);
if (new_tensor_size < 0) {
VLOG(1) << "Overflow encountered when computing element count of a "
"tensor, multiplying "
<< tensor_size << " with " << dim;
return -1;
}
tensor_size = new_tensor_size;
}
return tensor_size;
}
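// Returns the tensor size in bytes: the element count times the size of the
// base dtype, or -1 on overflow.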
int64_t OpLevelCostEstimator::CalculateTensorSize(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
int64_t count = CalculateTensorElementCount(tensor, found_unknown_shapes);
int size = DataTypeSize(BaseType(tensor.dtype()));
VLOG(2) << "Count: " << count << " DataTypeSize: " << size;
int64_t tensor_size = MultiplyWithoutOverflow(count, size);
if (tensor_size < 0) {
VLOG(1) << "Overflow encountered when computing tensor size, multiplying "
<< count << " with " << size;
return -1;
}
return tensor_size;
}
int64_t OpLevelCostEstimator::CalculateInputSize(const OpInfo& op_info,
bool* found_unknown_shapes) {
int64_t total_input_size = 0;
for (auto& input : op_info.inputs()) {
int64_t input_size = CalculateTensorSize(input, found_unknown_shapes);
total_input_size += input_size;
VLOG(1) << "Input Size: " << input_size
<< " Total Input Size:" << total_input_size;
}
return total_input_size;
}
std::vector<int64_t> OpLevelCostEstimator::CalculateInputTensorSize(
const OpInfo& op_info, bool* found_unknown_shapes) {
std::vector<int64_t> input_tensor_size;
input_tensor_size.reserve(op_info.inputs().size());
for (auto& input : op_info.inputs()) {
input_tensor_size.push_back(
CalculateTensorSize(input, found_unknown_shapes));
}
return input_tensor_size;
}
int64_t OpLevelCostEstimator::CalculateLargestInputCount(
const OpInfo& op_info, bool* found_unknown_shapes) {
int64_t largest_input_count = 0;
for (auto& input : op_info.inputs()) {
int64_t input_count =
CalculateTensorElementCount(input, found_unknown_shapes);
if (input_count > largest_input_count) {
largest_input_count = input_count;
}
VLOG(1) << "Input Count: " << input_count
<< " Largest Input Count:" << largest_input_count;
}
return largest_input_count;
}
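// Sums the byte sizes of all outputs of the op, returning -1 as soon as any
// per-output product overflows int64.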
int64_t OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info,
bool* found_unknown_shapes) {
int64_t total_output_size = 0;
for (const auto& output : op_info.outputs()) {
DataType dt = output.dtype();
const auto& original_output_shape = output.shape();
int64_t output_size = DataTypeSize(BaseType(dt));
int num_dims = std::max(1, original_output_shape.dim_size());
std::vector<int64_t> output_shape = MaybeGetMinimumShape(
original_output_shape, num_dims, found_unknown_shapes);
for (int64_t dim : output_shape) {
int64_t new_output_size = MultiplyWithoutOverflow(output_size, dim);
if (new_output_size < 0) {
VLOG(1) << "Overflow encountered when estimating cost, multiplying "
<< output_size << " with " << dim;
return -1;
}
output_size = new_output_size;
}
total_output_size += output_size;
VLOG(1) << "Output Size: " << output_size
<< " Total Output Size:" << total_output_size;
}
return total_output_size;
}
std::vector<int64_t> OpLevelCostEstimator::CalculateOutputTensorSize(
const OpInfo& op_info, bool* found_unknown_shapes) {
std::vector<int64_t> output_tensor_size;
output_tensor_size.reserve(op_info.outputs().size());
for (const auto& output : op_info.outputs()) {
DataType dt = output.dtype();
const auto& original_output_shape = output.shape();
int64_t output_size = DataTypeSize(BaseType(dt));
int num_dims = std::max(1, original_output_shape.dim_size());
auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims,
found_unknown_shapes);
for (int64_t dim : output_shape) {
int64_t new_output_size = MultiplyWithoutOverflow(output_size, dim);
if (new_output_size < 0) {
VLOG(1) << "Overflow encountered when estimating cost, multiplying "
<< output_size << " with " << dim;
}
output_size = new_output_size;
}
output_tensor_size.push_back(output_size);
}
return output_tensor_size;
}
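// Fills NodeCosts with the default cost model: the caller-supplied compute op
// count plus per-tensor input and output byte counts, with max_memory set to
// the total output bytes.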
absl::Status OpLevelCostEstimator::PredictDefaultNodeCosts(
const int64_t num_compute_ops, const OpContext& op_context,
bool* found_unknown_shapes, NodeCosts* node_costs) {
const auto& op_info = op_context.op_info;
node_costs->num_compute_ops = num_compute_ops;
node_costs->num_input_bytes_accessed =
CalculateInputTensorSize(op_info, found_unknown_shapes);
node_costs->num_output_bytes_accessed =
CalculateOutputTensorSize(op_info, found_unknown_shapes);
node_costs->max_memory = node_costs->num_total_output_bytes();
if (*found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
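// Returns true if any input tensor has a zero-sized dimension, which makes a
// convolution degenerate.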
bool HasZeroDim(const OpInfo& op_info) {
for (int i = 0; i < op_info.inputs_size(); ++i) {
const auto& input = op_info.inputs(i);
for (int j = 0; j < input.shape().dim_size(); ++j) {
const auto& dim = input.shape().dim(j);
if (dim.size() == 0) {
VLOG(1) << "Convolution config has zero dim "
<< op_info.ShortDebugString();
return true;
}
}
}
return false;
}
absl::Status OpLevelCostEstimator::PredictConv2D(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
if (HasZeroDim(op_info)) {
node_costs->num_nodes_with_unknown_shapes = 1;
return errors::InvalidArgument("Conv2D op includes zero dimension: ",
op_info.ShortDebugString());
}
bool found_unknown_shapes = false;
int64_t num_compute_ops =
CountConv2DOperations(op_info, &found_unknown_shapes);
return PredictDefaultNodeCosts(num_compute_ops, op_context,
&found_unknown_shapes, node_costs);
}
absl::Status OpLevelCostEstimator::PredictConv2DBackpropInput(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
if (HasZeroDim(op_info)) {
node_costs->num_nodes_with_unknown_shapes = 1;
return errors::InvalidArgument(
"Conv2DBackpropInput op includes zero dimension",
op_info.ShortDebugString());
}
bool found_unknown_shapes = false;
int64_t num_compute_ops = CountConv2DBackpropInputOperations(
op_info, nullptr, &found_unknown_shapes);
return PredictDefaultNodeCosts(num_compute_ops, op_context,
&found_unknown_shapes, node_costs);
}
absl::Status OpLevelCostEstimator::PredictConv2DBackpropFilter(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
if (HasZeroDim(op_info)) {
node_costs->num_nodes_with_unknown_shapes = 1;
return errors::InvalidArgument(
"Conv2DBackpropFilter op includes zero dimension",
op_info.ShortDebugString());
}
bool found_unknown_shapes = false;
int64_t num_compute_ops = CountConv2DBackpropFilterOperations(
op_info, nullptr, &found_unknown_shapes);
return PredictDefaultNodeCosts(num_compute_ops, op_context,
&found_unknown_shapes, node_costs);
}
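// Decomposes FusedConv2DBiasActivation into component ops (Conv2D, Mul,
// BiasAdd, Relu, plus a scaled side input when one is present) and sums their
// costs via PredictFusedOp. Inputs are read in the order conv_input, filter,
// (bias), side_input, conv_input_scale, side_input_scale; the output tensor
// is used as a stand-in shape for the bias and side-input addends.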
absl::Status OpLevelCostEstimator::PredictFusedConv2DBiasActivation(
const OpContext& op_context, NodeCosts* node_costs) const {
std::string data_format = GetDataFormat(op_context.op_info);
if (data_format != "NCHW" && data_format != "NHWC" &&
data_format != "NCHW_VECT_C") {
return errors::InvalidArgument(
"Unsupported data format (", data_format,
") for op: ", op_context.op_info.ShortDebugString());
}
std::string filter_format = GetFilterFormat(op_context.op_info);
if (filter_format != "HWIO" && filter_format != "OIHW" &&
filter_format != "OIHW_VECT_I") {
return errors::InvalidArgument(
"Unsupported filter format (", filter_format,
") for op: ", op_context.op_info.ShortDebugString());
}
auto& conv_input = op_context.op_info.inputs(0);
auto& filter = op_context.op_info.inputs(1);
auto& side_input = op_context.op_info.inputs(3);
auto& conv_input_scale = op_context.op_info.inputs(4);
auto& side_input_scale = op_context.op_info.inputs(5);
bool found_unknown_shapes = false;
auto dims = ConvolutionDimensionsFromInputs(
conv_input.shape(), filter.shape(), op_context.op_info,
&found_unknown_shapes);
OpInfo::TensorProperties output;
if (data_format == "NCHW" || data_format == "NCHW_VECT_C") {
output = DescribeTensor(DT_FLOAT, {dims.batch, dims.oz, dims.oy, dims.ox});
} else if (data_format == "NHWC") {
output = DescribeTensor(DT_FLOAT, {dims.batch, dims.oy, dims.ox, dims.oz});
}
std::vector<OpContext> component_ops = {
FusedChildContext(op_context, "Conv2D", output, {conv_input, filter}),
FusedChildContext(op_context, "Mul", output, {output, conv_input_scale}),
FusedChildContext(
op_context, "BiasAdd", output,
{output, output}),
FusedChildContext(op_context, "Relu", output, {output})};
if (side_input.shape().dim_size() > 0) {
component_ops.push_back(FusedChildContext(op_context, "Mul", side_input,
{side_input, side_input_scale}));
component_ops.push_back(FusedChildContext(
op_context, "Add", output,
{output, output}));
}
auto op_context_with_output = op_context;
op_context_with_output.op_info.mutable_outputs()->Clear();
*op_context_with_output.op_info.mutable_outputs()->Add() = output;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return PredictFusedOp(op_context_with_output, component_ops, node_costs);
}
absl::Status OpLevelCostEstimator::PredictMatMul(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
int64_t num_compute_ops =
CountMatMulOperations(op_info, &found_unknown_shapes);
return PredictDefaultNodeCosts(num_compute_ops, op_context,
&found_unknown_shapes, node_costs);
}
absl::Status OpLevelCostEstimator::PredictEinsum(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
auto it = op_info.attr().find("equation");
if (it == op_info.attr().end()) {
return errors::InvalidArgument("Einsum op doesn't have equation attr: ",
op_info.ShortDebugString());
}
OpContext batch_matmul_op_context;
bool found_unknown_shapes = false;
bool success = GenerateBatchMatmulContextFromEinsum(
op_context, &batch_matmul_op_context, &found_unknown_shapes);
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
if (!success) {
return PredictCostOfAnUnknownOp(op_context, node_costs);
}
return PredictNodeCosts(batch_matmul_op_context, node_costs);
}
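// SparseTensorDenseMatMul: one multiply-accumulate per nonzero of the sparse
// operand per column of the dense operand, so only the rows of B actually
// touched by A's nonzeros are charged as input bytes.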
absl::Status OpLevelCostEstimator::PredictSparseTensorDenseMatMul(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
int64_t num_elems_in_a =
CalculateTensorElementCount(op_info.inputs(1), &found_unknown_shapes);
auto b_matrix = op_info.inputs(3);
auto b_matrix_shape =
MaybeGetMinimumShape(b_matrix.shape(), 2, &found_unknown_shapes);
int64_t n_dim = b_matrix_shape[1];
const int64_t op_count = kOpsPerMac * num_elems_in_a * n_dim;
int64_t a_indices_input_size =
CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
int64_t a_values_input_size =
CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
int64_t a_shape_input_size =
CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
int64_t b_input_size =
num_elems_in_a * n_dim * DataTypeSize(BaseType(b_matrix.dtype()));
int64_t output_size = CalculateOutputSize(op_info, &found_unknown_shapes);
node_costs->num_compute_ops = op_count;
node_costs->num_input_bytes_accessed = {a_indices_input_size,
a_values_input_size,
a_shape_input_size, b_input_size};
node_costs->num_output_bytes_accessed = {output_size};
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictNoOp(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
VLOG(1) << "Op:" << op_info.op() << " Execution Time 0 (ns)";
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictPureMemoryOp(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
node_costs->num_nodes_with_pure_memory_op = 1;
return PredictDefaultNodeCosts(0, op_context, &found_unknown_shapes,
node_costs);
}
absl::Status OpLevelCostEstimator::PredictIdentity(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
VLOG(1) << "Op:" << op_info.op() << " Minimum cost for Identity";
node_costs->minimum_cost_op = true;
node_costs->num_compute_ops = kMinComputeOp;
node_costs->num_input_bytes_accessed = {0};
node_costs->num_output_bytes_accessed = {0};
bool inaccurate = false;
node_costs->max_memory = CalculateOutputSize(op_info, &inaccurate);
if (inaccurate) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictVariable(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
VLOG(1) << "Op:" << op_info.op() << " Minimum cost for Variable";
node_costs->minimum_cost_op = true;
node_costs->num_compute_ops = kMinComputeOp;
node_costs->num_input_bytes_accessed = {0};
node_costs->num_output_bytes_accessed = {0};
bool inaccurate = false;
node_costs->persistent_memory = CalculateOutputSize(op_info, &inaccurate);
if (inaccurate) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictBatchMatMul(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
int64_t num_compute_ops =
CountBatchMatMulOperations(op_info, &found_unknown_shapes);
return PredictDefaultNodeCosts(num_compute_ops, op_context,
&found_unknown_shapes, node_costs);
}
absl::Status OpLevelCostEstimator::PredictMetadata(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
node_costs->minimum_cost_op = true;
node_costs->num_compute_ops = kMinComputeOp;
node_costs->num_input_bytes_accessed = {0};
node_costs->num_output_bytes_accessed = {0};
bool inaccurate = false;
node_costs->max_memory = CalculateOutputSize(op_info, &inaccurate);
if (inaccurate) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
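// Gather/GatherNd/GatherV2, Slice, and StridedSlice read only the selected
// region of the params input, so its bytes accessed are approximated by the
// output size; the remaining index/begin/end/strides inputs are charged by
// element count.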
absl::Status OpLevelCostEstimator::PredictGatherOrSlice(
const OpContext& op_context, NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
const int inputs_needed = op_info.op() == "Slice" ? 3 : 2;
if (op_info.outputs_size() == 0 || op_info.inputs_size() < inputs_needed) {
return errors::InvalidArgument(
op_info.op(),
" Op doesn't have valid input / output: ", op_info.ShortDebugString());
}
bool unknown_shapes = false;
const int64_t op_count =
CalculateTensorElementCount(op_info.outputs(0), &unknown_shapes);
node_costs->num_compute_ops = op_count;
const int64_t output_size = CalculateOutputSize(op_info, &unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->num_input_bytes_accessed.reserve(op_info.inputs().size());
int64_t input_size = output_size;
node_costs->num_input_bytes_accessed.push_back(input_size);
int begin_input_index = 1;
int end_input_index;
if (op_info.op() == "Slice") {
end_input_index = 3;
} else if (op_info.op() == "StridedSlice") {
end_input_index = 4;
} else {
end_input_index = 2;
}
for (int i = begin_input_index; i < end_input_index; ++i) {
node_costs->num_input_bytes_accessed.push_back(
CalculateTensorElementCount(op_info.inputs(i), &unknown_shapes));
}
if (unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
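// Scatter ops: the op count is the number of indices times the number of
// elements per first-dimension slice of the ref tensor; bytes read cover the
// touched portion of ref plus the full indices and updates tensors.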
absl::Status OpLevelCostEstimator::PredictScatter(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
const int64_t num_indices =
CalculateTensorElementCount(op_info.inputs(1), &found_unknown_shapes);
int64_t num_elems_in_ref_per_index = 1;
std::vector<int64_t> ref_tensor_shape = MaybeGetMinimumShape(
op_info.inputs(0).shape(), op_info.inputs(0).shape().dim_size(),
&found_unknown_shapes);
for (int i = 1; i < ref_tensor_shape.size(); ++i) {
num_elems_in_ref_per_index *= ref_tensor_shape[i];
}
const int64_t op_count = num_indices * num_elems_in_ref_per_index;
node_costs->num_compute_ops = op_count;
int64_t ref_input_size =
op_count * DataTypeSize(BaseType(op_info.inputs(0).dtype()));
int64_t indices_input_size =
CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
int64_t updates_input_size =
CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
node_costs->num_input_bytes_accessed = {ref_input_size, indices_input_size,
updates_input_size};
int64_t output_size =
op_count * DataTypeSize(BaseType(op_info.outputs(0).dtype()));
node_costs->num_output_bytes_accessed = {output_size};
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictFusedOp(
const OpContext& op_context,
const std::vector<OpContext>& fused_op_contexts,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
absl::Status s =
PredictDefaultNodeCosts(0, op_context, &found_unknown_shapes, node_costs);
for (auto& fused_op : fused_op_contexts) {
NodeCosts fused_node_costs;
s.Update(PredictNodeCosts(fused_op, &fused_node_costs));
node_costs->num_compute_ops += fused_node_costs.num_compute_ops;
node_costs->inaccurate |= fused_node_costs.inaccurate;
node_costs->num_nodes_with_unknown_shapes |=
fused_node_costs.num_nodes_with_unknown_shapes;
node_costs->num_nodes_with_unknown_op_type |=
fused_node_costs.num_nodes_with_unknown_op_type;
node_costs->num_nodes_with_pure_memory_op |=
fused_node_costs.num_nodes_with_pure_memory_op;
}
return absl::OkStatus();
}
OpContext OpLevelCostEstimator::FusedChildContext(
const OpContext& parent, const std::string& op_name,
const OpInfo::TensorProperties& output,
const std::vector<OpInfo::TensorProperties>& inputs) {
OpContext new_context;
new_context.name = op_name;
new_context.device_name = parent.device_name;
new_context.op_info = parent.op_info;
new_context.op_info.set_op(op_name);
new_context.op_info.mutable_inputs()->Clear();
for (const auto& input : inputs) {
*new_context.op_info.mutable_inputs()->Add() = input;
}
new_context.op_info.mutable_outputs()->Clear();
*new_context.op_info.mutable_outputs()->Add() = output;
return new_context;
}
OpInfo::TensorProperties OpLevelCostEstimator::DescribeTensor(
DataType type, const std::vector<int64_t>& dims) {
OpInfo::TensorProperties ret;
ret.set_dtype(type);
auto shape = ret.mutable_shape();
for (const int dim : dims) {
shape->add_dim()->set_size(dim);
}
return ret;
}
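// Extracts pooling-style dimensions (batch, spatial extents, channels, kernel
// size, strides, padding) from an image-shaped input, honoring the NCHW and
// NHWC data formats. Returns InvalidArgument if either spatial stride is
// zero, since the output size would be ill-defined.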
absl::StatusOr<OpLevelCostEstimator::ConvolutionDimensions>
OpLevelCostEstimator::OpDimensionsFromInputs(
const TensorShapeProto& original_image_shape, const OpInfo& op_info,
bool* found_unknown_shapes) {
VLOG(2) << "op features: " << op_info.DebugString();
VLOG(2) << "Original image shape: " << original_image_shape.DebugString();
*found_unknown_shapes = false;
auto image_shape =
MaybeGetMinimumShape(original_image_shape, 4, found_unknown_shapes);
VLOG(2) << "Image shape: " << absl::StrJoin(image_shape, ", ");
int x_index, y_index, channel_index;
const std::string& data_format = GetDataFormat(op_info);
if (data_format == "NCHW") {
channel_index = 1;
y_index = 2;
x_index = 3;
} else {
y_index = 1;
x_index = 2;
channel_index = 3;
}
int64_t batch = image_shape[0];
int64_t ix = image_shape[x_index];
int64_t iy = image_shape[y_index];
int64_t iz = image_shape[channel_index];
std::vector<int64_t> ksize = GetKernelSize(op_info);
int64_t kx = ksize[x_index];
int64_t ky = ksize[y_index];
int64_t kz = iz;
std::vector<int64_t> strides = GetStrides(op_info);
int64_t sx = strides[x_index];
int64_t sy = strides[y_index];
if (sx == 0 || sy == 0) {
return errors::InvalidArgument(
"Stride must be > 0 for Height and Width, but got (", sy, ", ", sx,
")");
}
const auto padding = GetPadding(op_info);
int64_t ox = GetOutputSize(ix, kx, sx, padding);
int64_t oy = GetOutputSize(iy, ky, sy, padding);
int64_t oz = iz;
OpLevelCostEstimator::ConvolutionDimensions conv_dims = {
batch, ix, iy, iz, kx, ky, kz, oz, ox, oy, sx, sy, padding};
return conv_dims;
}
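// MaxPool charges kx * ky - 1 comparisons per output element (one op for 1x1
// kernels). When pooling windows do not overlap vertically (ky < sy), only
// the input rows covered by some window count as bytes read.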
absl::Status OpLevelCostEstimator::PredictMaxPool(const OpContext& op_context,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
&found_unknown_shapes));
int per_output_ops = dims.kx * dims.ky == 1 ? 1 : dims.kx * dims.ky - 1;
int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * per_output_ops;
node_costs->num_compute_ops = ops;
int64_t input_size = 0;
if (dims.ky >= dims.sy) {
input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
} else {
const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
}
node_costs->num_input_bytes_accessed = {input_size};
const int64_t output_size =
CalculateOutputSize(op_info, &found_unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->max_memory = output_size;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
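// MaxPoolGrad's op count depends on how windows tile the input: 1x1 kernels
// touch each input element once; non-overlapping windows (k <= s) pay for the
// in-window comparisons plus one write per input element; overlapping windows
// pay for an extra pass over the input.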
absl::Status OpLevelCostEstimator::PredictMaxPoolGrad(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
if (op_info.inputs_size() < 3) {
return errors::InvalidArgument("MaxPoolGrad op has invalid inputs: ",
op_info.ShortDebugString());
}
TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
&found_unknown_shapes));
int64_t ops = 0;
if (dims.kx == 1 && dims.ky == 1) {
ops = dims.batch * dims.ix * dims.iy * dims.iz;
} else if (dims.kx <= dims.sx && dims.ky <= dims.sy) {
ops = dims.batch * dims.iz *
(dims.ox * dims.oy * (dims.kx * dims.ky - 1) + dims.ix * dims.iy);
} else {
ops = dims.batch * dims.iz *
(dims.ox * dims.oy * (dims.kx * dims.ky - 1) + dims.ix * dims.iy * 2);
}
node_costs->num_compute_ops = ops;
const int64_t input0_size =
CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
const int64_t input2_size =
CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
node_costs->num_input_bytes_accessed = {input0_size, 0, input2_size};
const int64_t output_size =
CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->max_memory = output_size;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictAssignVariableOps(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
if (op_info.inputs_size() != 2) {
return errors::InvalidArgument("AssignVariable op has invalid input: ",
op_info.ShortDebugString());
}
const int64_t ops = op_info.op() == kAssignVariableOp
? 0
: CalculateTensorElementCount(op_info.inputs(1),
&found_unknown_shapes);
node_costs->num_compute_ops = ops;
const int64_t input_size = CalculateInputSize(op_info, &found_unknown_shapes);
node_costs->num_input_bytes_accessed = {input_size};
node_costs->num_output_bytes_accessed = {0};
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
&found_unknown_shapes));
int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky;
node_costs->num_compute_ops = ops;
int64_t input_size;
if (dims.ky >= dims.sy) {
input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
} else {
const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
}
node_costs->num_input_bytes_accessed = {input_size};
const int64_t output_size =
CalculateOutputSize(op_info, &found_unknown_shapes);
node_costs->num_output_bytes_accessed = {output_size};
node_costs->max_memory = output_size;
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictAvgPoolGrad(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
bool shape_found = false;
TensorShapeProto x_shape;
if (op_info.inputs_size() >= 1 && op_info.inputs(0).has_value()) {
const TensorProto& value = op_info.inputs(0).value();
shape_found = GetTensorShapeProtoFromTensorProto(value, &x_shape);
}
if (!shape_found && op_info.outputs_size() > 0) {
x_shape = op_info.outputs(0).shape();
shape_found = true;
}
if (!shape_found) {
x_shape.Clear();
for (int i = 0; i < 4; ++i) {
x_shape.add_dim()->set_size(1);
}
found_unknown_shapes = true;
}
TF_ASSIGN_OR_RETURN(
ConvolutionDimensions dims,
OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes));
int64_t ops = 0;
if (dims.kx <= dims.sx && dims.ky <= dims.sy) {
ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy);
} else {
ops = dims.batch * dims.iz *
(dims.ix * dims.iy + dims.ox * dims.oy * (dims.kx * dims.ky + 1));
}
auto s = PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
node_costs);
node_costs->max_memory = node_costs->num_total_output_bytes();
return s;
}
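// FusedBatchNorm: training mode charges roughly 4 ops per element plus an
// rsqrt per channel for computing the statistics, and produces five outputs;
// inference mode uses precomputed statistics at about 2 ops per element.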
absl::Status OpLevelCostEstimator::PredictFusedBatchNorm(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
&found_unknown_shapes));
const bool is_training = IsTraining(op_info);
int64_t ops = 0;
const auto rsqrt_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_rsqrt_op<float>>::Cost;
if (is_training) {
ops = dims.iz * (dims.batch * dims.ix * dims.iy * 4 + 6 + rsqrt_cost);
} else {
ops = dims.batch * dims.ix * dims.iy * dims.iz * 2;
}
node_costs->num_compute_ops = ops;
const int64_t size_nhwc =
CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
const int64_t size_c =
CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
if (is_training) {
node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c};
node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c, size_c,
size_c};
node_costs->internal_read_bytes = size_nhwc;
} else {
node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c, size_c,
size_c};
node_costs->num_output_bytes_accessed = {size_nhwc};
}
node_costs->max_memory = node_costs->num_total_output_bytes();
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
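// FusedBatchNormGrad reads y_backprop and x (both full-tensor sized) plus two
// per-channel vectors, at roughly 11 ops per element plus an rsqrt per
// channel.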
absl::Status OpLevelCostEstimator::PredictFusedBatchNormGrad(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto& op_info = op_context.op_info;
TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
OpDimensionsFromInputs(op_info.inputs(1).shape(), op_info,
&found_unknown_shapes));
int64_t ops = 0;
const auto rsqrt_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_rsqrt_op<float>>::Cost;
ops = dims.iz * (dims.batch * dims.ix * dims.iy * 11 + 5 + rsqrt_cost);
node_costs->num_compute_ops = ops;
const int64_t size_nhwc =
CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
const int64_t size_c =
CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
node_costs->num_input_bytes_accessed = {size_nhwc, size_nhwc, size_c, size_c};
node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c};
node_costs->internal_read_bytes = size_nhwc;
node_costs->max_memory = node_costs->num_total_output_bytes();
if (found_unknown_shapes) {
node_costs->inaccurate = true;
node_costs->num_nodes_with_unknown_shapes = 1;
}
return absl::OkStatus();
}
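// AddN-style n-ary ops: one add per element per additional input, with the
// element count taken as the largest among the inputs and the first output.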
absl::Status OpLevelCostEstimator::PredictNaryOp(const OpContext& op_context,
NodeCosts* node_costs) const {
const auto& op_info = op_context.op_info;
bool found_unknown_shapes = false;
int64_t op_count = CalculateLargestInputCount(op_info, &found_unknown_shapes);
if (op_info.outputs_size() > 0) {
op_count = std::max(
op_count,
CalculateTensorElementCount(op_info.outputs(0), &found_unknown_shapes));
}
if (op_info.inputs_size() >= 2) {
op_count = std::max(op_count, CwiseOutputElementCount(op_info));
}
op_count *= op_info.inputs_size() - 1;
const auto sum_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_sum_op<float>>::Cost;
return PredictDefaultNodeCosts(op_count * sum_cost, op_context,
&found_unknown_shapes, node_costs);
}
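// Softmax computes exp(logits) / reduce_sum(exp(logits)) along the last axis:
// one exp, one add, and one multiply per logit, plus one reciprocal per row.
// For logits of shape [B, C] this yields (exp + add + mul) * B * C cost terms
// plus inverse * B.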
int64_t OpLevelCostEstimator::GetSoftmaxComputeOps(
const OpContext& op_context) const {
bool found_unknown_shapes = false;
const int64_t logits_size = CalculateTensorElementCount(
op_context.op_info.inputs(0), &found_unknown_shapes);
TensorShapeProto logits_shape = op_context.op_info.inputs(0).shape();
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
int64_t ops =
(EIGEN_COST(scalar_exp_op<float>) + EIGEN_COST(scalar_sum_op<float>) +
EIGEN_COST(scalar_product_op<float>)) *
logits_size +
EIGEN_COST(scalar_inverse_op<float>) * logits_shape.dim(0).size();
#undef EIGEN_COST
return ops;
}
absl::Status OpLevelCostEstimator::PredictSoftmax(const OpContext& op_context,
NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
TensorShapeProto logits_shape = op_context.op_info.inputs(0).shape();
if (logits_shape.unknown_rank() || logits_shape.dim_size() == 0) {
return errors::InvalidArgument("Softmax op has invalid input: ",
op_context.op_info.ShortDebugString());
}
int64_t ops = GetSoftmaxComputeOps(op_context);
return PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
node_costs);
}
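// ResizeBilinear: interpolation weights are computed once per output row and
// once per output column, and every output element then pays for the 2D
// linear interpolation (3 adds, 3 subtracts, 3 multiplies). The
// half_pixel_centers attr changes the per-weight arithmetic and must be set,
// otherwise the op is costed as unknown.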
absl::Status OpLevelCostEstimator::PredictResizeBilinear(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
if (op_context.op_info.outputs().empty() ||
op_context.op_info.inputs().empty()) {
return errors::InvalidArgument(
"ResizeBilinear op has invalid input / output ",
op_context.op_info.ShortDebugString());
}
const int64_t output_elements = CalculateTensorElementCount(
op_context.op_info.outputs(0), &found_unknown_shapes);
const auto half_pixel_centers =
op_context.op_info.attr().find("half_pixel_centers");
bool use_half_pixel_centers = false;
if (half_pixel_centers == op_context.op_info.attr().end()) {
LOG(WARNING) << "half_pixel_centers attr not set for ResizeBilinear.";
return PredictCostOfAnUnknownOp(op_context, node_costs);
} else {
use_half_pixel_centers = half_pixel_centers->second.b();
}
int64_t ops = 0;
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
const auto sub_cost_float = EIGEN_COST(scalar_difference_op<float>);
const auto sub_cost_int = EIGEN_COST(scalar_difference_op<int64_t>);
const auto add_cost = EIGEN_COST(scalar_sum_op<float>);
const auto mul_cost = EIGEN_COST(scalar_product_op<float>);
const auto floor_cost = EIGEN_COST(scalar_floor_op<float>);
const auto max_cost = EIGEN_COST(scalar_max_op<int64_t>);
const auto min_cost = EIGEN_COST(scalar_min_op<int64_t>);
const auto cast_to_int_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_cast_op<float, int64_t>>::Cost;
const auto cast_to_float_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_cast_op<int64_t, float>>::Cost;
const auto ceil_cost = EIGEN_COST(scalar_ceil_op<float>);
#undef EIGEN_COST
const std::vector<int64_t> output_shape = MaybeGetMinimumShape(
op_context.op_info.outputs(0).shape(), 4, &found_unknown_shapes);
const int64_t output_height = output_shape[1];
const int64_t output_width = output_shape[2];
int64_t interp_weight_cost = floor_cost + max_cost + min_cost +
sub_cost_float + sub_cost_int + ceil_cost +
cast_to_int_cost * 2;
if (use_half_pixel_centers) {
interp_weight_cost +=
add_cost + mul_cost + sub_cost_float + cast_to_float_cost;
} else {
interp_weight_cost += cast_to_float_cost + mul_cost;
}
ops += interp_weight_cost * (output_height + output_width);
ops += (add_cost * 3 + sub_cost_float * 3 + mul_cost * 3) * output_elements;
return PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
node_costs);
}
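// CropAndResize: per-box scale setup, per-row and per-element coordinate
// math, then either bilinear interpolation or nearest-neighbor rounding per
// output element, selected by the method attr (anything else falls back to
// the unknown-op estimate).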
absl::Status OpLevelCostEstimator::PredictCropAndResize(
const OpContext& op_context, NodeCosts* node_costs) const {
bool found_unknown_shapes = false;
const auto method = op_context.op_info.attr().find("method");
std::optional<bool> use_bilinear_interp;
if (method == op_context.op_info.attr().end() ||
method->second.s() == "bilinear") {
use_bilinear_interp = true;
} else if (method->second.s() == "nearest") {
use_bilinear_interp = false;
}
if (!use_bilinear_interp.has_value() ||
op_context.op_info.outputs().empty()) {
LOG(WARNING) << "method attr in CropAndResize invalid; expected bilinear "
"or nearest.";
return PredictCostOfAnUnknownOp(op_context, node_costs);
}
const int64_t num_boxes = op_context.op_info.inputs(1).shape().dim(0).size();
const std::vector<int64_t> crop_shape = MaybeGetMinimumShape(
op_context.op_info.outputs(0).shape(), 4, &found_unknown_shapes);
const int64_t crop_height = crop_shape[1];
const int64_t crop_width = crop_shape[2];
const int64_t output_elements = CalculateTensorElementCount(
op_context.op_info.outputs(0), &found_unknown_shapes);
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
const auto sub_cost = EIGEN_COST(scalar_difference_op<float>);
const auto add_cost = EIGEN_COST(scalar_sum_op<float>);
const auto mul_cost = EIGEN_COST(scalar_product_op<float>);
auto div_cost = EIGEN_COST(scalar_div_cost<float>);
const auto floor_cost = EIGEN_COST(scalar_floor_op<float>);
const auto ceil_cost = EIGEN_COST(scalar_ceil_op<float>);
auto round_cost = EIGEN_COST(scalar_round_op<float>);
const auto cast_to_float_cost = Eigen::internal::functor_traits<
Eigen::internal::scalar_cast_op<int64_t, float>>::Cost;
#undef EIGEN_COST
int64_t crop_area = MultiplyWithoutOverflow(crop_height, crop_width);
if (crop_area < 0)
return errors::InvalidArgument("Cannot estimate cost, multiplying ",
crop_height, " with ", crop_width,
" would overflow");
int64_t crop_volume = MultiplyWithoutOverflow(crop_area, num_boxes);
if (crop_volume < 0)
return errors::InvalidArgument("Cannot estimate cost, multiplying ",
crop_area, " with ", num_boxes,
" would overflow");
int64_t crop_depth = MultiplyWithoutOverflow(crop_height, num_boxes);
if (crop_depth < 0)
return errors::InvalidArgument("Cannot estimate cost, multiplying ",
crop_height, " with ", num_boxes,
" would overflow");
int64_t ops = (sub_cost * 6 + mul_cost * 2 + div_cost * 2) * num_boxes;
ops += (mul_cost * 2 + sub_cost + add_cost) * crop_depth;
ops += (mul_cost * 2 + sub_cost + add_cost) * crop_volume;
if (*use_bilinear_interp) {
ops += (floor_cost + ceil_cost + sub_cost) * crop_depth;
ops += (floor_cost + ceil_cost + sub_cost) * crop_volume;
ops +=
(cast_to_float_cost * 4 + add_cost * 3 + sub_cost * 3 + mul_cost * 3) *
output_elements;
} else {
ops += round_cost * 2 * crop_volume;
ops += cast_to_float_cost * output_elements;
}
return PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
node_costs);
}
}
} | #include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <unordered_set>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
using ::testing::ElementsAreArray;
namespace {
class TestOpLevelCostEstimator : public OpLevelCostEstimator {
public:
TestOpLevelCostEstimator() {
compute_memory_overlap_ = true;
device_info_ = DeviceInfo();
}
~TestOpLevelCostEstimator() override {}
void SetDeviceInfo(const DeviceInfo& device_info) {
device_info_ = device_info;
}
void SetComputeMemoryOverlap(bool value) { compute_memory_overlap_ = value; }
protected:
DeviceInfo GetDeviceInfo(const DeviceProperties& device) const override {
return device_info_;
}
DeviceInfo device_info_;
};
void ExpectZeroCost(const Costs& cost) {
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.compute_time, Costs::Duration::zero());
EXPECT_EQ(cost.execution_time, Costs::Duration::zero());
EXPECT_EQ(cost.memory_time, Costs::Duration::zero());
}
void DescribeMatrix(int rows, int columns, OpInfo* op_info) {
auto input = op_info->add_inputs();
auto shape = input->mutable_shape();
auto shape_rows = shape->add_dim();
shape_rows->set_size(rows);
auto shape_columns = shape->add_dim();
shape_columns->set_size(columns);
input->set_dtype(DT_FLOAT);
}
void SetCpuDevice(OpInfo* op_info) {
auto device = op_info->mutable_device();
device->set_type("CPU");
device->set_num_cores(10);
device->set_bandwidth(10000000);
device->set_frequency(1000);
}
OpContext DescribeMatMul(int m, int n, int l, int k) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("MatMul");
DescribeMatrix(m, l, &op_context.op_info);
DescribeMatrix(k, n, &op_context.op_info);
return op_context;
}
void DescribeArbitraryRankInput(const std::vector<int>& dims, DataType dtype,
OpInfo* op_info) {
auto input = op_info->add_inputs();
input->set_dtype(dtype);
auto shape = input->mutable_shape();
for (auto d : dims) {
shape->add_dim()->set_size(d);
}
}
void DescribeArbitraryRankOutput(const std::vector<int>& dims, DataType dtype,
OpInfo* op_info) {
auto output = op_info->add_outputs();
output->set_dtype(dtype);
auto shape = output->mutable_shape();
for (auto d : dims) {
shape->add_dim()->set_size(d);
}
}
OpContext DescribeSparseTensorDenseMatMul(const int nnz_a,
const std::vector<int>& dims_b,
const std::vector<int>& dims_out) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("SparseTensorDenseMatMul");
DescribeArbitraryRankInput({nnz_a, 2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({nnz_a}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput(dims_out, DT_FLOAT, &op_context.op_info);
return op_context;
}
OpContext DescribeXlaEinsum(const std::vector<int>& dims_a,
const std::vector<int>& dims_b,
const string& equation) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("XlaEinsum");
AttrValue equation_attribute;
equation_attribute.set_s(equation);
(*op_context.op_info.mutable_attr())["equation"] = equation_attribute;
if (!dims_a.empty())
DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);
if (!dims_b.empty())
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
return op_context;
}
OpContext DescribeEinsum(const std::vector<int>& dims_a,
const std::vector<int>& dims_b,
const string& equation) {
OpContext op_context = DescribeXlaEinsum(dims_a, dims_b, equation);
op_context.op_info.set_op("Einsum");
return op_context;
}
void DescribeDummyTensor(OpInfo::TensorProperties* tensor) {
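  // Intentionally leaves the shape and dtype unset, to describe a tensor with
  // unknown properties.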
}
void DescribeTensor1D(int dim0, OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
tensor->set_dtype(DT_FLOAT);
}
void DescribeTensor4D(int dim0, int dim1, int dim2, int dim3,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
tensor->set_dtype(DT_FLOAT);
}
void DescribeTensor5D(int dim0, int dim1, int dim2, int dim3, int dim4,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
shape->add_dim()->set_size(dim4);
tensor->set_dtype(DT_FLOAT);
}
OpContext DescribeConvolution(int batch, int ix, int iy, int iz1, int iz2,
int kx, int ky, int oz) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Conv2D");
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
DescribeTensor4D(kx, ky, iz2, oz, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeDepthwiseConv2dNative(int batch, int ix, int iy, int iz1,
int iz2, int kx, int ky, int cm) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("DepthwiseConv2dNative");
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
DescribeTensor4D(kx, ky, iz2, cm, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeFusedConv2DBiasActivation(int batch, int ix, int iy, int iz1,
int iz2, int kx, int ky, int ox,
int oy, int oz, bool has_side_input,
const string& data_format,
const string& filter_format) {
const int kVecWidth = 4;
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("FusedConv2DBiasActivation");
auto* attr_data_format = op_context.op_info.mutable_attr();
SetAttrValue(data_format, &(*attr_data_format)["data_format"]);
auto* attr_filter_format = op_context.op_info.mutable_attr();
SetAttrValue(filter_format, &(*attr_filter_format)["filter_format"]);
if (data_format == "NHWC") {
DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
} else if (data_format == "NCHW") {
DescribeTensor4D(batch, iz1, ix, iy, op_context.op_info.add_inputs());
} else {
EXPECT_EQ(data_format, "NCHW_VECT_C");
EXPECT_EQ(iz1 % kVecWidth, 0);
DescribeTensor5D(batch, iz1 / kVecWidth, ix, iy, kVecWidth,
op_context.op_info.add_inputs());
}
if (filter_format == "HWIO") {
DescribeTensor4D(kx, ky, iz2, oz, op_context.op_info.add_inputs());
} else if (filter_format == "OIHW") {
DescribeTensor4D(oz, iz2, kx, ky, op_context.op_info.add_inputs());
} else {
EXPECT_EQ(filter_format, "OIHW_VECT_I");
EXPECT_EQ(iz2 % kVecWidth, 0);
DescribeTensor5D(oz, iz2 / kVecWidth, kx, ky, kVecWidth,
op_context.op_info.add_inputs());
}
DescribeTensor1D(oz, op_context.op_info.add_inputs());
auto side_input = op_context.op_info.add_inputs();
if (has_side_input) {
if (data_format == "NHWC") {
DescribeTensor4D(batch, ox, oy, oz, side_input);
} else if (data_format == "NCHW") {
DescribeTensor4D(batch, oz, ox, oy, side_input);
} else {
EXPECT_EQ(data_format, "NCHW_VECT_C");
EXPECT_EQ(oz % kVecWidth, 0);
DescribeTensor5D(batch, oz / kVecWidth, ox, oy, kVecWidth, side_input);
}
}
DescribeTensor1D(1, op_context.op_info.add_inputs());
DescribeTensor1D(1, op_context.op_info.add_inputs());
return op_context;
}
OpContext DescribeUnaryOp(const string& op, int size1) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_outputs());
return op_context;
}
OpContext DescribeBinaryOp(const string& op, int size1, int size2) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeTensor4D(size1, 1, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(2 * size1, size2, 1, 1, op_context.op_info.add_inputs());
DescribeTensor4D(2 * size1, size2, 1, 1, op_context.op_info.add_outputs());
return op_context;
}
OpContext DescribeBiasAdd(int size1, int size2) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("BiasAdd");
DescribeTensor4D(1, 1, size2, size1, op_context.op_info.add_inputs());
DescribeTensor1D(size1, op_context.op_info.add_inputs());
DescribeTensor4D(1, 1, size2, size1, op_context.op_info.add_outputs());
return op_context;
}
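// Output extent of one spatial dimension: ceil(x / s) for SAME padding and
// floor((x - k) / s) + 1 for VALID. For example, x=7, k=3, s=2 gives
// SAME -> (7+2-1)/2 = 4 and VALID -> (7-3+2)/2 = 3.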
int GetOutputSize(const int x, const int k, const int s,
const string& padding) {
if (padding == "SAME") {
return (x + s - 1) / s;
} else {
return (x - k + s) / s;
}
}
std::vector<int> GetPoolingOutputSize(const std::vector<int>& input,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const string& data_format,
const string& padding) {
int h_index = 1;
int w_index = 2;
int c_index = 3;
if (data_format == "NCHW") {
h_index = 2;
w_index = 3;
c_index = 1;
}
int n = input[0];
int h = input[h_index];
int w = input[w_index];
int c = input[c_index];
int sx = strides[h_index];
int sy = strides[w_index];
int kx = ksize[h_index];
int ky = ksize[w_index];
int ho = GetOutputSize(h, kx, sx, padding);
int wo = GetOutputSize(w, ky, sy, padding);
std::vector<int> output;
if (data_format == "NHWC") {
output = {n, ho, wo, c};
} else {
output = {n, c, ho, wo};
}
return output;
}
void GetTensorProto(const DataType dtype, const std::vector<int64_t>& shape,
                    const std::vector<int64_t>& values,
const bool tensor_content, TensorProto* tensor_proto) {
tensor_proto->Clear();
TensorProto temp_tensor_proto;
temp_tensor_proto.set_dtype(dtype);
for (const auto& x : shape) {
temp_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(x);
}
for (const auto& x : values) {
if (dtype == DT_INT64) {
temp_tensor_proto.add_int64_val(x);
} else if (dtype == DT_INT32 || dtype == DT_INT16 || dtype == DT_INT8 ||
dtype == DT_UINT8) {
temp_tensor_proto.add_int_val(x);
} else if (dtype == DT_UINT32) {
temp_tensor_proto.add_uint32_val(x);
} else if (dtype == DT_UINT64) {
temp_tensor_proto.add_uint64_val(x);
} else {
CHECK(false) << "Unsupported dtype: " << dtype;
}
}
Tensor tensor(dtype);
CHECK(tensor.FromProto(temp_tensor_proto));
if (tensor_content) {
tensor.AsProtoTensorContent(tensor_proto);
} else {
tensor.AsProtoField(tensor_proto);
}
}
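// Builds an OpContext for a pooling-family op (AvgPool, MaxPool, AvgPoolGrad,
// MaxPoolGrad) from input shape x, ksize, strides, data_format, and padding.
// The output shape is derived with GetPoolingOutputSize; AvgPoolGrad also
// receives the original input shape as a constant 1D tensor.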
OpContext DescribePoolingOp(const string& op_name, const std::vector<int>& x,
const std::vector<int>& ksize,
const std::vector<int>& strides,
const string& data_format, const string& padding) {
OpContext op_context;
auto& op_info = op_context.op_info;
SetCpuDevice(&op_info);
op_info.set_op(op_name);
const std::vector<int> y =
GetPoolingOutputSize(x, ksize, strides, data_format, padding);
if (op_name == "AvgPool" || op_name == "MaxPool") {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_outputs());
} else if (op_name == "AvgPoolGrad") {
DescribeArbitraryRankInput({4}, DT_INT32, &op_info);
auto* tensor_proto = op_info.mutable_inputs(0)->mutable_value();
    GetTensorProto(DT_INT32, {4}, {x[0], x[1], x[2], x[3]},
                   /*tensor_content=*/false, tensor_proto);
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
} else if (op_name == "MaxPoolGrad") {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
}
auto* attr = op_info.mutable_attr();
SetAttrValue(data_format, &(*attr)["data_format"]);
SetAttrValue(padding, &(*attr)["padding"]);
SetAttrValue(strides, &(*attr)["strides"]);
SetAttrValue(ksize, &(*attr)["ksize"]);
return op_context;
}
OpContext DescribeFusedBatchNorm(const bool is_training, const bool is_grad,
const std::vector<int>& x,
const string& data_format) {
OpContext op_context = DescribePoolingOp("MaxPool", x, {1, 1, 1, 1},
{1, 1, 1, 1}, data_format, "SAME");
auto& op_info = op_context.op_info;
if (is_grad) {
op_info.set_op("FusedBatchNormGrad");
} else {
op_info.set_op("FusedBatchNorm");
}
if (is_grad) {
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
}
int num_1d_inputs = is_grad ? 3 : 4;
for (int i = 0; i < num_1d_inputs; i++) {
auto* tensor = op_info.add_inputs();
auto* shape = tensor->mutable_shape();
shape->add_dim()->set_size(x[3]);
tensor->set_dtype(DT_FLOAT);
}
for (int i = 0; i < 4; i++) {
auto* tensor = op_info.add_outputs();
auto* shape = tensor->mutable_shape();
shape->add_dim()->set_size(x[3]);
tensor->set_dtype(DT_FLOAT);
}
auto* attr = op_context.op_info.mutable_attr();
attr->erase("ksize");
attr->erase("strides");
attr->erase("padding");
SetAttrValue(is_training, &(*attr)["is_training"]);
return op_context;
}
}
class OpLevelCostEstimatorTest : public ::testing::Test {
protected:
using BatchMatMulDimensions = OpLevelCostEstimator::BatchMatMulDimensions;
Costs PredictCosts(const OpContext& op_context) const {
return estimator_.PredictCosts(op_context);
}
int64_t CountMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return estimator_.CountMatMulOperations(op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return estimator_.CountBatchMatMulOperations(op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
BatchMatMulDimensions* batch_mat_mul,
bool* found_unknown_shapes) const {
return estimator_.CountBatchMatMulOperations(op_info, batch_mat_mul,
found_unknown_shapes);
}
void SetComputeMemoryOverlap(bool value) {
estimator_.compute_memory_overlap_ = value;
}
void ValidateOpDimensionsFromInputs(const int n, const int h, const int w,
const int c, const int kx, const int ky,
const int sx, const int sy,
const string& data_format,
const string& padding) {
OpContext op_context;
int ho;
int wo;
if (data_format == "NHWC") {
op_context = DescribePoolingOp("MaxPool", {n, h, w, c}, {1, kx, ky, 1},
{1, sx, sy, 1}, "NHWC", padding);
ho = op_context.op_info.outputs(0).shape().dim(1).size();
wo = op_context.op_info.outputs(0).shape().dim(2).size();
} else {
op_context = DescribePoolingOp("MaxPool", {n, c, h, w}, {1, 1, kx, ky},
{1, 1, sx, sy}, "NCHW", padding);
ho = op_context.op_info.outputs(0).shape().dim(2).size();
wo = op_context.op_info.outputs(0).shape().dim(3).size();
}
bool found_unknown_shapes;
TF_ASSERT_OK_AND_ASSIGN(
auto dims, OpLevelCostEstimator::OpDimensionsFromInputs(
op_context.op_info.inputs(0).shape(), op_context.op_info,
&found_unknown_shapes));
Padding padding_enum;
if (padding == "VALID") {
padding_enum = Padding::VALID;
} else {
padding_enum = Padding::SAME;
}
EXPECT_EQ(n, dims.batch);
EXPECT_EQ(h, dims.ix);
EXPECT_EQ(w, dims.iy);
EXPECT_EQ(c, dims.iz);
EXPECT_EQ(kx, dims.kx);
EXPECT_EQ(ky, dims.ky);
EXPECT_EQ(sx, dims.sx);
EXPECT_EQ(sy, dims.sy);
EXPECT_EQ(ho, dims.ox);
EXPECT_EQ(wo, dims.oy);
EXPECT_EQ(c, dims.oz);
EXPECT_EQ(padding_enum, dims.padding);
}
absl::StatusOr<OpLevelCostEstimator::ConvolutionDimensions>
CallOpDimensionsFromInputs(const int n, const int h, const int w, const int c,
const int kx, const int ky, const int sx,
const int sy, const string& data_format,
const string& padding) {
OpContext op_context;
const std::vector<int> x = {n, h, w, c};
const std::vector<int> ksize = {1, kx, ky, 1};
std::vector<int> strides;
if (data_format == "NHWC") {
strides = {1, sy, sx, 1};
} else {
strides = {1, 1, sy, sx};
}
auto& op_info = op_context.op_info;
SetCpuDevice(&op_info);
op_info.set_op("MaxPool");
DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
auto* attr = op_info.mutable_attr();
SetAttrValue(data_format, &(*attr)["data_format"]);
SetAttrValue(padding, &(*attr)["padding"]);
SetAttrValue(strides, &(*attr)["strides"]);
SetAttrValue(ksize, &(*attr)["ksize"]);
bool found_unknown_shapes;
return OpLevelCostEstimator::OpDimensionsFromInputs(
op_context.op_info.inputs(0).shape(), op_context.op_info,
&found_unknown_shapes);
}
OpLevelCostEstimator estimator_;
};
class OpLevelBatchMatMulCostEstimatorTest
: public OpLevelCostEstimatorTest,
public ::testing::WithParamInterface<const char*> {
protected:
OpContext DescribeBatchMatMul(const std::vector<int>& dims_a,
const std::vector<int>& dims_b) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(GetParam());
DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
return op_context;
}
int64_t CountBatchMatMulOperations(const OpInfo& op_info,
bool* found_unknown_shapes) const {
return OpLevelCostEstimatorTest::CountBatchMatMulOperations(
op_info, found_unknown_shapes);
}
int64_t CountBatchMatMulDimProduct(const OpInfo& op_info,
bool* found_unknown_shapes) const {
BatchMatMulDimensions batch_mat_mul;
batch_mat_mul.matmul_dims.n = 0;
batch_mat_mul.matmul_dims.m = 0;
batch_mat_mul.matmul_dims.k = 0;
OpLevelCostEstimatorTest::CountBatchMatMulOperations(
op_info, &batch_mat_mul, found_unknown_shapes);
int dimension_product = 1;
for (auto dim : batch_mat_mul.batch_dims) dimension_product *= dim;
dimension_product *= batch_mat_mul.matmul_dims.n;
dimension_product *= batch_mat_mul.matmul_dims.m;
dimension_product *= batch_mat_mul.matmul_dims.k;
return dimension_product;
}
};
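// Ops that only materialize persistent state (constants, variable handles)
// should be modeled as essentially free: zero memory time and a single unit
// of compute time.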
TEST_F(OpLevelCostEstimatorTest, TestPersistentOpCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
std::unordered_set<string> persistent_ops = {
"Const", "Variable", "VariableV2", "AutoReloadVariable",
"VarHandleOp", "ReadVariableOp",
};
for (const auto& op : persistent_ops) {
op_context.op_info.set_op(op);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(0), cost.memory_time);
EXPECT_EQ(Costs::Duration(1), cost.compute_time);
EXPECT_EQ(Costs::Duration(1), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, TestGatherCosts) {
std::vector<std::string> gather_ops = {"Gather", "GatherNd", "GatherV2"};
for (const auto& op : gather_ops) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({16, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(130), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(146), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, TestGatherCostsWithoutOutput) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Gather");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(0), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(0), cost.execution_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestSliceCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Slice");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(81), cost.memory_time);
EXPECT_EQ(Costs::Duration(10), cost.compute_time);
EXPECT_EQ(Costs::Duration(91), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestStridedSliceCosts) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("StridedSlice");
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(81), cost.memory_time);
EXPECT_EQ(Costs::Duration(10), cost.compute_time);
EXPECT_EQ(Costs::Duration(91), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, TestScatterOps) {
std::vector<string> scatter_ops = {"ScatterAdd", "ScatterDiv", "ScatterMax",
"ScatterMin", "ScatterMul", "ScatterSub",
"ScatterUpdate"};
for (const auto& op : scatter_ops) {
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
DescribeArbitraryRankInput({16, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
&op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(205), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(221), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op(op);
DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankInput({16}, DT_INT32, &op_context.op_info);
DescribeArbitraryRankInput({}, DT_FLOAT, &op_context.op_info);
DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
&op_context.op_info);
auto cost = estimator_.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(135), cost.memory_time);
EXPECT_EQ(Costs::Duration(16), cost.compute_time);
EXPECT_EQ(Costs::Duration(151), cost.execution_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
}
}
TEST_F(OpLevelCostEstimatorTest, BiasAddExecutionTime) {
auto cost = PredictCosts(DescribeBiasAdd(1000, 10));
EXPECT_EQ(Costs::Duration(8400), cost.memory_time);
EXPECT_EQ(Costs::Duration(1000), cost.compute_time);
EXPECT_EQ(Costs::Duration(9400), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, Conv2DExecutionTime) {
auto cost = PredictCosts(DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(233780), cost.memory_time);
EXPECT_EQ(Costs::Duration(354877440), cost.compute_time);
EXPECT_EQ(Costs::Duration(355111220), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, InvalidConv2DConfig) {
const std::vector<std::string> conv_ops = {
"Conv2D",
"Conv2DBackpropFilter",
"Conv2DBackpropInput",
"DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter",
"DepthwiseConv2dNativeBackpropInput",
};
const std::vector<int> valid_conv_config = {16, 19, 19, 48, 48, 5, 5, 256};
for (const auto& op : conv_ops) {
for (int i = 0; i < valid_conv_config.size(); ++i) {
std::vector<int> conv_config(valid_conv_config);
conv_config[i] = 0;
auto op_context = DescribeConvolution(
conv_config[0], conv_config[1], conv_config[2], conv_config[3],
conv_config[4], conv_config[5], conv_config[6], conv_config[7]);
op_context.op_info.set_op(op);
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(0), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(0), cost.execution_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
}
}
TEST_F(OpLevelCostEstimatorTest, DepthwiseConv2dNativeExecutionTime) {
auto cost =
PredictCosts(DescribeDepthwiseConv2dNative(16, 19, 19, 48, 48, 5, 5, 3));
EXPECT_EQ(Costs::Duration(112340), cost.memory_time);
EXPECT_EQ(Costs::Duration(4158720), cost.compute_time);
EXPECT_EQ(Costs::Duration(4271060), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, DummyExecutionTime) {
auto cost = PredictCosts(DescribeBinaryOp("Dummy", 1000, 1));
EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, ExecutionTimeSumOrMax) {
SetComputeMemoryOverlap(true);
auto cost = PredictCosts(DescribeBinaryOp("Dummy", 1000, 1));
EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
SetComputeMemoryOverlap(false);
}
TEST_F(OpLevelCostEstimatorTest,
FusedConv2DBiasActivationNCHW_HWIO_NoSideInput) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/false,
"NCHW", "HWIO"));
EXPECT_EQ(Costs::Duration(825345), cost.memory_time);
EXPECT_EQ(Costs::Duration(355321037), cost.compute_time);
EXPECT_EQ(Costs::Duration(356146382), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_HWIO) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NCHW", "HWIO"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_OIHW) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NCHW", "OIHW"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNHWC_HWIO) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NHWC", "HWIO"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNHWC_OIHW) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NHWC", "OIHW"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_VECT_C_OIHW) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NCHW_VECT_C", "OIHW"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_OIHW_VECT_I) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NCHW", "OIHW_VECT_I"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest,
FusedConv2DBiasActivationNCHW_VECT_C_OIHW_VECT_I) {
auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, /*has_side_input=*/true,
"NCHW_VECT_C", "OIHW_VECT_I"));
EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, MulExecutionTime) {
auto cost = PredictCosts(DescribeBinaryOp("Mul", 1000, 1));
EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
EXPECT_EQ(Costs::Duration(200), cost.compute_time);
EXPECT_EQ(Costs::Duration(2200), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, MulBroadcastExecutionTime) {
auto cost = PredictCosts(DescribeBinaryOp("Mul", 1000, 2));
EXPECT_EQ(Costs::Duration(3600), cost.memory_time);
EXPECT_EQ(Costs::Duration(400), cost.compute_time);
EXPECT_EQ(Costs::Duration(4000), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, ModExecutionTime) {
auto cost = PredictCosts(DescribeBinaryOp("Mod", 1000, 1));
EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
EXPECT_EQ(Costs::Duration(1600), cost.compute_time);
EXPECT_EQ(Costs::Duration(3600), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, SquaredDifferenceExecutionTime) {
auto cost = PredictCosts(DescribeBinaryOp("SquaredDifference", 1000, 2));
EXPECT_EQ(cost.memory_time, Costs::Duration(3600));
EXPECT_EQ(cost.compute_time, Costs::Duration(800));
EXPECT_EQ(cost.execution_time, Costs::Duration(4400));
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, UnaryOpExecutionTime) {
std::vector<std::pair<std::string, int>> unary_ops = {
{"All", 1}, {"ArgMax", 1}, {"Cast", 1}, {"Max", 1},
{"Min", 1}, {"Prod", 1}, {"Relu", 1}, {"Relu6", 1},
{"Softmax", 40}, {"Sum", 1}, {"TopKV2", 1}};
const int kTensorSize = 1000;
for (auto unary_op : unary_ops) {
OpContext op_context = DescribeUnaryOp(unary_op.first, kTensorSize);
const int kExpectedMemoryTime = 800;
int expected_compute_time = std::ceil(
unary_op.second * kTensorSize /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time))
<< unary_op.first;
EXPECT_EQ(cost.execution_time,
Costs::Duration(expected_compute_time + kExpectedMemoryTime));
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, BinaryOpExecutionTime) {
std::vector<std::pair<std::string, int>> binary_ops = {
{"Select", 1},
{"SelectV2", 1},
{"SquaredDifference", 2},
{"Where", 1},
};
const int kTensorSize1 = 1000;
const int kTensorSize2 = 2;
for (auto binary_op : binary_ops) {
OpContext op_context =
DescribeBinaryOp(binary_op.first, kTensorSize1, kTensorSize2);
const int kExpectedMemoryTime = 3600;
int expected_compute_time = std::ceil(
binary_op.second * kTensorSize1 * kTensorSize2 * 2 /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time)
<< binary_op.first;
EXPECT_EQ(Costs::Duration(expected_compute_time), cost.compute_time)
<< binary_op.first;
EXPECT_EQ(Costs::Duration(expected_compute_time + kExpectedMemoryTime),
cost.execution_time)
<< binary_op.first;
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, BroadcastAddExecutionTime) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("Add");
DescribeTensor1D(100, op_context.op_info.add_inputs());
DescribeTensor4D(1, 10, 1, 1, op_context.op_info.add_inputs());
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(44), cost.memory_time);
EXPECT_EQ(Costs::Duration(100), cost.compute_time);
EXPECT_EQ(Costs::Duration(144), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
TEST_F(OpLevelCostEstimatorTest, UnknownOrPartialShape) {
{
auto cost = PredictCosts(DescribeMatMul(2, 4, 7, 7));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeMatMul(-1, 4, 7, 7));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeMatMul(2, 4, -1, 7));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost =
PredictCosts(DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
{
auto cost =
PredictCosts(DescribeConvolution(16, -1, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
}
TEST_P(OpLevelBatchMatMulCostEstimatorTest, TestBatchMatMul) {
{
auto cost = PredictCosts(DescribeBatchMatMul({}, {}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {4, 2}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeBatchMatMul({1, 2, 4}, {1, 4, 2}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {1, 3, 4, 2}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
}
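  // A batch of size one should count the same number of operations as the
  // equivalent plain MatMul; an unknown (-1) batch dimension is treated as 1
  // but marks the estimate as inaccurate.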
bool matmul_inaccurate = false;
bool batch_matmul_inaccurate = false;
EXPECT_EQ(
CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
&matmul_inaccurate),
CountBatchMatMulOperations(DescribeBatchMatMul({2, 4}, {4, 2}).op_info,
&batch_matmul_inaccurate));
EXPECT_EQ(matmul_inaccurate, batch_matmul_inaccurate);
EXPECT_EQ(10 * CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
&matmul_inaccurate),
CountBatchMatMulOperations(
DescribeBatchMatMul({10, 2, 4}, {-1, 10, 4, 2}).op_info,
&batch_matmul_inaccurate));
EXPECT_NE(matmul_inaccurate, batch_matmul_inaccurate);
EXPECT_EQ(20 * CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
&matmul_inaccurate),
CountBatchMatMulOperations(
DescribeBatchMatMul({2, 10, 2, 4}, {-1, 10, 4, 2}).op_info,
&batch_matmul_inaccurate));
EXPECT_NE(matmul_inaccurate, batch_matmul_inaccurate);
int prod = CountBatchMatMulDimProduct(
DescribeBatchMatMul({2, 4}, {1, 3, 4, 2}).op_info,
&batch_matmul_inaccurate);
EXPECT_EQ(prod, 16);
EXPECT_FALSE(batch_matmul_inaccurate);
OpContext bad_batch = DescribeBatchMatMul({2, 4}, {4, 2});
bad_batch.op_info.set_op("notBatchMatMul");
prod =
CountBatchMatMulDimProduct(bad_batch.op_info, &batch_matmul_inaccurate);
EXPECT_EQ(prod, 0);
EXPECT_TRUE(batch_matmul_inaccurate);
OpContext transpose_batch = DescribeBatchMatMul({2, 4, 3, 1}, {4, 2});
auto attr = transpose_batch.op_info.mutable_attr();
(*attr)["adj_x"].set_b(true);
(*attr)["adj_y"].set_b(true);
prod = CountBatchMatMulDimProduct(transpose_batch.op_info,
&batch_matmul_inaccurate);
EXPECT_EQ(prod, 12);
}
INSTANTIATE_TEST_SUITE_P(TestBatchMatMul, OpLevelBatchMatMulCostEstimatorTest,
::testing::Values("BatchMatMul", "BatchMatMulV2"));
TEST_F(OpLevelCostEstimatorTest, SparseTensorDenseMatMul) {
{
auto cost =
PredictCosts(DescribeSparseTensorDenseMatMul(-1, {1, 1}, {1, 1}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost =
PredictCosts(DescribeSparseTensorDenseMatMul(1, {-1, 1}, {1, 1}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost =
PredictCosts(DescribeSparseTensorDenseMatMul(1, {1, -1}, {1, -1}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost =
PredictCosts(DescribeSparseTensorDenseMatMul(1, {1, 1}, {-1, 1}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
}
{
auto cost = PredictCosts(
DescribeSparseTensorDenseMatMul(10, {1000, 100}, {50, 100}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(Costs::Duration(200), cost.compute_time);
EXPECT_EQ(Costs::Duration(2422), cost.memory_time);
}
{
auto cost = PredictCosts(
DescribeSparseTensorDenseMatMul(10, {100000, 100}, {50, 100}));
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(Costs::Duration(200), cost.compute_time);
EXPECT_EQ(Costs::Duration(2422), cost.memory_time);
}
}
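// Asserts that a TensorShapeProto describes exactly the expected dimensions.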
void ExpectTensorShape(const std::vector<int64_t>& expected,
const TensorShapeProto& tensor_shape_proto) {
TensorShape tensor_shape_expected(expected);
TensorShape tensor_shape(tensor_shape_proto);
EXPECT_EQ(tensor_shape_expected, tensor_shape);
}
TEST_F(OpLevelCostEstimatorTest, GetTensorShapeProtoFromTensorProto) {
TensorProto tensor_proto;
TensorShapeProto tensor_shape_proto;
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(255);
EXPECT_FALSE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
tensor_proto.Clear();
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(1);
tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
EXPECT_FALSE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
  GetTensorProto(DT_FLOAT, {}, {}, /*tensor_content=*/false, &tensor_proto);
EXPECT_FALSE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
{
std::vector<int64_t> shape_expected = {10, 20, 30, 40};
    GetTensorProto(DT_INT32, {4}, shape_expected,
                   /*tensor_content=*/false, &tensor_proto);
EXPECT_TRUE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
ExpectTensorShape(shape_expected, tensor_shape_proto);
}
{
std::vector<int64_t> shape_expected = {40, 20, 90, 40};
    GetTensorProto(DT_INT64, {4}, shape_expected,
                   /*tensor_content=*/false, &tensor_proto);
EXPECT_TRUE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
ExpectTensorShape(shape_expected, tensor_shape_proto);
}
{
std::vector<int64_t> shape_expected = {10, 20, 30, 40};
    GetTensorProto(DT_INT32, {4}, shape_expected,
                   /*tensor_content=*/true, &tensor_proto);
EXPECT_TRUE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
ExpectTensorShape(shape_expected, tensor_shape_proto);
}
{
std::vector<int64_t> shape_expected = {40, 20, 90, 40};
    GetTensorProto(DT_INT64, {4}, shape_expected,
                   /*tensor_content=*/true, &tensor_proto);
EXPECT_TRUE(
GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
ExpectTensorShape(shape_expected, tensor_shape_proto);
}
}
TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputs) {
std::vector<string> paddings = {"VALID", "SAME"};
std::vector<string> formats = {"NHWC", "NCHW"};
for (const auto& p : paddings) {
for (const auto& f : formats) {
ValidateOpDimensionsFromInputs(10, 20, 20, 100, 3, 3, 2, 2, f, p);
ValidateOpDimensionsFromInputs(10, 20, 20, 100, 1, 1, 3, 3, f, p);
ValidateOpDimensionsFromInputs(10, 200, 200, 100, 5, 5, 3, 3, f, p);
ValidateOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 2, f, p);
}
}
}
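// A zero stride in either spatial dimension must be rejected with an
// InvalidArgument error instead of producing bogus dimensions.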
TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputsError) {
std::vector<string> paddings = {"VALID", "SAME"};
std::vector<string> formats = {"NHWC", "NCHW"};
for (const auto& p : paddings) {
for (const auto& f : formats) {
ASSERT_THAT(
CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 0, 2, f, p),
testing::StatusIs(
error::INVALID_ARGUMENT,
"Stride must be > 0 for Height and Width, but got (2, 0)"));
ASSERT_THAT(
CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 0, f, p),
testing::StatusIs(
error::INVALID_ARGUMENT,
"Stride must be > 0 for Height and Width, but got (0, 2)"));
}
}
}
TEST_F(OpLevelCostEstimatorTest, PredictMaxPool) {
auto predict_max_pool = [this](const int n, const int in, const int c,
const int k, const int s,
const string& padding) -> Costs {
OpContext op_context = DescribePoolingOp(
"MaxPool", {n, in, in, c}, {1, k, k, 1}, {1, s, s, 1}, "NHWC", padding);
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_max_pool(10, 20, 384, 3, 2, "SAME");
EXPECT_EQ(Costs::Duration(1075200), costs.execution_time);
EXPECT_EQ(Costs::Duration(307200), costs.compute_time);
EXPECT_EQ(Costs::Duration(768000), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_max_pool(10, 20, 384, 1, 2, "SAME");
EXPECT_EQ(Costs::Duration(499200), costs.execution_time);
EXPECT_EQ(Costs::Duration(38400), costs.compute_time);
EXPECT_EQ(Costs::Duration(460800), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_max_pool(10, 20, 384, 2, 3, "VALID");
EXPECT_EQ(Costs::Duration(561792), costs.execution_time);
EXPECT_EQ(Costs::Duration(56448), costs.compute_time);
EXPECT_EQ(Costs::Duration(505344), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictMaxPoolGrad) {
auto predict_max_pool_grad = [this](const int n, const int in, const int c,
const int k, const int s,
const string& padding) -> Costs {
OpContext op_context =
DescribePoolingOp("MaxPoolGrad", {n, in, in, c}, {1, k, k, 1},
{1, s, s, 1}, "NHWC", padding);
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_max_pool_grad(10, 20, 384, 3, 2, "SAME");
EXPECT_EQ(Costs::Duration(1996800), costs.execution_time);
EXPECT_EQ(Costs::Duration(614400), costs.compute_time);
EXPECT_EQ(Costs::Duration(1382400), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_max_pool_grad(10, 20, 384, 1, 2, "SAME");
EXPECT_EQ(Costs::Duration(1536000), costs.execution_time);
EXPECT_EQ(Costs::Duration(153600), costs.compute_time);
EXPECT_EQ(Costs::Duration(1382400), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_max_pool_grad(10, 20, 384, 2, 3, "VALID");
EXPECT_EQ(Costs::Duration(1514112), costs.execution_time);
EXPECT_EQ(Costs::Duration(210048), costs.compute_time);
EXPECT_EQ(Costs::Duration(1304064), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictAvgPool) {
auto predict_avg_pool = [this](const int n, const int in, const int c,
const int k, const int s,
const string& padding) -> Costs {
OpContext op_context = DescribePoolingOp(
"AvgPool", {n, in, in, c}, {1, k, k, 1}, {1, s, s, 1}, "NHWC", padding);
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_avg_pool(10, 20, 384, 3, 2, "SAME");
EXPECT_EQ(Costs::Duration(1113600), costs.execution_time);
EXPECT_EQ(Costs::Duration(345600), costs.compute_time);
EXPECT_EQ(Costs::Duration(768000), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_avg_pool(10, 20, 384, 1, 2, "SAME");
EXPECT_EQ(Costs::Duration(499200), costs.execution_time);
EXPECT_EQ(Costs::Duration(38400), costs.compute_time);
EXPECT_EQ(Costs::Duration(460800), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_avg_pool(10, 20, 384, 2, 3, "VALID");
EXPECT_EQ(Costs::Duration(580608), costs.execution_time);
EXPECT_EQ(Costs::Duration(75264), costs.compute_time);
EXPECT_EQ(Costs::Duration(505344), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictAvgPoolGrad) {
auto predict_avg_pool_grad = [this](const int n, const int in, const int c,
const int k, const int s,
const string& padding) -> Costs {
OpContext op_context =
DescribePoolingOp("AvgPoolGrad", {n, in, in, c}, {1, k, k, 1},
{1, s, s, 1}, "NHWC", padding);
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_avg_pool_grad(10, 20, 384, 3, 2, "SAME");
EXPECT_EQ(Costs::Duration(1305602), costs.execution_time);
EXPECT_EQ(Costs::Duration(537600), costs.compute_time);
EXPECT_EQ(Costs::Duration(768002), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_avg_pool_grad(10, 20, 384, 1, 2, "SAME");
EXPECT_EQ(Costs::Duration(960002), costs.execution_time);
EXPECT_EQ(Costs::Duration(192000), costs.compute_time);
EXPECT_EQ(Costs::Duration(768002), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_avg_pool_grad(10, 20, 384, 2, 3, "VALID");
EXPECT_EQ(Costs::Duration(862082), costs.execution_time);
EXPECT_EQ(Costs::Duration(172416), costs.compute_time);
EXPECT_EQ(Costs::Duration(689666), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictFusedBatchNorm) {
auto predict_fused_bn = [this](const int n, const int in, const int c,
const bool is_training) -> Costs {
    OpContext op_context = DescribeFusedBatchNorm(
        /*is_training=*/is_training, /*is_grad=*/false, {n, in, in, c},
        "NHWC");
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_fused_bn(10, 20, 96, true);
EXPECT_EQ(Costs::Duration(614737), costs.execution_time);
EXPECT_EQ(Costs::Duration(153706), costs.compute_time);
EXPECT_EQ(Costs::Duration(461031), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_fused_bn(10, 20, 32, true);
EXPECT_EQ(Costs::Duration(204913), costs.execution_time);
EXPECT_EQ(Costs::Duration(51236), costs.compute_time);
EXPECT_EQ(Costs::Duration(153677), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_fused_bn(10, 20, 96, false);
EXPECT_EQ(Costs::Duration(384154), costs.execution_time);
EXPECT_EQ(Costs::Duration(76800), costs.compute_time);
EXPECT_EQ(Costs::Duration(307354), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
{
auto costs = predict_fused_bn(10, 20, 32, false);
EXPECT_EQ(Costs::Duration(128052), costs.execution_time);
EXPECT_EQ(Costs::Duration(25600), costs.compute_time);
EXPECT_EQ(Costs::Duration(102452), costs.memory_time);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictFusedBatchNormGrad) {
auto predict_fused_bn_grad = [this](const int n, const int in,
const int c) -> Costs {
    OpContext op_context = DescribeFusedBatchNorm(
        /*is_training=*/false, /*is_grad=*/true, {n, in, in, c}, "NHWC");
return estimator_.PredictCosts(op_context);
};
{
auto costs = predict_fused_bn_grad(10, 20, 96);
EXPECT_EQ(Costs::Duration(1037050), costs.execution_time);
EXPECT_EQ(Costs::Duration(422496), costs.compute_time);
EXPECT_EQ(Costs::Duration(614554), costs.memory_time);
EXPECT_EQ(costs.num_ops_total, 1);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(costs.temporary_memory, 0);
EXPECT_EQ(costs.persistent_memory, 0);
}
{
auto costs = predict_fused_bn_grad(128, 7, 384);
EXPECT_EQ(Costs::Duration(6503809), costs.execution_time);
EXPECT_EQ(Costs::Duration(2649677), costs.compute_time);
EXPECT_EQ(Costs::Duration(3854132), costs.memory_time);
EXPECT_EQ(1, costs.num_ops_total);
EXPECT_FALSE(costs.inaccurate);
EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
}
}
TEST_F(OpLevelCostEstimatorTest, MaybeGetMinimumShapeTest) {
{
TensorShapeProto x;
x.set_unknown_rank(true);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 4, &unknown_shapes);
EXPECT_TRUE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({1, 1, 1, 1}));
}
{
TensorShapeProto x;
x.set_unknown_rank(false);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 1, &unknown_shapes);
EXPECT_FALSE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({1}));
}
{
TensorShapeProto x;
x.set_unknown_rank(false);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
EXPECT_FALSE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({1, 1}));
}
{
TensorShapeProto x;
x.set_unknown_rank(false);
x.add_dim()->set_size(10);
x.add_dim()->set_size(20);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
EXPECT_FALSE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({10, 20}));
unknown_shapes = false;
std::vector<int64_t> z = MaybeGetMinimumShape(x, 4, &unknown_shapes);
EXPECT_TRUE(unknown_shapes);
EXPECT_THAT(z, ElementsAreArray({10, 20, 1, 1}));
}
{
TensorShapeProto x;
x.set_unknown_rank(false);
x.add_dim()->set_size(10);
x.add_dim()->set_size(20);
x.add_dim()->set_size(-1);
x.add_dim()->set_size(20);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 4, &unknown_shapes);
EXPECT_TRUE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({10, 20, 1, 20}));
}
{
TensorShapeProto x;
x.set_unknown_rank(false);
x.add_dim()->set_size(10);
x.add_dim()->set_size(20);
x.add_dim()->set_size(30);
x.add_dim()->set_size(20);
bool unknown_shapes = false;
std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
EXPECT_TRUE(unknown_shapes);
EXPECT_THAT(y, ElementsAreArray({10, 20}));
}
}
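// Exercises the compute-bound, memory-bound, and intermediate-memory-bound
// regimes: a slow compute rate makes execution time equal compute time, a
// slow main memory makes it equal memory time, and slow intermediate
// read/write bandwidth makes it equal intermediate memory time.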
TEST_F(OpLevelCostEstimatorTest, IntermediateRdWrBandwidth) {
TestOpLevelCostEstimator estimator;
  estimator.SetDeviceInfo(DeviceInfo(/*gigaops=*/1,
                                     /*gb_per_sec=*/1));
estimator.SetComputeMemoryOverlap(true);
auto cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(3548774400), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.compute_time);
estimator.SetComputeMemoryOverlap(false);
cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(3551112192), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
cost.intermediate_memory_time);
  estimator.SetDeviceInfo(DeviceInfo(/*gigaops=*/99999,
                                     /*gb_per_sec=*/1));
estimator.SetComputeMemoryOverlap(true);
cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(2337792), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.memory_time);
estimator.SetComputeMemoryOverlap(false);
cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(2373281), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
cost.intermediate_memory_time);
  estimator.SetDeviceInfo(DeviceInfo(/*gigaops=*/99999,
                                     /*gb_per_sec=*/9999,
                                     /*intermediate_read_gb_per_sec=*/1,
                                     /*intermediate_write_gb_per_sec=*/1));
estimator.SetComputeMemoryOverlap(true);
cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(2337792), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.intermediate_memory_time);
estimator.SetComputeMemoryOverlap(false);
cost = estimator.PredictCosts(
DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
EXPECT_EQ(Costs::Duration(2373515), cost.execution_time);
EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
cost.intermediate_memory_time);
}
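// Supported einsum equations are priced like the equivalent (batched) matmul;
// unsupported ones fall back to memory-only costs and are flagged inaccurate.
// XlaEinsum must always be priced identically to Einsum.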
TEST_F(OpLevelCostEstimatorTest, Einsum) {
{
auto cost = PredictCosts(DescribeEinsum({100, 50}, {100, 50}, "ik,jk->ij"));
EXPECT_EQ(Costs::Duration(104000), cost.execution_time);
EXPECT_EQ(Costs::Duration(100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(4000), cost.memory_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
EXPECT_EQ(PredictCosts(DescribeEinsum({100, 50}, {100, 50}, "ik,jk->ij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50}, {100, 50}, "ik,jk->ij"))
.execution_time);
}
{
auto cost = PredictCosts(
DescribeEinsum({25, 100, 50}, {100, 50, 25}, "Bik,jkB->Bij"));
EXPECT_EQ(Costs::Duration(25 * 104000), cost.execution_time);
EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(25 * 4000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(PredictCosts(
DescribeEinsum({25, 100, 50}, {100, 50, 25}, "Bik,jkB->Bij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({25, 100, 50}, {100, 50, 25},
"Bik,jkB->Bij"))
.execution_time);
}
{
auto cost = PredictCosts(DescribeEinsum(
{25, 16, 100, 50}, {16, 100, 50, 25}, "BNik,NjkB->BNij"));
EXPECT_EQ(Costs::Duration(16 * 25 * 104000), cost.execution_time);
EXPECT_EQ(
Costs::Duration(16 * 25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(16 * 25 * 4000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({25, 16, 100, 50}, {16, 100, 50, 25},
"BNik,NjkB->BNij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({25, 16, 100, 50}, {16, 100, 50, 25},
"BNik,NjkB->BNij"))
.execution_time);
}
{
auto cost =
PredictCosts(DescribeEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"));
EXPECT_EQ(Costs::Duration(2552000), cost.execution_time);
EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"))
.execution_time);
}
{
auto cost =
PredictCosts(DescribeEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"));
EXPECT_EQ(Costs::Duration(2552000), cost.execution_time);
EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"))
.execution_time);
}
{
auto cost = PredictCosts(
DescribeEinsum({100, 50, 25}, {100, 50, 25}, "ikl,jkl->ij"));
EXPECT_EQ(Costs::Duration(2600000), cost.execution_time);
EXPECT_EQ(Costs::Duration(100 * 50 * 25 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(100000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(PredictCosts(
DescribeEinsum({100, 50, 25}, {100, 50, 25}, "ikl,jkl->ij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50, 25}, {100, 50, 25},
"ikl,jkl->ij"))
.execution_time);
}
{
auto cost = PredictCosts(DescribeEinsum({100, 50}, {}, "ij->ji"));
EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 50}, {}, "ij->ji")).execution_time,
PredictCosts(DescribeXlaEinsum({100, 50}, {}, "ij->ji"))
.execution_time);
}
{
auto cost =
PredictCosts(DescribeEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"));
EXPECT_EQ(Costs::Duration(52000), cost.execution_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"))
.execution_time);
cost = PredictCosts(DescribeEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"));
EXPECT_EQ(Costs::Duration(52000), cost.execution_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"))
.execution_time);
}
{
auto cost = PredictCosts(DescribeEinsum(
{100, 50, 25, 16}, {50, 100, 32, 12}, "ik...,kl...->il..."));
EXPECT_EQ(Costs::Duration(1568000), cost.execution_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(1568000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 50, 25, 16}, {50, 100, 32, 12},
"ik...,kl...->il..."))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 50, 25, 16}, {50, 100, 32, 12},
"ik...,kl...->il..."))
.execution_time);
}
{
auto cost =
PredictCosts(DescribeEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"));
EXPECT_EQ(Costs::Duration(202000), cost.execution_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(202000), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(
PredictCosts(DescribeEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"))
.execution_time,
PredictCosts(DescribeXlaEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"))
.execution_time);
}
{
auto cost = PredictCosts(DescribeEinsum({-1, 50}, {100, 50}, "ik,jk->ij"));
EXPECT_EQ(Costs::Duration(3020), cost.execution_time);
EXPECT_EQ(Costs::Duration(1 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
cost.compute_time);
EXPECT_EQ(Costs::Duration(2020), cost.memory_time);
EXPECT_EQ(1, cost.num_ops_total);
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
EXPECT_EQ(PredictCosts(DescribeEinsum({-1, 50}, {100, 50}, "ik,jk->ij"))
.execution_time,
PredictCosts(DescribeXlaEinsum({-1, 50}, {100, 50}, "ik,jk->ij"))
.execution_time);
}
}
TEST_F(OpLevelCostEstimatorTest, PredictResourceVariableOps) {
TestOpLevelCostEstimator estimator;
  estimator.SetDeviceInfo(DeviceInfo(/*gigaops=*/1, /*gb_per_sec=*/1));
{
OpContext op_context;
op_context.op_info.set_op("AssignVariableOp");
DescribeDummyTensor(op_context.op_info.add_inputs());
DescribeTensor1D(100, op_context.op_info.add_inputs());
auto cost = estimator.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(400), cost.memory_time);
EXPECT_EQ(Costs::Duration(0), cost.compute_time);
EXPECT_EQ(Costs::Duration(400), cost.execution_time);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
{
OpContext op_context;
op_context.op_info.set_op("AssignSubVariableOp");
DescribeDummyTensor(op_context.op_info.add_inputs());
DescribeTensor1D(100, op_context.op_info.add_inputs());
auto cost = estimator.PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(400), cost.memory_time);
EXPECT_EQ(Costs::Duration(100), cost.compute_time);
EXPECT_EQ(Costs::Duration(400), cost.execution_time);
EXPECT_FALSE(cost.inaccurate);
}
}
TEST_F(OpLevelCostEstimatorTest, AddNExecutionTime) {
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("AddN");
DescribeTensor4D(1, 10, 10, 10, op_context.op_info.add_inputs());
DescribeTensor4D(1, 10, 10, 10, op_context.op_info.add_inputs());
DescribeTensor4D(1, 10, 10, 10, op_context.op_info.add_inputs());
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(1200), cost.memory_time);
EXPECT_EQ(Costs::Duration(200), cost.compute_time);
EXPECT_EQ(Costs::Duration(1400), cost.execution_time);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
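// Identity-like ops move no data: they are charged a single unit of compute
// time and no memory time, while max_memory still reflects the output size.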
TEST_F(OpLevelCostEstimatorTest, IdentityOpExecutionTime) {
std::vector<std::string> identity_ops = {
"_Recv", "_Send", "BitCast", "Identity",
"Enter", "Exit", "IdentityN", "Merge",
"NextIteration", "Placeholder", "PreventGradient", "RefIdentity",
"Reshape", "StopGradient", "Switch"};
const int kTensorSize = 1000;
for (auto identity_op : identity_ops) {
OpContext op_context = DescribeUnaryOp(identity_op, kTensorSize);
const int kExpectedMemoryTime = 0;
const int kExpectedComputeTime = 1;
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time);
EXPECT_EQ(Costs::Duration(kExpectedComputeTime), cost.compute_time);
EXPECT_EQ(Costs::Duration(kExpectedComputeTime + kExpectedMemoryTime),
cost.execution_time);
EXPECT_EQ(cost.max_memory, kTensorSize * 4);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
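// Reshape-style ops are modeled as pure memory movement: all of the cost is
// memory time, with zero compute time.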
TEST_F(OpLevelCostEstimatorTest, PureMemoryOpExecutionTime) {
std::vector<std::string> reshape_ops = {
"ConcatV2", "DataFormatVecPermute",
"DepthToSpace", "ExpandDims",
"Fill", "OneHot",
"Pack", "Range",
"SpaceToDepth", "Split",
"Squeeze", "Transpose",
"Tile", "Unpack"};
const int kTensorSize = 1000;
for (auto reshape_op : reshape_ops) {
OpContext op_context = DescribeUnaryOp(reshape_op, kTensorSize);
const int kExpectedMemoryTime = 800;
const int kExpectedComputeTime = 0;
auto cost = PredictCosts(op_context);
EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time);
EXPECT_EQ(Costs::Duration(kExpectedComputeTime), cost.compute_time);
EXPECT_EQ(Costs::Duration(kExpectedComputeTime + kExpectedMemoryTime),
cost.execution_time);
EXPECT_EQ(cost.max_memory, kTensorSize * 4);
EXPECT_EQ(cost.num_ops_total, 1);
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, ResizeBilinearExecutionTime) {
const int kImageDim = 255;
const int kChannelSize = 10;
const int kComputeLerpCost = 9;
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("ResizeBilinear");
DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
op_context.op_info.add_inputs());
    // No output shape described: the estimator should return zero cost.
    auto cost = PredictCosts(op_context);
    ExpectZeroCost(cost);
    // An output with all-zero dimensions should also cost nothing.
    op_context.op_info.clear_inputs();
    DescribeTensor4D(0, 0, 0, 0, op_context.op_info.add_outputs());
    cost = PredictCosts(op_context);
    ExpectZeroCost(cost);
}
{
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("ResizeBilinear");
DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
op_context.op_info.add_inputs());
const int kExpectedMemoryTime = kImageDim * kImageDim * 4;
DescribeTensor4D(0, 0, 0, 0, op_context.op_info.add_outputs());
auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(0));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
EXPECT_EQ(cost.temporary_memory, 0);
EXPECT_EQ(cost.persistent_memory, 0);
AttrValue half_pixel_centers;
half_pixel_centers.set_b(false);
(*op_context.op_info.mutable_attr())["half_pixel_centers"] =
half_pixel_centers;
cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(0));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
const int kOutputImageDim = 100;
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("ResizeBilinear");
DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
op_context.op_info.add_inputs());
DescribeTensor4D(1, kOutputImageDim, kOutputImageDim, kChannelSize,
op_context.op_info.add_outputs());
const int kExpectedMemoryTime =
(kImageDim * kImageDim + kOutputImageDim * kOutputImageDim) * 4;
{
AttrValue half_pixel_centers;
half_pixel_centers.set_b(false);
(*op_context.op_info.mutable_attr())["half_pixel_centers"] =
half_pixel_centers;
const int kInterpWeightCost = 10;
const int num_ops =
kInterpWeightCost * (kOutputImageDim * 2) +
kComputeLerpCost * (kOutputImageDim * kOutputImageDim * kChannelSize);
const int expected_compute_time = std::ceil(
num_ops /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
const auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time,
Costs::Duration(kExpectedMemoryTime + expected_compute_time));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
{
AttrValue half_pixel_centers;
half_pixel_centers.set_b(true);
(*op_context.op_info.mutable_attr())["half_pixel_centers"] =
half_pixel_centers;
const int kInterpWeightCost = 12;
const int num_ops =
kInterpWeightCost * (kOutputImageDim * 2) +
kComputeLerpCost * (kOutputImageDim * kOutputImageDim * kChannelSize);
const int expected_compute_time = std::ceil(
num_ops /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
const auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time,
Costs::Duration(kExpectedMemoryTime + expected_compute_time));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
{
op_context.op_info.clear_outputs();
constexpr int64_t kLargeOutputImageDim = 40000;
DescribeTensor4D(1, kLargeOutputImageDim, kLargeOutputImageDim,
kChannelSize, op_context.op_info.add_outputs());
const int64_t kInterpWeightCost = 12;
AttrValue half_pixel_centers;
half_pixel_centers.set_b(true);
(*op_context.op_info.mutable_attr())["half_pixel_centers"] =
half_pixel_centers;
const int64_t num_ops =
kInterpWeightCost * (kLargeOutputImageDim * 2) +
kComputeLerpCost *
(kLargeOutputImageDim * kLargeOutputImageDim * kChannelSize);
const int64_t expected_compute_time = std::ceil(
num_ops /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
const int64_t expected_memory_time =
(kImageDim * kImageDim + kLargeOutputImageDim * kLargeOutputImageDim) *
4;
const auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
EXPECT_EQ(cost.memory_time, Costs::Duration(expected_memory_time));
EXPECT_EQ(cost.execution_time,
Costs::Duration(expected_memory_time + expected_compute_time));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
}
TEST_F(OpLevelCostEstimatorTest, CropAndResizeExecutionTime) {
const int kImageDim = 255;
const int kChannelSize = 10;
const int kOutputImageDim = 100;
const int kNumBoxes = 10;
const int kOutputElements =
kNumBoxes * kOutputImageDim * kOutputImageDim * kChannelSize;
OpContext op_context;
SetCpuDevice(&op_context.op_info);
op_context.op_info.set_op("CropAndResize");
DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
op_context.op_info.add_inputs());
DescribeArbitraryRankInput({kNumBoxes, 4}, DT_INT64, &op_context.op_info);
DescribeTensor4D(kNumBoxes, kOutputImageDim, kOutputImageDim, kChannelSize,
op_context.op_info.add_outputs());
  const int kExpectedMemoryTime =
      (kImageDim * kImageDim * 4 +  // input image in float.
       kNumBoxes * 4 * 8 / 10 +     // boxes (kNumBoxes x 4) in int64.
       kNumBoxes * kOutputImageDim * kOutputImageDim * 4);  // output in float.
{
AttrValue method;
method.set_s("bilinear");
(*op_context.op_info.mutable_attr())["method"] = method;
int num_ops = 28 * kNumBoxes + 4 * kNumBoxes * kOutputImageDim +
4 * kNumBoxes * kOutputImageDim * kOutputImageDim +
3 * kNumBoxes * kOutputImageDim +
3 * kNumBoxes * kOutputImageDim * kOutputImageDim +
13 * kOutputElements;
const int expected_compute_time = std::ceil(
num_ops /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
const auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time,
Costs::Duration(kExpectedMemoryTime + expected_compute_time));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
{
AttrValue method;
method.set_s("nearest");
(*op_context.op_info.mutable_attr())["method"] = method;
int num_ops = 28 * kNumBoxes + 4 * kNumBoxes * kOutputImageDim +
4 * kNumBoxes * kOutputImageDim * kOutputImageDim +
2 * kNumBoxes * kOutputImageDim * kOutputImageDim +
kOutputElements;
const int expected_compute_time = std::ceil(
num_ops /
estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
const auto cost = PredictCosts(op_context);
EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
EXPECT_EQ(cost.execution_time,
Costs::Duration(kExpectedMemoryTime + expected_compute_time));
EXPECT_FALSE(cost.inaccurate);
EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/op_level_cost_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ec7452e-20d4-4980-94de-c634b44a3126 | cpp | tensorflow/tensorflow | graph_properties | tensorflow/core/grappler/costs/graph_properties.cc | tensorflow/core/grappler/costs/graph_properties_test.cc | #include "tensorflow/core/grappler/costs/graph_properties.h"
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
using TensorVector = absl::InlinedVector<TensorValue, 4UL>;
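// Sentinel for a dimension whose "unknown" (-1) value came from a constant
// tensor, and the element-count cap above which constant tensors are not
// materialized as Tensor objects during shape inference.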
const int64_t kUnknownDimFromConst = INT64_MAX;
const int kThresholdToSkipConstTensorInstantiation = 128;
template <typename Handle>
struct HashHandle {
std::size_t operator()(const Handle& h) const {
return absl::HashOf(h.Handle());
}
};
template <typename Handle>
struct CompareHandle {
bool operator()(const Handle& h1, const Handle& h2) const {
return h1.SameHandle(h2);
}
};
template <typename Handle>
struct HandleToObject {};
template <>
struct HandleToObject<ShapeHandle> {
typedef ShapeHandle Object;
static ShapeHandle Unknown() { return ShapeHandle(); }
};
template <>
struct HandleToObject<DimensionHandle> {
typedef int64_t Object;
static int64_t Unknown() { return -1; }
};
template <typename Handle>
struct Processor {};
template <>
struct Processor<ShapeHandle> {
void ExtractValue(ShapeHandle h, ShapeHandle* result) { *result = h; }
Status Merge(ShapeHandle h1, ShapeHandle h2, ShapeHandle* result) {
if (InferenceContext::RankKnown(*result)) {
return absl::OkStatus();
}
if (InferenceContext::RankKnown(h1)) {
*result = h1;
} else {
*result = h2;
}
return absl::OkStatus();
}
};
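// Dimensions are flattened to int64 values: sizes >= 0 are known, while each
// symbolic unknown receives a distinct id below -1 (allocated from `counter`).
// Merge prefers known values and fails on conflicting known sizes.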
template <>
struct Processor<DimensionHandle> {
void ExtractValue(DimensionHandle d, int64_t* result) {
if (!InferenceContext::ValueKnown(d)) {
*result = -counter;
counter++;
} else {
int64_t val = InferenceContext::Value(d);
if (val >= 0) {
*result = val;
} else {
*result = -counter;
counter++;
}
}
}
Status Merge(DimensionHandle d1, DimensionHandle d2, int64_t* result) {
const int64_t dim1 = InferenceContext::Value(d1);
const int64_t dim2 = InferenceContext::Value(d2);
if (dim1 >= 0 && dim2 >= 0) {
CHECK_EQ(dim1, dim2);
return RefineDim(dim1, result);
} else if (dim1 >= 0 && dim2 < 0) {
return RefineDim(dim1, result);
} else if (dim1 < 0 && dim2 >= 0) {
return RefineDim(dim2, result);
} else if (dim1 < -1) {
return RefineDim(dim1, result);
} else if (dim2 < -1) {
return RefineDim(dim2, result);
} else {
CHECK_EQ(dim1, dim2);
CHECK_EQ(-1, dim1);
return RefineDim(-1, result);
}
return absl::OkStatus();
}
private:
Status RefineDim(int64_t dim, int64_t* result) {
if (*result >= 0) {
if (!(*result == dim || dim < 0)) {
return errors::InvalidArgument("Inconsistent dimensions detected");
}
} else if (dim >= 0) {
*result = dim;
} else if (dim < *result) {
*result = dim;
}
return absl::OkStatus();
}
int64_t counter = 2;
};
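// Union-find over shape/dimension handles, with union by rank and path
// compression; handles merged into one set are treated as equivalent and
// resolve to a single representative value.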
template <typename Handle>
class DisjointSet {
public:
DisjointSet() {}
~DisjointSet() {
for (auto rep : nodes_) {
delete rep.second;
}
}
Status Merge(Handle x, Handle y);
const typename HandleToObject<Handle>::Object GetMergedValue(Handle value);
private:
struct Rep {
Rep* parent;
int rank;
typename HandleToObject<Handle>::Object value;
};
Rep* Find(Handle value);
private:
Processor<Handle> processor_;
absl::flat_hash_map<Handle, Rep*, HashHandle<Handle>, CompareHandle<Handle>>
nodes_;
};
template <typename Handle>
const typename HandleToObject<Handle>::Object
DisjointSet<Handle>::GetMergedValue(Handle value) {
Rep* rep = Find(value);
if (!rep) {
return HandleToObject<Handle>::Unknown();
}
return rep->value;
}
template <typename Handle>
Status DisjointSet<Handle>::Merge(Handle x, Handle y) {
Rep* x_root = Find(x);
Rep* y_root = Find(y);
if (x_root == y_root) {
return absl::OkStatus();
}
if (x_root->rank < y_root->rank) {
TF_RETURN_IF_ERROR(processor_.Merge(y, x, &y_root->value));
x_root->parent = y_root;
} else if (x_root->rank > y_root->rank) {
TF_RETURN_IF_ERROR(processor_.Merge(x, y, &x_root->value));
y_root->parent = x_root;
} else {
TF_RETURN_IF_ERROR(processor_.Merge(x, y, &x_root->value));
y_root->parent = x_root;
x_root->rank = x_root->rank + 1;
}
return absl::OkStatus();
}
template <typename Handle>
typename DisjointSet<Handle>::Rep* DisjointSet<Handle>::Find(Handle value) {
auto it = nodes_.find(value);
if (it == nodes_.end()) {
Rep* node = new Rep;
node->parent = node;
node->rank = 0;
processor_.ExtractValue(value, &node->value);
nodes_[value] = node;
return node;
}
Rep* node = it->second;
Rep* root = node->parent;
while (root != root->parent) {
root = root->parent;
}
while (node->parent != root) {
Rep* next = node->parent;
node->parent = root;
node = next;
}
return root;
}
bool IsEnqueue(const NodeDef& n) {
return (n.op().find("Enqueue") != string::npos &&
n.op().find("EnqueueMany") == string::npos);
}
bool IsDequeue(const NodeDef& n) {
return (n.op().find("Dequeue") != string::npos &&
n.op().find("DequeueMany") == string::npos);
}
bool HasAnyUnknownDimensions(const TensorShapeProto& proto) {
if (proto.unknown_rank()) {
return true;
}
for (const auto& dim : proto.dim()) {
if (dim.size() < 0) {
return true;
}
}
return false;
}
void VerboseLogUnknownDimensionSources(
const GraphDef& graph,
const absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>&
input_properties_map,
const absl::flat_hash_map<string, std::vector<OpInfo::TensorProperties>>&
output_properties_map) {
if (!VLOG_IS_ON(2)) {
return;
}
VLOG(2) << "Nodes with known inputs, but with unknown output dimensions:";
std::map<string, int> op_to_count;
for (const NodeDef& node : graph.node()) {
const auto& input_properties = input_properties_map.at(node.name());
const auto& output_properties = output_properties_map.at(node.name());
bool has_unknown_inputs = false;
for (const auto& input_prop : input_properties) {
if (HasAnyUnknownDimensions(input_prop.shape())) {
has_unknown_inputs = true;
break;
}
}
if (has_unknown_inputs) {
continue;
}
for (const auto& output_prop : output_properties) {
if (HasAnyUnknownDimensions(output_prop.shape())) {
string inputs = "input_shapes=[";
for (const auto& input_prop : input_properties) {
inputs += PartialTensorShape::DebugString(input_prop.shape());
}
inputs += "]";
string outputs = "output_shapes=[";
for (const auto& output_prop : output_properties) {
outputs += PartialTensorShape::DebugString(output_prop.shape());
}
outputs += "]";
VLOG(2) << "Node: " << node.name() << ", Op: " << node.op() << ", "
<< inputs << ", " << outputs;
op_to_count[node.op()]++;
break;
}
}
}
VLOG(2) << "Op types with known inputs, but with unknown output dimensions "
<< "(format: <op_type> (<count>)):";
for (const auto& p : op_to_count) {
VLOG(2) << p.first << " (" << p.second << ")";
}
}
std::vector<ShapeHandle> ReplaceUnknownDimFromConstWithUnknownDim(
InferenceContext* ic, const std::vector<ShapeHandle>& shapes) {
std::vector<ShapeHandle> converted_shapes(shapes.size());
for (int i = 0, shapes_size = shapes.size(); i < shapes_size; i++) {
const auto& shape = shapes[i];
if (!ic->RankKnown(shape)) {
converted_shapes[i] = shape;
continue;
}
bool just_copy = true;
std::vector<DimensionHandle> dims;
for (int32_t i = 0; i < ic->Rank(shape); ++i) {
DimensionHandle dim = ic->Dim(shape, i);
if (ic->ValueKnown(dim) && ic->Value(dim) == kUnknownDimFromConst) {
just_copy = false;
dims.push_back(ic->UnknownDim());
} else {
dims.push_back(dim);
}
}
if (just_copy) {
converted_shapes[i] = shape;
continue;
}
converted_shapes[i] = ic->MakeShape(dims);
}
return converted_shapes;
}
TensorProto MakeTensorProtoFromShape(InferenceContext* ic,
const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape,
const DataType& dtype) {
TensorProto tensor_proto;
tensor_proto.set_dtype(dtype);
auto* shape_proto = tensor_proto.mutable_tensor_shape();
if (ic->Rank(shape) == 1) {
shape_proto->add_dim()->set_size(ic->Rank(tensor_as_shape));
}
for (int i = 0; i < ic->Rank(tensor_as_shape); i++) {
int64_t value = ic->Value(ic->Dim(tensor_as_shape, i));
if (dtype == DT_INT32) {
tensor_proto.add_int_val(value);
} else {
tensor_proto.add_int64_val(value);
}
}
return tensor_proto;
}
NodeDef MakeConstNodeDefFromTensorProto(InferenceContext* ic,
const TensorProto& tensor_proto,
const DataType& dtype) {
NodeDef const_node;
const_node.set_name("const_from_shape");
const_node.set_op("Const");
auto* attr = const_node.mutable_attr();
(*attr)["dtype"].set_type(dtype);
auto* tensor = (*attr)["value"].mutable_tensor();
*tensor = tensor_proto;
return const_node;
}
NodeDef MakeConstNodeDefFromShape(InferenceContext* ic,
const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape,
const DataType& dtype) {
return MakeConstNodeDefFromTensorProto(
ic, MakeTensorProtoFromShape(ic, shape, tensor_as_shape, dtype), dtype);
}
bool IsNumericType(const DataType dtype) {
static const gtl::FlatSet<DataType>* const kRealNumberTypes =
CHECK_NOTNULL((new gtl::FlatSet<DataType>{
DT_BFLOAT16,
DT_HALF,
DT_FLOAT,
DT_DOUBLE,
DT_INT8,
DT_INT16,
DT_INT32,
DT_INT64,
DT_UINT8,
DT_UINT16,
DT_UINT32,
DT_UINT64,
DT_QINT8,
DT_QUINT8,
DT_QINT16,
DT_QUINT16,
DT_QINT32,
DT_BOOL,
}));
return kRealNumberTypes->find(dtype) != kRealNumberTypes->end();
}
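// Returns the element count of the proto's shape; the -1 returned for missing
// or unknown-rank shapes wraps to UINT64_MAX under the uint64 return type, so
// such tensors always exceed any instantiation threshold.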
uint64 NumElementsFromTensorProto(const TensorProto& tensor_proto) {
if (!tensor_proto.has_tensor_shape()) {
return -1;
}
const auto& tensor_shape_proto = tensor_proto.tensor_shape();
if (tensor_shape_proto.unknown_rank()) {
return -1;
}
int64_t num_elements = 1;
for (const auto& dim : tensor_shape_proto.dim()) {
num_elements *= dim.size();
}
return num_elements;
}
}
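// True iff `tensor_as_shape` is a fully defined int32/int64 scalar or vector
// with no const-derived unknown dims, i.e. it can safely be converted into a
// Const node.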
bool IsShapeFullyDefinedIntegerVectorOrScalar(
InferenceContext* ic, const ShapeHandle& shape,
const ShapeHandle& tensor_as_shape, const DataType& dtype) {
if (!ic->FullyDefined(shape) || ic->Rank(shape) > 1 ||
!ic->FullyDefined(tensor_as_shape) ||
(dtype != DT_INT32 && dtype != DT_INT64)) {
return false;
}
for (int32_t i = 0; i < ic->Rank(tensor_as_shape); ++i) {
DimensionHandle dim = ic->Dim(tensor_as_shape, i);
if (ic->Value(dim) == kUnknownDimFromConst) {
LOG(WARNING) << "IsShapeFullyDefinedIntegerVectorOrScalar(): "
<< "tensor_as_shape input includes kUnknownDimFromConst -- "
<< ic->DebugString(tensor_as_shape);
return false;
}
}
return true;
}
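// Queue of nodes ordered by topological rank, so producers are always popped
// before their consumers during shape propagation.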
class TopoQueue {
public:
explicit TopoQueue(const std::vector<const NodeDef*>& topo_order)
: topo_order_(TopoOrder(topo_order)) {}
void push(const NodeDef* n) { queue_.emplace(n, topo_order_.at(n)); }
const NodeDef* pop() {
CHECK(!empty());
auto it = queue_.begin();
const NodeDef* n = it->first;
queue_.erase(it);
return n;
}
bool empty() const { return queue_.empty(); }
std::size_t size() const { return queue_.size(); }
private:
using NodeAndId = std::pair<const NodeDef*, int>;
struct OrderByIdAscending {
bool operator()(const NodeAndId& lhs, const NodeAndId& rhs) const {
return lhs.second < rhs.second;
}
};
const absl::flat_hash_map<const NodeDef*, int> TopoOrder(
const std::vector<const NodeDef*>& topo_order) const {
absl::flat_hash_map<const NodeDef*, int> map;
map.reserve(topo_order.size());
for (int i = 0, topo_order_size = topo_order.size(); i < topo_order_size;
++i) {
map.emplace(topo_order[i], i);
}
return map;
}
const absl::flat_hash_map<const NodeDef*, int> topo_order_;
std::set<NodeAndId, OrderByIdAscending> queue_;
};
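// Op types considered cheap and side-effect free enough to evaluate eagerly
// (via EvaluateNode) during aggressive shape inference.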
bool IsAllowListedOpTypeForEvaluateNode(const string& op_type) {
  static const gtl::FlatSet<string>* const kOpTypeAllowlist =
CHECK_NOTNULL((new gtl::FlatSet<string>{
"Floor",
"Round",
"Sqrt",
"Square",
"Sign",
"Add",
"AddV2",
"Div",
"FloorDiv",
"FloorMod",
"Greater",
"GreaterEqual",
"Less",
"LessEqual",
"LogicalAnd",
"LogicalNot",
"LogicalOr",
"Maximum",
"Minimum",
"Mod",
"Mul",
"NotEqual",
"QuantizedAdd",
"QuantizedMul",
"SquareDifference",
"Sub",
"TruncateDiv",
"TruncateMod",
"RealDiv",
"AddN",
"StridedSlice",
"OnesLike",
"ZerosLike",
"Concat",
"ConcatV2",
"Split",
"Range",
"Fill",
"Cast",
"Prod",
"Unpack",
"GatherV2",
"Pack",
"ExpandDims",
}));
  return kOpTypeAllowlist->find(op_type) != kOpTypeAllowlist->end();
}
static void NormalizeShapeForOutput(TensorShapeProto* shape) {
for (int i = 0; i < shape->dim_size(); i++) {
if (shape->dim(i).size() < -1) {
VLOG(2) << "Normalizing dimension: " << i << " from "
<< shape->dim(i).size() << " to -1";
shape->mutable_dim(i)->set_size(-1);
}
}
}
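// Performs symbolic shape inference across the graph: one InferenceContext is
// kept per node, constant tensors and tensors-as-shapes are propagated along
// edges, and function call nodes are refined by instantiating their bodies.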
class SymbolicShapeRefiner {
public:
explicit SymbolicShapeRefiner(
const GraphView& graph,
const absl::flat_hash_map<string, absl::flat_hash_set<int>>& fed_ports,
const bool aggressive_shape_inference)
: graph_(graph),
function_library_(OpRegistry::Global(), graph.graph()->library()),
fed_ports_(fed_ports),
aggressive_shape_inference_(aggressive_shape_inference) {
graph_def_version_ = graph.graph()->versions().producer();
node_to_context_.reserve(graph.graph()->node_size());
}
const GraphView& graph() const { return graph_; }
struct NodeContext {
const OpRegistrationData* op_data;
DataTypeVector input_types;
DataTypeVector output_types;
std::unique_ptr<InferenceContext> inference_context;
std::vector<const TensorProto*> input_tensor_protos;
std::vector<const TensorProto*> output_tensor_protos;
std::vector<ShapeHandle> input_tensors_as_shapes_to_propagate;
std::vector<ShapeHandle> output_tensors_as_shapes;
bool shape_incompatible = false;
std::string StringifyShapeHandle(ShapeHandle s) {
auto* ic = inference_context.get();
if (ic->RankKnown(s)) {
std::vector<std::string> vals;
for (int i = 0; i < ic->Rank(s); i++) {
DimensionHandle d = ic->Dim(s, i);
if (ic->ValueKnown(d) && ic->Value(d) == kUnknownDimFromConst) {
vals.push_back("?(Const)");
} else {
vals.push_back(ic->DebugString(d));
}
}
return strings::StrCat("[", absl::StrJoin(vals, ","), "]");
} else {
return "?";
}
}
std::string DebugString(const NodeDef& node) {
std::string output;
auto* ic = inference_context.get();
absl::StrAppend(
&output, node.name(), " [", node.op(), "] has ", ic->num_inputs(),
(ic->num_inputs() > 1 ? " inputs and " : " input and "),
ic->num_outputs(), (ic->num_outputs() > 1 ? " outputs" : " output"));
if (op_data->is_function_op) {
absl::StrAppend(&output, " (function op)");
}
absl::StrAppend(&output, ": \n");
for (int i = 0; i < ic->num_inputs(); i++) {
absl::StrAppend(&output, " input [", i, "] ", node.input(i),
" -- type: ", DataTypeString(input_types.at(i)),
", shape: ", ic->DebugString(ic->input(i)),
", tensor: ");
Tensor t1;
int input_tensor_protos_size = input_tensor_protos.size();
if (input_tensor_protos_size > i &&
input_tensor_protos.at(i) != nullptr &&
t1.FromProto(*input_tensor_protos.at(i))) {
absl::StrAppend(&output, t1.DebugString(), ", tensor_as_shape: ");
} else {
absl::StrAppend(&output, " null, tensor_as_shape: ");
}
int input_tensors_as_shapes_to_propagate_size =
input_tensors_as_shapes_to_propagate.size();
if (input_tensors_as_shapes_to_propagate_size > i) {
absl::StrAppend(
&output,
StringifyShapeHandle(input_tensors_as_shapes_to_propagate.at(i)),
"\n");
} else {
absl::StrAppend(&output, " null\n");
}
}
for (int i = 0; i < ic->num_outputs(); i++) {
absl::StrAppend(&output, " output [", i,
"] -- type: ", DataTypeString(output_types.at(i)),
", shape: ", ic->DebugString(ic->output(i)),
", tensor: ");
Tensor t2;
int output_tensor_protos_size = output_tensor_protos.size();
if (output_tensor_protos_size > i &&
output_tensor_protos.at(i) != nullptr &&
t2.FromProto(*output_tensor_protos.at(i))) {
absl::StrAppend(&output, t2.DebugString(), ", tensor_as_shape: ");
} else {
absl::StrAppend(&output, " null, tensor_as_shape: ");
}
int output_tensors_as_shapes_size = output_tensors_as_shapes.size();
if (output_tensors_as_shapes_size > i) {
absl::StrAppend(&output,
StringifyShapeHandle(output_tensors_as_shapes.at(i)),
"\n");
} else {
absl::StrAppend(&output, " null\n");
}
}
return output;
}
};
NodeContext* GetNodeContext(const NodeDef* node) {
auto it = node_to_context_.find(node);
if (it == node_to_context_.end()) {
return nullptr;
}
return &it->second;
}
InferenceContext* GetContext(const NodeDef* node) {
auto it = node_to_context_.find(node);
if (it == node_to_context_.end()) {
return nullptr;
}
return it->second.inference_context.get();
}
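  // Refines a function call node: forwards the caller's input shapes (and any
  // known constant inputs) into an instantiated GrapplerFunctionItem, runs
  // GraphProperties on the function body, then copies the resulting output
  // shapes and values back onto the call node's context.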
Status UpdateFunction(const NodeDef* function_node) {
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(*function_node, &function));
auto it = fun_to_grappler_function_item_.find(function.name());
if (it == fun_to_grappler_function_item_.end()) {
return errors::InvalidArgument(
function.name(),
" was not previously added to SymbolicShapeRefiner.");
}
const absl::optional<GrapplerFunctionItem>& maybe_grappler_function_item =
it->second;
if (!maybe_grappler_function_item.has_value()) {
VLOG(3) << "Skip failed to instantiate function call: function_name="
<< function.name();
auto* ctx = GetNodeContext(function_node);
auto* ic = ctx->inference_context.get();
for (int i = 0; i < ic->num_outputs(); ++i) {
TF_RETURN_IF_ERROR(SetUnknownShape(function_node, i));
}
return absl::OkStatus();
}
GrapplerFunctionItem grappler_function_item = *maybe_grappler_function_item;
MutableGraphView gv(&grappler_function_item.graph);
for (int i = 0, end = grappler_function_item.inputs().size(); i < end;
++i) {
auto& fun_input = grappler_function_item.input(i);
NodeDef* fun_node = gv.GetNode(fun_input.node_name);
const TensorId input_tensor = ParseTensorName(function_node->input(i));
if (IsControlInput(input_tensor)) {
return errors::FailedPrecondition(
"Function inputs should not contain control nodes.");
}
const NodeDef* input_node = graph_.GetNode(input_tensor.node());
if (input_node == nullptr) {
return errors::FailedPrecondition(input_tensor.node(),
" was not found in the graph.");
}
InferenceContext* input_ic = GetContext(input_node);
if (input_ic == nullptr) {
return errors::FailedPrecondition(
"Inference context has not been created for ", input_tensor.node());
}
int output_port_num = input_tensor.index();
TensorShapeProto proto;
const auto handle = input_ic->output(output_port_num);
input_ic->ShapeHandleToProto(handle, &proto);
NormalizeShapeForOutput(&proto);
AttrValue output_attr;
output_attr.mutable_list()->add_shape()->Swap(&proto);
(*fun_node->mutable_attr())["_output_shapes"] = output_attr;
if (fun_input.data_type == DT_RESOURCE) {
auto* shapes_and_types =
input_ic->output_handle_shapes_and_types(output_port_num);
if (shapes_and_types != nullptr && !shapes_and_types->empty()) {
AttrValue dtype_attr;
AttrValue shape_attr;
for (const auto& shape_and_type : *shapes_and_types) {
const auto& dtype = shape_and_type.dtype;
const auto& shape_handle = shape_and_type.shape;
dtype_attr.mutable_list()->add_type(dtype);
input_ic->ShapeHandleToProto(
shape_handle, shape_attr.mutable_list()->add_shape());
}
(*fun_node->mutable_attr())["_handle_dtypes"] = dtype_attr;
(*fun_node->mutable_attr())["_handle_shapes"] = shape_attr;
} else {
VLOG(2)
<< "A function node (" << function_node->name()
<< ") has input with DT_RESOURCE, but the input node does not "
<< "have shapes_and_types information: \n"
<< "function_node: " << function_node->ShortDebugString() << "\n"
<< "function input: " << i
<< ", input node's output: " << output_port_num << "\n"
<< "input node: " << input_node->ShortDebugString();
}
}
}
absl::flat_hash_map<std::string, NodeDef*> output_nodes;
for (const auto& output_arg : grappler_function_item.outputs()) {
output_nodes[output_arg.node_name] = gv.GetNode(output_arg.node_name);
}
auto* ctx = GetNodeContext(function_node);
auto* ic = ctx->inference_context.get();
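    // Replace function inputs whose values are known with Const nodes;
    // iterating in reverse keeps lower input indices valid while inputs are
    // being replaced.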
for (int i = grappler_function_item.inputs().size() - 1; i >= 0; --i) {
const string& input = function_node->input(i);
const string node_name = NodeName(input);
const NodeDef* input_node = graph_.GetNode(node_name);
if (IsConstant(*input_node)) {
TF_CHECK_OK(
ReplaceInputWithConst(*input_node, i, &grappler_function_item));
} else if (static_cast<int>(ctx->input_tensor_protos.size()) > i &&
ctx->input_tensor_protos[i] != nullptr) {
NodeDef const_input_node = MakeConstNodeDefFromTensorProto(
ic, *ctx->input_tensor_protos[i], ctx->input_types[i]);
TF_CHECK_OK(ReplaceInputWithConst(const_input_node, i,
&grappler_function_item));
} else if (static_cast<int>(ic->input_tensors_as_shapes().size()) > i &&
IsShapeFullyDefinedIntegerVectorOrScalar(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i])) {
NodeDef const_input_node = MakeConstNodeDefFromShape(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i]);
TF_CHECK_OK(ReplaceInputWithConst(const_input_node, i,
&grappler_function_item));
}
}
for (const auto& output_arg : grappler_function_item.outputs()) {
NodeDef* output_node = output_nodes[output_arg.node_name];
DCHECK_EQ(output_node->op(), "_Retval");
output_node->set_op("Identity");
output_node->mutable_attr()->erase("index");
}
GraphProperties gp(grappler_function_item);
    TF_RETURN_IF_ERROR(gp.InferStatically(
        /*assume_valid_feeds=*/true,
        aggressive_shape_inference_,
        /*include_tensor_values=*/true));
int output = 0;
ctx->output_tensors_as_shapes.resize(grappler_function_item.output_size());
ctx->output_tensor_protos.resize(grappler_function_item.output_size(),
nullptr);
for (auto const& out_arg : grappler_function_item.outputs()) {
TensorId out_tensor = ParseTensorName(out_arg.node_name);
if (output_nodes.count(out_tensor.node()) <= 0) {
return errors::FailedPrecondition(
"Unable to find return function_node ", out_tensor.node(), " for ",
function_node->name());
}
const NodeDef* retnode = output_nodes[out_tensor.node()];
auto output_properties = gp.GetOutputProperties(retnode->name());
int output_properties_size = output_properties.size();
if (out_tensor.index() >= output_properties_size) {
return errors::InvalidArgument(
out_tensor.ToString(), " has invalid position ", out_tensor.index(),
" (output_properties.size() = ", output_properties.size(), ").");
}
auto& outprop = output_properties[out_tensor.index()];
TensorShapeProto shape = outprop.shape();
NormalizeShapeForOutput(&shape);
ShapeHandle out;
TF_RETURN_IF_ERROR(ic->MakeShapeFromShapeProto(shape, &out));
ic->set_output(output, out);
if (outprop.has_value()) {
MaybeTensorProtoToShape(ic, outprop.value(),
&ctx->output_tensors_as_shapes[output]);
const_tensors_to_propagate_.push_back(outprop.value());
ctx->output_tensor_protos[output] = &const_tensors_to_propagate_.back();
}
output++;
}
return absl::OkStatus();
}
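  // Pulls shapes, constant tensors, and tensors-as-shapes from the node's
  // fanin into its context, and re-runs the op's shape function whenever any
  // input was refined (or the node has no inputs at all).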
Status UpdateNode(const NodeDef* node, bool* refined) {
NodeContext* ctx = GetNodeContext(node);
if (ctx == nullptr) {
TF_RETURN_IF_ERROR(AddNode(node));
ctx = CHECK_NOTNULL(GetNodeContext(node));
*refined = true;
}
InferenceContext* ic = ctx->inference_context.get();
ctx->input_tensors_as_shapes_to_propagate.resize(ic->num_inputs());
ctx->input_tensor_protos.resize(ic->num_inputs(), nullptr);
for (int dst_input = 0; dst_input < ic->num_inputs(); ++dst_input) {
const GraphView::InputPort port(node, dst_input);
const GraphView::OutputPort fanin = graph_.GetRegularFanin(port);
int src_output = fanin.port_id;
const NodeDef* src = fanin.node;
NodeContext* src_ctx = GetNodeContext(src);
if (src_ctx == nullptr) {
return errors::FailedPrecondition(
"Input ", dst_input, " for '", node->name(),
"' was not previously added to SymbolicShapeRefiner.");
}
InferenceContext* src_ic = src_ctx->inference_context.get();
if (src_output >= src_ic->num_outputs()) {
return errors::OutOfRange("src_output = ", src_output,
", but num_outputs is only ",
src_ic->num_outputs());
}
if (static_cast<int>(src_ctx->output_tensors_as_shapes.size()) >
src_output) {
ctx->input_tensors_as_shapes_to_propagate[dst_input] =
src_ctx->output_tensors_as_shapes[src_output];
}
if (static_cast<int>(src_ctx->output_tensor_protos.size()) > src_output) {
const auto* tensor_proto = src_ctx->output_tensor_protos[src_output];
if (tensor_proto != nullptr) {
ctx->input_tensor_protos[dst_input] = tensor_proto;
}
}
if (!*refined &&
!ic->input(dst_input).SameHandle(src_ic->output(src_output))) {
*refined = true;
}
ic->SetInput(dst_input, src_ic->output(src_output));
if (!*refined && ic->requested_input_tensor_as_partial_shape(dst_input)) {
*refined = true;
}
if (ctx->input_types[dst_input] == DT_RESOURCE) {
auto* outputs = src_ic->output_handle_shapes_and_types(src_output);
if (!outputs) continue;
auto* inputs = ic->input_handle_shapes_and_types(dst_input);
if (!inputs || !EquivalentShapesAndTypes(*outputs, *inputs))
*refined = true;
ic->set_input_handle_shapes_and_types(dst_input, *outputs);
}
}
*refined |= ic->num_inputs() == 0;
if (!*refined) {
return absl::OkStatus();
}
ic->set_input_tensors_as_shapes(ReplaceUnknownDimFromConstWithUnknownDim(
ic, ctx->input_tensors_as_shapes_to_propagate));
if (ctx->op_data && ctx->op_data->is_function_op) {
if (aggressive_shape_inference_) {
auto s = UpdateOutputShapesUsingAnnotatedInformation(*node, ctx);
if (s.ok() && AllOutputShapesKnown(ctx)) {
return absl::OkStatus();
}
}
auto s = UpdateFunction(node);
if (s.ok()) {
return absl::OkStatus();
} else {
VLOG(1) << "UpdateFunction failed for " << node->op()
<< ". Defaulting to ShapeUnknown.\n"
<< s.ToString();
}
}
std::vector<Tensor> const_values(ic->num_inputs());
std::vector<const Tensor*> input_tensors(ic->num_inputs(), nullptr);
for (int dst_input = 0; dst_input < ic->num_inputs(); ++dst_input) {
const TensorProto* tensor_proto = ctx->input_tensor_protos[dst_input];
if (tensor_proto != nullptr &&
NumElementsFromTensorProto(*tensor_proto) <=
kThresholdToSkipConstTensorInstantiation &&
const_values[dst_input].FromProto(*tensor_proto)) {
input_tensors[dst_input] = &const_values[dst_input];
}
}
ic->set_input_tensors(input_tensors);
return InferShapes(*node, ctx);
}
Status SetUnknownShape(const NodeDef* node, int output_port) {
shape_inference::ShapeHandle shape =
GetUnknownOutputShape(node, output_port);
InferenceContext* ctx = GetContext(node);
if (ctx == nullptr) {
return errors::InvalidArgument("SetUnknownShape: Missing context");
}
if (output_port < 0 || output_port >= ctx->num_outputs()) {
return errors::InvalidArgument(
"SetUnknownShape: output_port must be in [0, ", ctx->num_outputs(),
") but was ", output_port);
}
ctx->set_output(output_port, shape);
return absl::OkStatus();
}
struct ShapeId {
const NodeDef* node;
int port_id;
friend bool operator==(const ShapeId& lhs, const ShapeId& rhs) {
return lhs.node == rhs.node && lhs.port_id == rhs.port_id;
}
template <typename H>
friend H AbslHashValue(H h, const ShapeId& s) {
return H::combine(std::move(h), s.node, s.port_id);
}
};
struct DimId {
const NodeDef* node;
int port_id;
int dim_index;
friend bool operator==(const DimId& lhs, const DimId& rhs) {
return lhs.node == rhs.node && lhs.port_id == rhs.port_id &&
lhs.dim_index == rhs.dim_index;
}
template <typename H>
friend H AbslHashValue(H h, const DimId& d) {
return H::combine(std::move(h), d.node, d.port_id, d.dim_index);
}
};
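  // Returns the relaxed "union" of two shapes for the given output port: the
  // original handle where they agree, and fresh symbolic unknowns wherever
  // rank or dimension values differ.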
ShapeHandle OutputAsUnion(const NodeDef* node, int port_index,
ShapeHandle shape1, ShapeHandle shape2) {
if (shape1.SameHandle(shape2)) {
return shape1;
}
InferenceContext* ctx = GetContext(node);
ShapeHandle relaxed = shape1;
const int rank = ctx->Rank(shape1);
if (!ctx->RankKnown(shape2) || ctx->Rank(shape2) != rank) {
relaxed = GetUnknownOutputShape(node, port_index);
} else {
for (int d = 0; d < rank; ++d) {
if (!ctx->Dim(shape1, d).SameHandle(ctx->Dim(shape2, d))) {
int64_t val1 = ctx->Value(ctx->Dim(shape1, d));
int64_t val2 = ctx->Value(ctx->Dim(shape2, d));
if (val1 != val2 || (val1 < 0 && val2 < 0)) {
DimensionHandle new_dim = GetUnknownOutputDim(node, port_index, d);
TF_CHECK_OK(ctx->ReplaceDim(relaxed, d, new_dim, &relaxed));
}
}
}
}
return relaxed;
}
bool EquivalentShapes(ShapeHandle s1, ShapeHandle s2) const {
if (s1.SameHandle(s2)) {
return true;
}
if (InferenceContext::Rank(s1) != InferenceContext::Rank(s2)) {
return false;
}
if (!InferenceContext::RankKnown(s1) && !InferenceContext::RankKnown(s2)) {
return true;
}
const int rank = InferenceContext::Rank(s1);
for (int i = 0; i < rank; ++i) {
if (!InferenceContext::DimKnownRank(s1, i).SameHandle(
InferenceContext::DimKnownRank(s2, i))) {
int64_t val1 =
InferenceContext::Value(InferenceContext::DimKnownRank(s1, i));
int64_t val2 =
InferenceContext::Value(InferenceContext::DimKnownRank(s2, i));
if (val1 >= 0 && val2 >= 0 && val1 == val2) {
continue;
}
return false;
}
}
return true;
}
bool CompatibleShapes(ShapeHandle inferred_shape,
ShapeHandle annotated_shape) const {
if (inferred_shape.SameHandle(annotated_shape)) {
return true;
}
if (!InferenceContext::RankKnown(inferred_shape)) {
return true;
}
if (InferenceContext::Rank(inferred_shape) !=
InferenceContext::Rank(annotated_shape)) {
return false;
}
const int rank = InferenceContext::Rank(inferred_shape);
for (int i = 0; i < rank; ++i) {
if (!InferenceContext::DimKnownRank(inferred_shape, i)
.SameHandle(
InferenceContext::DimKnownRank(annotated_shape, i))) {
int64_t val1 = InferenceContext::Value(
InferenceContext::DimKnownRank(inferred_shape, i));
int64_t val2 = InferenceContext::Value(
InferenceContext::DimKnownRank(annotated_shape, i));
if (val1 >= 0 && val1 != val2) {
return false;
}
}
}
return true;
}
bool SameShapes(ShapeHandle inferred_shape,
ShapeHandle annotated_shape) const {
if (inferred_shape.SameHandle(annotated_shape)) {
return true;
}
if (InferenceContext::Rank(inferred_shape) !=
InferenceContext::Rank(annotated_shape)) {
return false;
}
const int rank = InferenceContext::Rank(inferred_shape);
for (int i = 0; i < rank; ++i) {
int64_t val1 = InferenceContext::Value(
InferenceContext::DimKnownRank(inferred_shape, i));
int64_t val2 = InferenceContext::Value(
InferenceContext::DimKnownRank(annotated_shape, i));
if (val1 != val2) {
return false;
}
}
return true;
}
bool EquivalentShapesAndTypes(const std::vector<ShapeAndType>& st1,
const std::vector<ShapeAndType>& st2) const {
if (st1.size() != st2.size()) {
return false;
}
for (int i = 0, st1_size = st1.size(); i < st1_size; ++i) {
const ShapeAndType& s1 = st1[i];
const ShapeAndType& s2 = st2[i];
if (s1.dtype != s2.dtype) {
return false;
}
if (!EquivalentShapes(s1.shape, s2.shape)) {
return false;
}
}
return true;
}
Status AddFunction(const NodeDef* function_node,
const std::string& function_name) {
auto it = fun_to_grappler_function_item_.find(function_name);
if (it != fun_to_grappler_function_item_.end()) {
return absl::OkStatus();
}
const FunctionDef* function_def =
CHECK_NOTNULL(function_library_.Find(function_name));
GrapplerFunctionItem grappler_function_item;
Status function_instantiated =
MakeGrapplerFunctionItem(*function_def, function_library_,
graph_def_version_, &grappler_function_item);
if (!function_instantiated.ok()) {
VLOG(3) << "Failed to instantiate a function. Error: "
<< function_instantiated.message();
fun_to_grappler_function_item_[function_def->signature().name()] =
absl::nullopt;
return absl::OkStatus();
}
if (static_cast<int>(grappler_function_item.inputs().size()) >
function_node->input_size()) {
return errors::FailedPrecondition(
"Function input size should be smaller than node input size.");
}
for (int i = grappler_function_item.inputs().size(),
end = function_node->input_size();
i < end; ++i) {
const string& input = function_node->input(i);
if (!IsControlInput(input)) {
return errors::FailedPrecondition(
"Found regular input (", input,
") instead of control nodes for node ", function_node->name());
}
}
fun_to_grappler_function_item_[function_def->signature().name()] =
grappler_function_item;
return absl::OkStatus();
}
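  // Builds the NodeContext for a node: resolves the op (registering function
  // bodies on first use), derives input/output types, and constructs an empty
  // InferenceContext.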
Status AddNode(const NodeDef* node) {
NodeContext& node_ctx = node_to_context_[node];
NameAttrList function;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(*node, &function));
TF_RETURN_IF_ERROR(
function_library_.LookUp(function.name(), &node_ctx.op_data));
if (node_ctx.op_data->is_function_op) {
TF_RETURN_IF_ERROR(AddFunction(node, function.name()));
}
TF_RETURN_IF_ERROR(InOutTypesForNode(*node, node_ctx.op_data->op_def,
&node_ctx.input_types,
&node_ctx.output_types));
const int num_inputs = node_ctx.input_types.size();
std::vector<ShapeHandle> input_shapes(num_inputs);
std::vector<std::unique_ptr<std::vector<ShapeAndType>>>
input_handle_shapes_and_types(num_inputs);
std::vector<const Tensor*> input_tensors(num_inputs, nullptr);
std::vector<ShapeHandle> input_tensors_as_shapes;
node_ctx.inference_context.reset(new InferenceContext(
graph_def_version_, *node, node_ctx.op_data->op_def, input_shapes,
input_tensors, input_tensors_as_shapes,
std::move(input_handle_shapes_and_types)));
const Status s = node_ctx.inference_context->construction_status();
if (!s.ok()) {
node_ctx.inference_context.reset(nullptr);
}
return s;
}
private:
ShapeHandle GetUnknownOutputShape(const NodeDef* node, int index) {
ShapeId id{node, index};
auto it = unknown_shapes_.find(id);
if (it != unknown_shapes_.end()) {
return it->second;
}
InferenceContext* c = GetContext(node);
ShapeHandle shp = c->UnknownShape();
unknown_shapes_[id] = shp;
return shp;
}
DimensionHandle GetUnknownOutputDim(const NodeDef* node, int index,
int dim_id) {
DimId id{node, index, dim_id};
auto it = unknown_dims_.find(id);
if (it != unknown_dims_.end()) {
return it->second;
}
InferenceContext* c = GetContext(node);
DimensionHandle dim = c->UnknownDim();
unknown_dims_[id] = dim;
return dim;
}
bool AllOutputValuesKnown(NodeContext* c) {
InferenceContext* ic = c->inference_context.get();
int c_output_tensors_as_shapes_size = c->output_tensors_as_shapes.size();
int c_output_tensor_protos_size = c->output_tensor_protos.size();
if (c_output_tensors_as_shapes_size < ic->num_outputs() &&
c_output_tensor_protos_size < ic->num_outputs()) {
return false;
} else {
for (int i = 0; i < ic->num_outputs(); i++) {
if (c_output_tensor_protos_size > i &&
c->output_tensor_protos[i] != nullptr) {
continue;
}
if (c_output_tensors_as_shapes_size > i &&
ic->FullyDefined(c->output_tensors_as_shapes[i])) {
bool no_unknown_dim_from_const = true;
for (int32_t j = 0; j < ic->Rank(c->output_tensors_as_shapes[i]);
++j) {
const auto dim = ic->Dim(c->output_tensors_as_shapes[i], j);
if (ic->ValueKnown(dim) && ic->Value(dim) == kUnknownDimFromConst) {
no_unknown_dim_from_const = false;
break;
}
}
if (no_unknown_dim_from_const) {
continue;
}
}
return false;
}
}
return true;
}
bool AllOutputShapesKnown(NodeContext* c) {
InferenceContext* ic = c->inference_context.get();
for (int i = 0; i < ic->num_outputs(); i++) {
if (!ic->FullyDefined(ic->output(i))) {
return false;
}
}
return true;
}
bool AllInputValuesKnown(NodeContext* c) {
InferenceContext* ic = c->inference_context.get();
for (int i = 0; i < ic->num_inputs(); i++) {
const Tensor* tensor = ic->input_tensor(i);
const ShapeHandle& input_tensors_as_shape =
ic->input_tensors_as_shapes()[i];
if (tensor == nullptr && !ic->FullyDefined(input_tensors_as_shape)) {
return false;
}
}
return true;
}
bool ShouldUpdateOutputShapesAndValues(NodeContext* c, int64_t max_size) {
InferenceContext* ic = c->inference_context.get();
if (!IsAllowListedOpTypeForEvaluateNode(c->op_data->op_def.name())) {
return false;
}
for (const auto& input_type : c->input_types) {
if (!IsNumericType(input_type)) {
return false;
}
}
for (const auto& output_type : c->output_types) {
if (!IsNumericType(output_type)) {
return false;
}
}
for (int i = 0; i < ic->num_inputs(); i++) {
const Tensor* tensor = ic->input_tensor(i);
const ShapeHandle& input_shape_handle = ic->input(i);
if (tensor != nullptr) {
if (tensor->NumElements() > max_size) {
return false;
}
} else if (ic->Value(ic->NumElements(input_shape_handle)) > max_size) {
return false;
}
}
for (int i = 0; i < ic->num_outputs(); i++) {
const ShapeHandle& shape_handle = ic->output(i);
if (!ic->FullyDefined(shape_handle) ||
ic->Value(ic->NumElements(shape_handle)) > max_size) {
return false;
}
}
return true;
}
void CreateInputTensors(NodeContext* c,
std::vector<Tensor>* input_tensor_vector,
TensorVector* inputs) {
InferenceContext* ic = c->inference_context.get();
for (int i = 0; i < ic->num_inputs(); i++) {
if (ic->input_tensor(i)) {
input_tensor_vector->at(i) = *ic->input_tensor(i);
inputs->emplace_back(&input_tensor_vector->at(i));
} else {
const ShapeHandle& shape_handle = ic->input_tensors_as_shapes()[i];
const DataType& data_type = c->input_types[i];
int32_t rank = ic->Rank(shape_handle);
if (rank < 1) {
input_tensor_vector->at(i) = Tensor(data_type, {});
} else {
input_tensor_vector->at(i) = Tensor(data_type, {rank});
}
auto* tensor = &input_tensor_vector->at(i);
if (data_type == DT_INT32) {
auto flat = tensor->flat<int32>();
for (int j = 0; j < rank; j++) {
int32_t dim = ic->Value(ic->Dim(shape_handle, j));
flat(j) = dim;
}
} else {
auto flat = tensor->flat<int64_t>();
for (int j = 0; j < rank; j++) {
int64_t dim = ic->Value(ic->Dim(shape_handle, j));
flat(j) = dim;
}
}
inputs->emplace_back(tensor);
}
}
}
Status UpdateOutputShapesAndValues(const NodeDef& node, NodeContext* c) {
InferenceContext* ic = c->inference_context.get();
TensorVector inputs;
std::vector<Tensor> input_tensor_vector(ic->num_inputs());
CreateInputTensors(c, &input_tensor_vector, &inputs);
TensorVector outputs;
auto outputs_cleanup = gtl::MakeCleanup([&outputs] {
for (const auto& output : outputs) {
if (output.tensor) {
delete output.tensor;
}
}
});
    TF_RETURN_IF_ERROR(EvaluateNode(node, inputs, /*cpu_device=*/nullptr,
                                    &resource_mgr_, &outputs));
c->output_tensors_as_shapes.resize(outputs.size());
c->output_tensor_protos.resize(outputs.size(), nullptr);
for (int k = 0, outputs_size = outputs.size(); k < outputs_size; k++) {
const auto& t = outputs[k];
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(
ic->MakeShapeFromTensorShape(t->shape(), &output_shape));
if (ic->FullyDefined(ic->output(k)) &&
!EquivalentShapes(ic->output(k), output_shape)) {
LOG(WARNING) << "UpdateOutputShapesAndValues() -- node: " << node.name()
<< ", inferred output shape "
<< "doesn't match for k=" << k << ": "
<< "ic->output(k): " << ic->DebugString(ic->output(k))
<< ", output_shape: " << ic->DebugString(output_shape)
<< " -- " << node.DebugString();
}
ic->set_output(k, output_shape);
MaybeTensorValueToShape(ic, *t.tensor, &c->output_tensors_as_shapes[k]);
TensorProto tensor_proto;
t->AsProtoTensorContent(&tensor_proto);
const_tensors_to_propagate_.push_back(tensor_proto);
c->output_tensor_protos[k] = &const_tensors_to_propagate_.back();
}
return absl::OkStatus();
}
Status UpdateOutputShapesUsingAnnotatedInformation(const NodeDef& node,
NodeContext* c) const {
const auto& attr = node.attr();
if (attr.count(kOutputSame) == 0 || !attr.at(kOutputSame).b() ||
attr.count(kOutputShapes) == 0)
return absl::OkStatus();
InferenceContext* ic = c->inference_context.get();
int output_size = attr.at(kOutputShapes).list().shape_size();
for (int i = 0; i < ic->num_outputs(); i++) {
int shape_index = IsSwitch(node) ? 0 : i;
if (shape_index >= output_size) {
LOG(WARNING)
<< "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
<< node.name() << ", inferred output shape size "
<< ic->num_outputs() << ", annotated output shape size "
<< output_size;
break;
}
const TensorShapeProto& shape =
attr.at(kOutputShapes).list().shape(shape_index);
if (shape.dim().empty()) continue;
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(ic->MakeShapeFromShapeProto(shape, &output_shape));
if ((ic->FullyDefined(ic->output(i)) &&
!SameShapes(ic->output(i), output_shape)) ||
(!ic->FullyDefined(ic->output(i)) &&
!CompatibleShapes(ic->output(i), output_shape))) {
LOG(WARNING)
<< "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
<< node.name() << ", inferred output shape "
<< "doesn't match for i=" << i << ": "
<< "ic->output(k): " << ic->DebugString(ic->output(i))
<< ", annotated output shape: " << ic->DebugString(output_shape)
<< " -- " << node.DebugString();
c->shape_incompatible = true;
}
if (!ic->FullyDefined(ic->output(i)) &&
CompatibleShapes(ic->output(i), output_shape)) {
VLOG(3) << "UpdateOutputShapesUsingAnnotatedInformation() -- node: "
<< node.name() << ", inferred output shape " << i << ": "
<< "ic->output(i): " << ic->DebugString(ic->output(i))
<< ", annotated output shape: " << ic->DebugString(output_shape)
<< " -- " << node.ShortDebugString();
ic->set_output(i, output_shape);
}
}
return absl::OkStatus();
}
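  // After the shape function has run, materializes statically computable
  // output values or tensors-as-shapes for ops such as Const, Rank, Size,
  // Shape(N), ConcatV2, Pack, Identity, Slice, and StridedSlice; under
  // aggressive shape inference, also applies annotated shapes and evaluates
  // small allowlisted nodes.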
Status MaybeUpdateNodeContextOutput(const NodeDef& node, const bool is_fed,
NodeContext* c) {
InferenceContext* ic = c->inference_context.get();
if (!is_fed) {
if (IsConstant(node)) {
const TensorProto& tensor_proto = node.attr().at("value").tensor();
c->output_tensor_protos.resize(1);
c->output_tensor_protos[0] = &tensor_proto;
c->output_tensors_as_shapes.resize(1);
MaybeTensorProtoToShape(ic, tensor_proto,
&c->output_tensors_as_shapes[0]);
} else if (IsRank(node)) {
if (ic->RankKnown(ic->input(0))) {
int32_t rank = ic->Rank(ic->input(0));
const_tensors_to_propagate_.push_back(
MakeIntegerScalarTensorProto(DT_INT32, rank));
c->output_tensor_protos.resize(1);
c->output_tensor_protos[0] = &const_tensors_to_propagate_.back();
}
} else if (IsSize(node)) {
DimensionHandle size = ic->NumElements(ic->input(0));
if (ic->ValueKnown(size)) {
int64_t sz = ic->Value(size);
bool valid = false;
if (node.attr().at("out_type").type() == DT_INT32) {
if (sz < std::numeric_limits<int32>::max()) {
const_tensors_to_propagate_.push_back(
MakeIntegerScalarTensorProto(DT_INT32, sz));
valid = true;
}
} else {
const_tensors_to_propagate_.push_back(
MakeIntegerScalarTensorProto(DT_INT64, sz));
valid = true;
}
if (valid) {
c->output_tensor_protos.resize(1);
c->output_tensor_protos[0] = &const_tensors_to_propagate_.back();
}
}
} else if (IsShape(node)) {
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] = c->inference_context->input(0);
} else if (IsShapeN(node)) {
c->output_tensors_as_shapes.resize(c->inference_context->num_inputs());
for (int i = 0; i < c->inference_context->num_inputs(); ++i) {
c->output_tensors_as_shapes[i] = c->inference_context->input(i);
}
} else if (node.op() == "ConcatV2") {
bool valid = true;
ShapeHandle result;
for (int i = 0; i < ic->num_inputs() - 1; ++i) {
ShapeHandle input = c->input_tensors_as_shapes_to_propagate[i];
if (!ic->RankKnown(input)) {
valid = false;
break;
} else if (i == 0) {
result = input;
} else {
TF_RETURN_IF_ERROR(ic->Concatenate(result, input, &result));
}
}
if (valid) {
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] = result;
}
} else if (IsPack(node)) {
std::vector<DimensionHandle> dims;
bool valid = true;
for (int i = 0; i < ic->num_inputs(); ++i) {
const Tensor* t = ic->input_tensor(i);
if (t) {
if (t->dims() != 0 ||
(t->dtype() != DT_INT32 && t->dtype() != DT_INT64)) {
valid = false;
break;
}
int64_t size = t->dtype() == DT_INT32 ? t->scalar<int32>()()
: t->scalar<int64_t>()();
dims.push_back(size < 0 ? ic->MakeDim(kUnknownDimFromConst)
: ic->MakeDim(size));
} else {
const ShapeHandle& shape_handle =
c->input_tensors_as_shapes_to_propagate[i];
if (ic->RankKnown(shape_handle) && ic->Rank(shape_handle) >= 1 &&
ic->ValueKnown(ic->Dim(shape_handle, 0))) {
dims.push_back(ic->Dim(shape_handle, 0));
} else {
dims.push_back(ic->MakeDim(kUnknownDimFromConst));
}
}
}
if (valid) {
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] = ic->MakeShape(dims);
}
} else if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] =
c->input_tensors_as_shapes_to_propagate[0];
if (c->input_tensor_protos[0] != nullptr) {
c->output_tensor_protos.resize(1);
c->output_tensor_protos[0] = c->input_tensor_protos[0];
}
} else if (IsSlice(node)) {
ShapeHandle input = c->input_tensors_as_shapes_to_propagate[0];
bool valid = ic->RankKnown(input);
const Tensor* slice_offset = ic->input_tensor(1);
valid &= slice_offset != nullptr && slice_offset->NumElements() == 1;
const Tensor* slice_size = ic->input_tensor(2);
valid &= slice_size != nullptr && slice_size->NumElements() == 1;
if (valid) {
int64_t start = slice_offset->dtype() == DT_INT32
? slice_offset->flat<int32>()(0)
: slice_offset->flat<int64_t>()(0);
int64_t size = (slice_size->dtype() == DT_INT32
? slice_size->flat<int32>()(0)
: slice_size->flat<int64_t>()(0));
ShapeHandle result;
if (size == -1) {
TF_RETURN_IF_ERROR(ic->Subshape(input, start, &result));
} else {
int64_t end = start + size;
TF_RETURN_IF_ERROR(ic->Subshape(input, start, end, &result));
}
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] = result;
}
} else if (IsStridedSlice(node)) {
ShapeHandle input = c->input_tensors_as_shapes_to_propagate[0];
bool valid = ic->RankKnown(input);
const Tensor* slice_begin = ic->input_tensor(1);
valid &= slice_begin != nullptr && slice_begin->NumElements() == 1;
const Tensor* slice_end = ic->input_tensor(2);
valid &= slice_end != nullptr && slice_end->NumElements() == 1;
const Tensor* slice_stride = ic->input_tensor(3);
valid &= slice_stride != nullptr && slice_stride->NumElements() == 1;
if (node.attr().count("ellipsis_mask") > 0 &&
node.attr().at("ellipsis_mask").i() != 0) {
valid = false;
}
if (node.attr().count("new_axis_mask") > 0 &&
node.attr().at("new_axis_mask").i() != 0) {
valid = false;
}
if (node.attr().count("shrink_axis_mask") > 0 &&
node.attr().at("shrink_axis_mask").i() != 0) {
valid = false;
}
int begin_mask = 0;
if (node.attr().count("begin_mask") > 0) {
begin_mask = node.attr().at("begin_mask").i();
}
int end_mask = 0;
if (node.attr().count("end_mask") > 0) {
end_mask = node.attr().at("end_mask").i();
}
if (begin_mask < 0 || begin_mask > 1 || end_mask < 0 || end_mask > 1) {
valid = false;
}
if (valid) {
int64_t begin = 0;
if (begin_mask == 0) {
begin = slice_begin->dtype() == DT_INT32
? slice_begin->flat<int32>()(0)
: slice_begin->flat<int64_t>()(0);
}
int64_t end = std::numeric_limits<int64_t>::max();
if (end_mask == 0) {
end = (slice_end->dtype() == DT_INT32
? slice_end->flat<int32>()(0)
: slice_end->flat<int64_t>()(0));
}
int64_t stride = slice_stride->dtype() == DT_INT32
? slice_stride->flat<int32>()(0)
: slice_stride->flat<int64_t>()(0);
ShapeHandle result;
TF_RETURN_IF_ERROR(ic->Subshape(input, begin, end, stride, &result));
c->output_tensors_as_shapes.resize(1);
c->output_tensors_as_shapes[0] = result;
}
}
}
if (aggressive_shape_inference_) {
UpdateOutputShapesUsingAnnotatedInformation(node, c).IgnoreError();
const int max_element_size = 17;
if (AllOutputValuesKnown(c) || !AllInputValuesKnown(c) ||
!ShouldUpdateOutputShapesAndValues(c, max_element_size)) {
return absl::OkStatus();
}
UpdateOutputShapesAndValues(node, c).IgnoreError();
}
return absl::OkStatus();
}
Status InferShapes(const NodeDef& node, NodeContext* c) {
if (!c->op_data || c->op_data->shape_inference_fn == nullptr ||
!c->inference_context->Run(c->op_data->shape_inference_fn).ok()) {
TF_RETURN_IF_ERROR(
c->inference_context->Run(shape_inference::UnknownShape));
}
Status status = absl::OkStatus();
auto it = fed_ports_.find(node.name());
const bool is_fed = it != fed_ports_.end();
if (is_fed) {
for (const int output_port : it->second) {
status.Update(SetUnknownShape(&node, output_port));
}
}
status.Update(MaybeUpdateNodeContextOutput(node, is_fed, c));
return status;
}
private:
bool IsIntegerVector(const Tensor& tensor) {
if (tensor.dims() == 1 &&
(tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT64)) {
return true;
}
return false;
}
bool IsIntegerScalar(const Tensor& tensor) {
if (tensor.dims() == 0 &&
(tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT64) &&
tensor.NumElements() == 1) {
return true;
}
return false;
}
TensorProto MakeIntegerScalarTensorProto(const DataType dtype,
const int64_t val) {
TensorProto tensor_proto;
tensor_proto.set_dtype(dtype);
tensor_proto.mutable_tensor_shape();
if (dtype == DT_INT32) {
tensor_proto.add_int_val(val);
} else if (dtype == DT_INT64) {
tensor_proto.add_int64_val(val);
}
return tensor_proto;
}
bool MaybeTensorProtoToShape(InferenceContext* ic,
const TensorProto& tensor_proto,
ShapeHandle* tensors_as_shapes) {
if (tensor_proto.dtype() != DT_INT32 && tensor_proto.dtype() != DT_INT64) {
return false;
}
if (NumElementsFromTensorProto(tensor_proto) >
kThresholdToSkipConstTensorInstantiation) {
return false;
}
if (tensor_proto.tensor_shape().unknown_rank() ||
tensor_proto.tensor_shape().dim_size() > 1) {
return false;
}
Tensor tensor;
if (!tensor.FromProto(tensor_proto)) {
return false;
}
return MaybeTensorValueToShape(ic, tensor, tensors_as_shapes);
}
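  // Interprets a rank-0 or rank-1 integer tensor as a shape: -1 entries map to
  // kUnknownDimFromConst, while any value below -1 aborts the conversion.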
bool MaybeTensorValueToShape(InferenceContext* ic, const Tensor& tensor,
ShapeHandle* tensors_as_shapes) {
if (IsIntegerVector(tensor)) {
bool has_values_smaller_than_minus_1 = false;
std::vector<DimensionHandle> dims;
for (int i = 0; i < tensor.NumElements(); i++) {
int64_t value = tensor.dtype() == DT_INT32 ? tensor.flat<int32>()(i)
: tensor.flat<int64_t>()(i);
has_values_smaller_than_minus_1 |= (value < -1);
dims.push_back(value < 0 ? ic->MakeDim(kUnknownDimFromConst)
: ic->MakeDim(value));
}
if (!has_values_smaller_than_minus_1) {
*tensors_as_shapes = ic->MakeShape(dims);
return true;
}
} else if (IsIntegerScalar(tensor)) {
int64_t value = tensor.dtype() == DT_INT32 ? tensor.flat<int32>()(0)
: tensor.flat<int64_t>()(0);
if (value == -1) {
*tensors_as_shapes = ic->UnknownShape();
return true;
} else if (value >= 0) {
*tensors_as_shapes = ic->MakeShape({ic->MakeDim(value)});
return true;
}
}
return false;
}
const GraphView& graph_;
int graph_def_version_;
absl::flat_hash_map<const NodeDef*, NodeContext> node_to_context_;
absl::flat_hash_map<ShapeId, ShapeHandle> unknown_shapes_;
absl::flat_hash_map<DimId, DimensionHandle> unknown_dims_;
absl::flat_hash_map<string, absl::optional<GrapplerFunctionItem>>
fun_to_grappler_function_item_;
FunctionLibraryDefinition function_library_;
const absl::flat_hash_map<string, absl::flat_hash_set<int>>& fed_ports_;
std::deque<TensorProto> const_tensors_to_propagate_;
bool aggressive_shape_inference_;
ResourceMgr resource_mgr_;
};
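// Aggregates the per-node results: shapes and dimensions proven equal are
// merged into disjoint sets, and queries resolve to the most refined value
// known for each equivalence class.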
class SymbolicShapeManager {
public:
SymbolicShapeManager() {}
Status Merge(ShapeHandle s1, ShapeHandle s2) {
if (!s1.IsSet() || !s2.IsSet()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(shapes_.Merge(s1, s2));
if (InferenceContext::Rank(s1) > 0 && InferenceContext::Rank(s2) > 0) {
CHECK_EQ(InferenceContext::Rank(s1), InferenceContext::Rank(s2));
for (int i = 0; i < InferenceContext::Rank(s1); ++i) {
TF_RETURN_IF_ERROR(dims_.Merge(InferenceContext::DimKnownRank(s1, i),
InferenceContext::DimKnownRank(s2, i)));
}
}
return absl::OkStatus();
}
Status Merge(DimensionHandle d1, DimensionHandle d2) {
if (!d1.IsSet() || !d2.IsSet()) {
return absl::OkStatus();
}
return dims_.Merge(d1, d2);
}
void AsTensorProperties(const ShapeHandle& shape, const DataType& type,
OpInfo::TensorProperties* properties) {
properties->set_dtype(type);
ShapeHandle actual_shape = shapes_.GetMergedValue(shape);
if (!InferenceContext::RankKnown(actual_shape)) {
properties->mutable_shape()->set_unknown_rank(true);
} else {
for (int j = 0; j < InferenceContext::Rank(actual_shape); ++j) {
shape_inference::DimensionHandle dim =
InferenceContext::DimKnownRank(actual_shape, j);
int64_t d = dims_.GetMergedValue(dim);
properties->mutable_shape()->add_dim()->set_size(d);
}
}
}
ShapeHandle GetMergedShape(InferenceContext* ic, ShapeHandle s) {
const auto& actual_shape = shapes_.GetMergedValue(s);
if (!InferenceContext::RankKnown(actual_shape)) {
return ic->UnknownShape();
} else {
std::vector<DimensionHandle> dims;
for (int j = 0; j < InferenceContext::Rank(actual_shape); ++j) {
shape_inference::DimensionHandle dim =
InferenceContext::DimKnownRank(actual_shape, j);
int64_t d = dims_.GetMergedValue(dim);
if (d < -1) {
d = -1;
}
dims.push_back(ic->MakeDim(d));
}
return ic->MakeShape(dims);
}
}
private:
DisjointSet<shape_inference::ShapeHandle> shapes_;
DisjointSet<shape_inference::DimensionHandle> dims_;
};
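// VLOG(1)-only consistency check: every node's inferred input and output
// shape should remain compatible with the merged shape reported by the
// SymbolicShapeManager.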
Status ValidateSymbolicShapeManager(const GraphDef& graph_def,
SymbolicShapeRefiner* refiner,
SymbolicShapeManager* shape_manager) {
if (!VLOG_IS_ON(1)) {
return absl::OkStatus();
}
VLOG(1) << "Checking any conflicts in shapes and dimensions ...";
int64_t num_incompatible_shapes = 0;
for (const NodeDef& node : graph_def.node()) {
auto ctx = refiner->GetNodeContext(&node);
if (!ctx) {
continue;
}
auto* ic = ctx->inference_context.get();
for (int i = 0; i < ic->num_inputs(); ++i) {
const auto& shape = ic->input(i);
const auto& merged_shape = shape_manager->GetMergedShape(ic, shape);
if (!refiner->CompatibleShapes(shape, merged_shape)) {
num_incompatible_shapes++;
VLOG(1) << "**** Incompatible shape from SymbolicShapeManager "
<< "for node " << node.name() << " input (" << i << ") "
<< ic->DebugString(shape)
<< " vs. merged: " << ic->DebugString(merged_shape);
}
}
for (int i = 0; i < ic->num_outputs(); ++i) {
const auto& shape = ic->output(i);
const auto& merged_shape = shape_manager->GetMergedShape(ic, shape);
if (!refiner->CompatibleShapes(shape, merged_shape)) {
num_incompatible_shapes++;
VLOG(1) << "**** Incompatible shape from SymbolicShapeManager "
<< "for node " << node.name() << " output (" << i << ") "
<< ic->DebugString(shape)
<< " vs. merged: " << ic->DebugString(merged_shape);
}
}
}
if (num_incompatible_shapes > 0) {
VLOG(1) << "**** WARNING: " << num_incompatible_shapes
<< " incompatible shapes from SymbolicShapeManager.";
} else {
VLOG(1) << "**** No incompatible shape found from SymbolicShapeManager.";
}
return absl::OkStatus();
}
Status VerboseShapeInferenceLogging(const GraphDef& graph_def,
SymbolicShapeRefiner* refiner,
SymbolicShapeManager* shape_manager) {
absl::flat_hash_set<std::string> node_names_for_logging = {};
if (!VLOG_IS_ON(3) || node_names_for_logging.empty()) {
return absl::OkStatus();
}
auto should_log = [&node_names_for_logging](std::string node_name) {
return node_names_for_logging.find(node_name) !=
node_names_for_logging.end();
};
for (const NodeDef& node : graph_def.node()) {
if (!should_log(node.name())) {
continue;
}
auto ctx = refiner->GetNodeContext(&node);
if (!ctx) {
continue;
}
auto* ic = ctx->inference_context.get();
VLOG(3) << "Shape inference for node : " << node.name();
VLOG(3) << ctx->DebugString(node);
std::string merged_shapes = "Merged shapes from SymbolicShapManager:\n";
for (int i = 0; i < ic->num_inputs(); ++i) {
absl::StrAppend(
&merged_shapes, " input[", i, "] -- ",
ic->DebugString(shape_manager->GetMergedShape(ic, ic->input(i))),
"\n");
}
for (int i = 0; i < ic->num_outputs(); ++i) {
absl::StrAppend(
&merged_shapes, " output[", i, "] -- ",
ic->DebugString(shape_manager->GetMergedShape(ic, ic->output(i))),
"\n");
}
VLOG(3) << merged_shapes;
VLOG(3) << "--------------------------------";
VLOG(3) << "";
}
return absl::OkStatus();
}
Status GraphProperties::RelaxEnqueueShapesAndMergeTypes(
SymbolicShapeRefiner* shape_refiner, const NodeDef* qnode,
const std::vector<ShapeAndType>& shapes_and_types,
std::vector<ShapeAndType>* queue_shapes_and_types) {
if (shapes_and_types.size() != queue_shapes_and_types->size()) {
return errors::InvalidArgument(
"Enqueue nodes mixed number of tensors: ", shapes_and_types.size(),
" vs ", queue_shapes_and_types->size());
}
for (size_t i = 0; i < shapes_and_types.size(); ++i) {
const ShapeAndType& a = shapes_and_types[i];
ShapeAndType& b = (*queue_shapes_and_types)[i];
if (a.dtype != b.dtype) {
return errors::InvalidArgument("Enqueue nodes mixed dtypes for tensor ",
i, ": ", DataTypeString(a.dtype), " vs ",
DataTypeString(b.dtype));
}
b.shape = shape_refiner->OutputAsUnion(qnode, i, a.shape, b.shape);
}
return absl::OkStatus();
}
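// Merge nodes emit the union of all fanins with known shapes; output 1 (the
// index of the chosen input) is always a scalar.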
Status GraphProperties::UpdateMerge(SymbolicShapeRefiner* shape_refiner,
const NodeDef* node,
bool* new_shapes) const {
InferenceContext* ic = shape_refiner->GetContext(node);
if (!ic) {
TF_RETURN_IF_ERROR(shape_refiner->AddNode(node));
ic = CHECK_NOTNULL(shape_refiner->GetContext(node));
*new_shapes = true;
ShapeHandle out1 = ic->Scalar();
if (ic->num_outputs() >= 2) ic->set_output(1, out1);
}
ShapeHandle out;
const std::vector<ShapeAndType>* out_handle = nullptr;
bool out_initialized = false;
for (const GraphView::Edge fanin : shape_refiner->graph().GetFaninEdges(
*node, false)) {
InferenceContext* src_ic = shape_refiner->GetContext(fanin.src.node);
if (!src_ic) {
continue;
}
ShapeHandle input = src_ic->output(fanin.src.port_id);
ic->SetInput(fanin.dst.port_id, input);
auto* input_handle =
src_ic->output_handle_shapes_and_types(fanin.src.port_id);
if (input_handle)
ic->set_input_handle_shapes_and_types(fanin.dst.port_id, *input_handle);
if (!out_initialized) {
out_initialized = true;
out = input;
out_handle = input_handle;
} else {
out = shape_refiner->OutputAsUnion(node, 0, input, out);
}
}
if (*new_shapes || !shape_refiner->EquivalentShapes(out, ic->output(0))) {
ic->set_output(0, out);
if (out_handle) ic->set_output_handle_shapes_and_types(0, *out_handle);
*new_shapes = true;
}
return absl::OkStatus();
}
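// Enter forwards its input unchanged: the fanin shape and any resource-handle
// shapes/types are copied through to output 0.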
Status GraphProperties::UpdateEnter(SymbolicShapeRefiner* shape_refiner,
const NodeDef* node, bool* new_shapes) {
InferenceContext* ic = shape_refiner->GetContext(node);
if (!ic) {
TF_RETURN_IF_ERROR(shape_refiner->UpdateNode(node, new_shapes));
ic = shape_refiner->GetContext(node);
}
GraphView::InputPort port(node, 0);
GraphView::OutputPort fanin = shape_refiner->graph().GetRegularFanin(port);
InferenceContext* src_ic = shape_refiner->GetContext(fanin.node);
ShapeHandle input = src_ic->output(fanin.port_id);
if (!ic->output(0).SameHandle(input)) {
ic->SetInput(0, input);
ic->set_output(0, input);
*new_shapes = true;
}
auto* outputs = src_ic->output_handle_shapes_and_types(fanin.port_id);
if (outputs) {
ic->set_input_handle_shapes_and_types(0, *outputs);
ic->set_output_handle_shapes_and_types(0, *outputs);
*new_shapes = true;
}
return absl::OkStatus();
}
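// Dispatches a shape update based on node type: Enter, Merge, Enqueue, and
// Queue ops get custom handling; everything else goes through the symbolic
// shape refiner.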
Status GraphProperties::UpdateShapes(
SymbolicShapeRefiner* shape_refiner,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>& resource_handles,
const NodeDef* n, bool* new_shapes) const {
if (IsEnter(*n)) {
TF_RETURN_IF_ERROR(UpdateEnter(shape_refiner, n, new_shapes));
} else if (IsMerge(*n)) {
TF_RETURN_IF_ERROR(UpdateMerge(shape_refiner, n, new_shapes));
} else if (IsEnqueue(*n)) {
TF_RETURN_IF_ERROR(
UpdateEnqueue(n, resource_handles, shape_refiner, new_shapes));
} else if (IsQueue(*n)) {
TF_RETURN_IF_ERROR(UpdateQueue(n, shape_refiner, new_shapes));
} else {
TF_RETURN_IF_ERROR(shape_refiner->UpdateNode(n, new_shapes));
}
return absl::OkStatus();
}
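// Propagates shapes to a fixed point over a topologically ordered work queue.
// Iteration counts are capped (as a function of graph size, loop count, and
// queue count) so that the pass terminates even when shapes do not converge,
// in which case an Internal error is returned.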
Status GraphProperties::PropagateShapes(
SymbolicShapeRefiner* shape_refiner, TopoQueue* new_shapes,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>& resource_handles,
int num_loops) const {
VLOG(1) << "Propagating " << new_shapes->size() << " new shapes through "
<< num_loops << " loops and " << resource_handles.size()
<< " resources" << std::endl;
const int64_t max_loop_length = item_.graph.node_size();
const int64_t max_rank = 4;
const int64_t max_loop_iterations =
max_rank * max_loop_length * std::max<int64_t>(1, num_loops * num_loops);
const int64_t num_queues = resource_handles.size();
const int64_t max_resource_iterations = num_queues * num_queues * max_rank;
int64_t num_resource_iterations = 0;
do {
int64_t num_loop_iterations = 0;
while (!new_shapes->empty() &&
num_loop_iterations++ < max_loop_iterations) {
const NodeDef* n = new_shapes->pop();
bool updated = false;
TF_RETURN_IF_ERROR(
UpdateShapes(shape_refiner, resource_handles, n, &updated));
if (updated) {
for (const auto& fanout : shape_refiner->graph().GetFanouts(
*n, false)) {
new_shapes->push(fanout.node);
}
if (IsEnqueue(*n)) {
auto it = resource_handles.find(n);
if (it != resource_handles.end()) {
new_shapes->push(it->second);
}
}
}
}
} while (!new_shapes->empty() &&
num_resource_iterations++ < max_resource_iterations);
if (!new_shapes->empty()) {
return errors::Internal("Shape inference failed to converge");
}
return absl::OkStatus();
}
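// Seeds a queue's resource-handle shapes from its "shapes" and
// "component_types" attributes when both are present and consistent;
// otherwise the queue node goes through regular shape inference.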
Status GraphProperties::UpdateQueue(const NodeDef* queue_node,
SymbolicShapeRefiner* shape_refiner,
bool* new_shapes) {
auto* ctx = shape_refiner->GetNodeContext(queue_node);
if (!ctx) {
TF_RETURN_IF_ERROR(shape_refiner->AddNode(queue_node));
ctx = CHECK_NOTNULL(shape_refiner->GetNodeContext(queue_node));
}
auto* ic = ctx->inference_context.get();
auto* outputs = ic->output_handle_shapes_and_types(0);
if (outputs) {
return shape_refiner->UpdateNode(queue_node, new_shapes);
}
if (queue_node->attr().count("shapes") <= 0 ||
queue_node->attr().count("component_types") <= 0 ||
queue_node->attr().at("shapes").list().shape_size() !=
queue_node->attr().at("component_types").list().type_size()) {
return shape_refiner->UpdateNode(queue_node, new_shapes);
}
const auto& shapes = queue_node->attr().at("shapes").list().shape();
const auto& types = queue_node->attr().at("component_types").list().type();
std::vector<ShapeAndType> shapes_and_types;
for (int i = 0; i < types.size(); i++) {
const auto& shape = shapes[i];
ShapeHandle shape_handle;
TF_RETURN_IF_ERROR(
ic->MakeShapeFromPartialTensorShape(shape, &shape_handle));
DataType data_type =
queue_node->attr().at("component_types").list().type(i);
ShapeAndType shape_and_type(shape_handle, data_type);
shapes_and_types.push_back(shape_and_type);
}
ic->set_output_handle_shapes_and_types(0, shapes_and_types);
*new_shapes = true;
bool dummy_new_shapes = false;
return shape_refiner->UpdateNode(queue_node, &dummy_new_shapes);
}
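// Records the shapes and types of the tensors fed to an enqueue op on the
// owning queue's resource handle, relaxing them against any shapes already
// recorded by other enqueue ops on the same queue.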
Status GraphProperties::UpdateEnqueue(
const NodeDef* enqueue_node,
const absl::flat_hash_map<const NodeDef*, const NodeDef*>& resource_handles,
SymbolicShapeRefiner* shape_refiner, bool* new_shapes) {
auto ctx = shape_refiner->GetNodeContext(enqueue_node);
if (!ctx) {
TF_RETURN_IF_ERROR(shape_refiner->AddNode(enqueue_node));
ctx = CHECK_NOTNULL(shape_refiner->GetNodeContext(enqueue_node));
}
auto it = resource_handles.find(enqueue_node);
if (it == resource_handles.end()) {
return absl::OkStatus();
}
const NodeDef* qnode = it->second;
auto qctx = shape_refiner->GetContext(qnode);
if (!qctx) {
return absl::OkStatus();
}
auto* queue_handle_data = qctx->output_handle_shapes_and_types(0);
std::vector<ShapeAndType> shapes_and_types;
for (int i = 1, end = ctx->input_types.size(); i < end; ++i) {
GraphView::InputPort inp(enqueue_node, i);
GraphView::OutputPort fanin = shape_refiner->graph().GetRegularFanin(inp);
InferenceContext* in = shape_refiner->GetContext(fanin.node);
ShapeHandle input = in->output(fanin.port_id);
ctx->inference_context->SetInput(i, input);
shapes_and_types.push_back({input, ctx->input_types[i]});
}
if (queue_handle_data == nullptr) {
qctx->set_output_handle_shapes_and_types(0, shapes_and_types);
*new_shapes = true;
} else {
TF_RETURN_IF_ERROR(RelaxEnqueueShapesAndMergeTypes(
shape_refiner, qnode, *queue_handle_data, &shapes_and_types));
*new_shapes |= !shape_refiner->EquivalentShapesAndTypes(*queue_handle_data,
shapes_and_types);
qctx->set_output_handle_shapes_and_types(0, shapes_and_types);
}
return absl::OkStatus();
}
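// Main entry point for static shape inference: builds a topological order
// (augmented with enqueue->dequeue dependencies where possible), propagates
// shapes through the graph, merges the results into a SymbolicShapeManager,
// and materializes per-node input/output properties, including constant
// values where they are cheap to obtain.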
Status GraphProperties::InferStatically(bool assume_valid_feeds,
bool aggressive_shape_inference,
bool include_input_tensor_values,
bool include_output_tensor_values) {
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item_.graph.library());
absl::flat_hash_map<string, absl::flat_hash_set<int>> fed_ports;
if (!assume_valid_feeds) {
for (const auto& feed : item_.feed) {
SafeTensorId tensor_id = ParseTensorName(feed.first);
fed_ports[tensor_id.node()].insert(tensor_id.index());
}
}
GraphView graph_view(&item_.graph);
absl::flat_hash_map<const NodeDef*,
std::pair<absl::flat_hash_set<const NodeDef*>,
absl::flat_hash_set<const NodeDef*>>>
resources;
absl::flat_hash_set<const NodeDef*> merge_nodes;
absl::flat_hash_set<const NodeDef*> fed_nodes;
absl::flat_hash_set<const NodeDef*> primary_inputs;
int num_loops = 0;
for (const NodeDef& node : item_.graph.node()) {
if (IsQueue(node)) {
for (const GraphView::InputPort& fanout :
graph_view.GetFanouts(node, false)) {
if (IsEnter(*fanout.node)) {
const NodeDef& enter = *fanout.node;
for (const GraphView::InputPort& fanout :
graph_view.GetFanouts(enter, false)) {
if (IsEnqueue(*fanout.node)) {
resources[&node].first.insert(fanout.node);
} else if (IsDequeue(*fanout.node)) {
resources[&node].second.insert(fanout.node);
}
}
} else {
if (IsEnqueue(*fanout.node)) {
resources[&node].first.insert(fanout.node);
} else if (IsDequeue(*fanout.node)) {
resources[&node].second.insert(fanout.node);
}
}
}
}
if (!HasRegularInputs(node)) {
primary_inputs.insert(&node);
} else if (IsMerge(node)) {
merge_nodes.insert(&node);
} else if (IsNextIteration(node)) {
++num_loops;
}
if (fed_ports.find(node.name()) != fed_ports.end()) {
fed_nodes.insert(&node);
}
}
absl::flat_hash_map<const NodeDef*, const NodeDef*> resource_handles;
std::vector<TopologicalDependency> extra_deps;
for (const auto& resource : resources) {
for (const NodeDef* src : resource.second.first) {
resource_handles[src] = resource.first;
for (const NodeDef* dst : resource.second.second) {
extra_deps.emplace_back(src, dst);
}
}
}
std::vector<const NodeDef*> topo_order;
Status s = ComputeTopologicalOrder(item_.graph, extra_deps, &topo_order);
if (!s.ok()) {
if (extra_deps.empty()) {
return s;
} else {
TF_RETURN_IF_ERROR(ComputeTopologicalOrder(item_.graph, &topo_order));
}
}
auto refiner = std::make_unique<SymbolicShapeRefiner>(
graph_view, fed_ports, aggressive_shape_inference);
TopoQueue new_shapes(topo_order);
for (const NodeDef* node : primary_inputs) {
new_shapes.push(node);
}
for (const NodeDef* node : fed_nodes) {
new_shapes.push(node);
}
TF_RETURN_IF_ERROR(
PropagateShapes(refiner.get(), &new_shapes, resource_handles, num_loops));
std::unique_ptr<SymbolicShapeManager> shape_manager =
std::make_unique<SymbolicShapeManager>();
bool found_error = false;
for (const NodeDef& node : item_.graph.node()) {
auto node_ctx = refiner->GetContext(&node);
if (!node_ctx) {
continue;
}
if (fed_ports.find(node.name()) != fed_ports.end()) {
VLOG(2) << "Skipping feed node shape: " << node.name();
continue;
}
for (const auto& merged_shapes : node_ctx->MergedShapes()) {
if (!shape_manager->Merge(merged_shapes.first, merged_shapes.second)
.ok()) {
found_error = true;
break;
}
}
for (const auto& merged_dims : node_ctx->MergedDims()) {
if (!shape_manager->Merge(merged_dims.first, merged_dims.second).ok()) {
found_error = true;
break;
}
}
if (found_error) {
shape_manager = std::make_unique<SymbolicShapeManager>();
break;
}
}
TF_RETURN_IF_ERROR(ValidateSymbolicShapeManager(item_.graph, refiner.get(),
shape_manager.get()));
for (const NodeDef& node : item_.graph.node()) {
VLOG(4) << "Filling in graph properties for node: " << node.name();
auto ctx = refiner->GetNodeContext(&node);
if (!ctx) {
continue;
}
auto* ic = ctx->inference_context.get();
{
auto& input_properties = input_properties_[node.name()];
CHECK_EQ(input_properties.size(), 0);
input_properties.resize(ic->num_inputs());
GraphView::InputPort input(&node, -1);
for (int i = 0; i < ic->num_inputs(); ++i) {
shape_manager->AsTensorProperties(ic->input(i), ctx->input_types[i],
&input_properties[i]);
input.port_id = i;
GraphView::OutputPort fanin = graph_view.GetRegularFanin(input);
if (include_input_tensor_values) {
if (IsConstant(*fanin.node)) {
const TensorProto& raw_val =
fanin.node->attr().at("value").tensor();
*input_properties[i].mutable_value() = raw_val;
} else if (static_cast<int>(ctx->input_tensor_protos.size()) > i &&
ctx->input_tensor_protos[i] != nullptr) {
*input_properties[i].mutable_value() = *ctx->input_tensor_protos[i];
} else if (static_cast<int>(ic->input_tensors_as_shapes().size()) >
i &&
IsShapeFullyDefinedIntegerVectorOrScalar(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i])) {
*input_properties[i].mutable_value() = MakeTensorProtoFromShape(
ic, ic->input(i), ic->input_tensors_as_shapes()[i],
ctx->input_types[i]);
}
}
}
}
{
auto& output_properties = output_properties_[node.name()];
CHECK_EQ(output_properties.size(), 0);
output_properties.resize(ic->num_outputs());
for (int i = 0; i < ic->num_outputs(); ++i) {
shape_manager->AsTensorProperties(ic->output(i), ctx->output_types[i],
&output_properties[i]);
auto converted_output_tensors_as_shapes =
ReplaceUnknownDimFromConstWithUnknownDim(
ic, ctx->output_tensors_as_shapes);
if (include_output_tensor_values) {
if (IsConstant(node)) {
const TensorProto& raw_val = node.attr().at("value").tensor();
*output_properties[i].mutable_value() = raw_val;
} else if (static_cast<int>(ctx->output_tensor_protos.size()) > i &&
ctx->output_tensor_protos[i] != nullptr) {
*output_properties[i].mutable_value() =
*ctx->output_tensor_protos[i];
} else if (static_cast<int>(
converted_output_tensors_as_shapes.size()) > i &&
IsShapeFullyDefinedIntegerVectorOrScalar(
ic, ic->output(i),
converted_output_tensors_as_shapes[i],
ctx->output_types[i])) {
*output_properties[i].mutable_value() = MakeTensorProtoFromShape(
ic, ic->output(i), converted_output_tensors_as_shapes[i],
ctx->output_types[i]);
}
}
}
}
if (aggressive_shape_inference && ctx->shape_incompatible)
incompatible_shape_nodes_.insert(node.name());
}
if (aggressive_shape_inference && !incompatible_shape_nodes_.empty())
LOG(WARNING) << incompatible_shape_nodes_.size()
<< " nodes have incompatible output shapes.";
VerboseLogUnknownDimensionSources(item_.graph, input_properties_,
output_properties_);
TF_RETURN_IF_ERROR(VerboseShapeInferenceLogging(item_.graph, refiner.get(),
shape_manager.get()));
return absl::OkStatus();
}
Status GraphProperties::InferDynamically(Cluster* cluster) {
TF_RETURN_IF_ERROR(cluster->Initialize(item_));
RunMetadata metadata;
TF_RETURN_IF_ERROR(
cluster->Run(item_.graph, item_.feed, item_.fetch, &metadata));
return InferFromCostGraph(metadata.cost_graph());
}
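// Writes the inferred output shapes of every node into its "_output_shapes"
// attribute on a copy of the input graph.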
Status GraphProperties::AnnotateOutputShapes(GraphDef* output_graph_def) const {
*output_graph_def = item_.graph;
for (int i = 0; i < output_graph_def->node_size(); i++) {
auto node = output_graph_def->mutable_node(i);
AttrValue attr_output_shape;
auto tensor_properties = GetOutputProperties(node->name());
for (const auto& tensor_property : tensor_properties) {
TensorShapeProto* proto = attr_output_shape.mutable_list()->add_shape();
*proto = tensor_property.shape();
NormalizeShapeForOutput(proto);
}
(*node->mutable_attr())["_output_shapes"] = std::move(attr_output_shape);
}
return absl::OkStatus();
}
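// Derives input/output properties from the cost graph produced by an actual
// execution of the model.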
Status GraphProperties::InferFromCostGraph(const CostGraphDef& cost_graph) {
if (cost_graph.node_size() == 0) {
LOG(WARNING) << "cost_graph is empty: nothing can be inferred!";
}
std::unordered_map<string, const CostGraphDef::Node*> name_to_cost;
std::unordered_map<string, const NodeDef*> name_to_node;
for (auto& node : cost_graph.node()) {
name_to_cost[node.name()] = &node;
std::vector<OpInfo::TensorProperties> output_properties;
for (const auto& out : node.output_info()) {
OpInfo::TensorProperties properties;
properties.set_dtype(out.dtype());
*properties.mutable_shape() = out.shape();
output_properties.push_back(properties);
}
output_properties_[node.name()] = output_properties;
}
for (const auto& node : item_.graph.node()) {
auto it = name_to_cost.find(node.name());
if (it == name_to_cost.end()) {
continue;
}
std::vector<OpInfo::TensorProperties> inputs =
FindInputFeatures(node, name_to_cost, name_to_node);
input_properties_[node.name()] = inputs;
}
return absl::OkStatus();
}
bool GraphProperties::HasInputProperties(const string& node_name) const {
return input_properties_.find(node_name) != input_properties_.end();
}
bool GraphProperties::HasOutputProperties(const string& node_name) const {
return output_properties_.find(node_name) != output_properties_.end();
}
const std::vector<OpInfo::TensorProperties>&
GraphProperties::GetInputProperties(const string& node_name) const {
auto it = input_properties_.find(node_name);
if (it != input_properties_.end()) {
return it->second;
}
return missing_properties_;
}
const std::vector<OpInfo::TensorProperties>&
GraphProperties::GetOutputProperties(const string& node_name) const {
auto it = output_properties_.find(node_name);
if (it != output_properties_.end()) {
return it->second;
}
return missing_properties_;
}
void GraphProperties::ClearInputProperties(const string& node_name) {
input_properties_.erase(node_name);
}
void GraphProperties::ClearOutputProperties(const string& node_name) {
output_properties_.erase(node_name);
}
}
} | #include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#ifdef INTEL_MKL
#include "tensorflow/core/graph/mkl_graph_util.h"
#endif
namespace tensorflow {
namespace grappler {
namespace {
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
const char kTestDataPath[] = "core/grappler/costs/graph_properties_testdata";
REGISTER_OP("TestOpWithNoInferenceFn")
.Input("x: float")
.Output("y: float")
.Doc(R"doc(
Test op with no Inference Function registered.
x: input
y: output
)doc");
class GraphPropertiesTest : public ::testing::Test {
public:
void SetUp() override {
cluster_.reset(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster_->Provision());
auto f = FunctionDefHelper::Create(
"MyFillFunc",
{"shape: int32", "value: float"},
{"out: float"},
{},
{
{{"a"},
"Fill",
{"shape", "value"},
{{"T", DataType::DT_FLOAT}, {"index_type", DataType::DT_INT32}}},
},
{{"out", "a:output:0"}});
function_lib_.add_function()->Swap(&f);
}
void TearDown() override {
TF_ASSERT_OK(cluster_->Shutdown());
cluster_.reset();
}
protected:
string PropToString(const OpInfo::TensorProperties& p) {
string s = strings::StrCat(DataTypeString(p.dtype()), ": ");
if (p.shape().unknown_rank()) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, "[");
for (int i = 0; i < p.shape().dim_size(); ++i) {
strings::StrAppend(&s, i == 0 ? "" : ",",
std::max<int64_t>(p.shape().dim(i).size(), -1));
}
strings::StrAppend(&s, "]");
}
return s;
}
void ExpectTensorValues(const std::vector<int64_t>& expected,
const TensorProto& tensor_proto_to_compare) {
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(tensor_proto_to_compare));
EXPECT_EQ(expected.size(), tensor.NumElements());
ASSERT_TRUE(tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT64);
if (tensor.dtype() == DT_INT32) {
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<int32>()(i));
}
} else {
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<int64_t>()(i));
}
}
}
void ExpectFloatTensorValues(const std::vector<float>& expected,
const TensorProto& tensor_proto_to_compare) {
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(tensor_proto_to_compare));
EXPECT_EQ(expected.size(), tensor.NumElements());
ASSERT_EQ(tensor.dtype(), DT_FLOAT);
for (int i = 0; i < tensor.NumElements(); i++) {
EXPECT_EQ(expected[i], tensor.flat<float>()(i));
}
}
std::unique_ptr<SingleMachine> cluster_;
FunctionDefLibrary function_lib_;
};
TEST_F(GraphPropertiesTest, StaticProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(1, properties.GetInputProperties(node.name()).size());
const auto props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(10, prop.shape().dim(0).size());
EXPECT_EQ(1, prop.shape().dim(1).size());
} else if (node.op() == "AddN") {
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ(DT_FLOAT, in_prop.dtype());
EXPECT_FALSE(in_prop.shape().unknown_rank());
EXPECT_EQ(2, in_prop.shape().dim_size());
EXPECT_EQ(10, in_prop.shape().dim(0).size());
EXPECT_EQ(1, in_prop.shape().dim(1).size());
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
EXPECT_EQ(in_prop.dtype(), out_props[0].dtype());
EXPECT_EQ(in_prop.shape().DebugString(),
out_props[0].shape().DebugString());
}
}
}
TEST_F(GraphPropertiesTest, ClearProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(1, properties.GetInputProperties(node.name()).size());
const auto props = properties.GetOutputProperties(node.name());
properties.ClearOutputProperties(node.name());
const auto cleared_props = properties.GetOutputProperties(node.name());
EXPECT_TRUE(cleared_props.empty());
} else if (node.op() == "AddN") {
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
properties.ClearInputProperties(node.name());
const auto cleared_props = properties.GetInputProperties(node.name());
EXPECT_TRUE(cleared_props.empty());
}
}
}
TEST_F(GraphPropertiesTest, Clear) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
EXPECT_TRUE(properties.has_properties());
properties.Clear();
EXPECT_FALSE(properties.has_properties());
}
TEST_F(GraphPropertiesTest, DynamicProperties) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
GraphProperties properties(item);
TF_ASSERT_OK(cluster_->Initialize(item));
Status s = properties.InferDynamically(cluster_.get());
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "RandomStandardNormal") {
EXPECT_EQ(0, properties.GetInputProperties(node.name()).size());
} else if (node.op() == "AddN") {
if (node.name() == "AddN") {
const auto props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_INVALID, prop.dtype());
EXPECT_TRUE(prop.shape().unknown_rank());
} else {
const auto props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(10, prop.shape().dim(0).size());
EXPECT_EQ(1, prop.shape().dim(1).size());
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
string prop_str;
::tensorflow::protobuf::TextFormat::PrintToString(prop, &prop_str);
string out_prop_str;
::tensorflow::protobuf::TextFormat::PrintToString(out_props[0],
&out_prop_str);
EXPECT_EQ(prop_str, out_prop_str);
}
}
}
}
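// Shape function that yields a [10,10] matrix only when the value of its input
// tensor is available at inference time; the tests below use it to detect
// whether constant values were materialized during shape inference.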
REGISTER_OP("DetectInputValueInShapeInferenceOp")
.Input("a: T")
.Output("o: T")
.Attr("T: {numbertype, bool}")
.SetShapeFn([](shape_inference::InferenceContext* c) {
if (c->input_tensor(0)) {
c->set_output(0, c->Matrix(10, 10));
return absl::OkStatus();
}
return shape_inference::UnknownShape(c);
});
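// Builds Const -> Identity -> DetectInputValueInShapeInferenceOp and checks
// whether the const value reached the detect op's shape function. The cases
// below imply that const tensors with more than 128 elements are not
// materialized during shape inference.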
class ConstTensorSkipTestCase {
public:
ConstTensorSkipTestCase(const DataType data_type,
const std::vector<int64_t> shape, const double value,
const bool expected)
: data_type_(data_type),
shape_(shape),
value_(value),
expected_(expected) {}
void RunTestAndValidate() const {
LOG(INFO) << "Run Const tensor skip test: "
<< "data_type: " << data_type_ << ", shape: {"
<< absl::StrJoin(shape_, ",") << "}, value: " << value_
<< ", expected: " << expected_;
GrapplerItem item;
const absl::Span<const int64_t> shape_array_slice(shape_);
Tensor const_tensor_value(data_type_, TensorShape(shape_array_slice));
switch (data_type_) {
case DT_INT32:
test::FillIota<int32>(&const_tensor_value, static_cast<int32>(value_));
break;
case DT_INT64:
test::FillIota<int64_t>(&const_tensor_value,
static_cast<int64_t>(value_));
break;
case DT_FLOAT:
test::FillIota<float>(&const_tensor_value, static_cast<float>(value_));
break;
case DT_DOUBLE:
test::FillIota<double>(&const_tensor_value,
static_cast<double>(value_));
break;
case DT_BFLOAT16:
test::FillIota<Eigen::bfloat16>(&const_tensor_value,
static_cast<Eigen::bfloat16>(value_));
break;
default:
CHECK(false) << "Unsupported data type (" << data_type_
<< ") in this test.";
break;
}
TF_ASSERT_OK(NodeDefBuilder("const", "Const")
.Attr("dtype", data_type_)
.Attr("value", const_tensor_value)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("const_identity", "Identity")
.Attr("dtype", data_type_)
.Input("const", 0, data_type_)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("detect", "DetectInputValueInShapeInferenceOp")
.Attr("T", data_type_)
.Input("const_identity", 0, data_type_)
.Finalize(item.graph.add_node()));
item.fetch.push_back("const");
item.fetch.push_back("const_identity");
item.fetch.push_back("detect");
GraphProperties graph_properties(item);
TF_ASSERT_OK(graph_properties.InferStatically(false));
const auto& const_output = graph_properties.GetOutputProperties("const");
EXPECT_EQ(1, const_output.size());
const OpInfo::TensorProperties& const_output0 = const_output[0];
const auto& const_identity_input =
graph_properties.GetInputProperties("const_identity");
EXPECT_EQ(1, const_identity_input.size());
const OpInfo::TensorProperties& const_identity_input0 =
const_identity_input[0];
const auto& const_identity_output =
graph_properties.GetOutputProperties("const_identity");
EXPECT_EQ(1, const_identity_output.size());
const OpInfo::TensorProperties& const_identity_output0 =
const_identity_output[0];
EXPECT_TRUE(const_output0.has_value());
EXPECT_TRUE(const_identity_input0.has_value());
EXPECT_TRUE(const_identity_output0.has_value());
const auto& detect_input = graph_properties.GetInputProperties("detect");
EXPECT_EQ(1, detect_input.size());
const OpInfo::TensorProperties& detect_input0 = detect_input[0];
const auto& detect_output = graph_properties.GetOutputProperties("detect");
EXPECT_EQ(1, detect_output.size());
const OpInfo::TensorProperties& detect_output0 = detect_output[0];
EXPECT_TRUE(const_output0.has_value());
EXPECT_TRUE(const_identity_input0.has_value());
EXPECT_TRUE(const_identity_output0.has_value());
EXPECT_TRUE(detect_input0.has_value());
if (expected_) {
EXPECT_EQ(detect_output0.shape().dim_size(), 2);
EXPECT_EQ(detect_output0.shape().dim(0).size(), 10);
EXPECT_EQ(detect_output0.shape().dim(1).size(), 10);
} else {
EXPECT_TRUE(detect_output0.shape().unknown_rank());
}
}
private:
DataType data_type_;
std::vector<int64_t> shape_;
double value_;
bool expected_;
};
TEST_F(GraphPropertiesTest, SkipInstantiatingConstTensor) {
std::vector<ConstTensorSkipTestCase> test_cases = {
{DT_INT32, {16, 8}, 1, true},
{DT_INT32, {1, 129}, 2, false},
{DT_INT64, {8, 8}, 3, true},
{DT_INT64, {128, 2}, 0, false},
{DT_FLOAT, {16, 8}, 1.0, true},
{DT_FLOAT, {16, 8}, 1.3, true},
{DT_FLOAT, {1, 129}, 0.7, false},
{DT_DOUBLE, {16, 8}, 1.0, true},
{DT_DOUBLE, {16, 8}, 1.3, true},
{DT_DOUBLE, {1, 129}, 0.7, false},
{DT_BFLOAT16, {16, 8}, 1.0, true},
{DT_BFLOAT16, {16, 8}, 1.3, true},
{DT_BFLOAT16, {1, 129}, 0.7, false},
};
for (const auto& test_case : test_cases) {
test_case.RunTestAndValidate();
}
}
TEST_F(GraphPropertiesTest, Variables) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "Variable")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
item.fetch.push_back("Var");
Tensor initial_val(DT_FLOAT, TensorShape({3, 7}));
test::FillIota<float>(&initial_val, 0);
TF_ASSERT_OK(NodeDefBuilder("InitialVal", "Const")
.Attr("dtype", DT_FLOAT)
.Attr("value", initial_val)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("InitVar", "Assign")
.Input("Var", 0, DT_FLOAT_REF)
.Input("InitialVal", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
item.init_ops.push_back("InitVar");
{
GraphProperties static_properties(item);
TF_ASSERT_OK(static_properties.InferStatically(false));
const auto props = static_properties.GetOutputProperties("Var");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT_REF, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
{
TF_ASSERT_OK(cluster_->Initialize(item));
GraphProperties dynamic_properties(item);
TF_ASSERT_OK(dynamic_properties.InferDynamically(cluster_.get()));
const auto props = dynamic_properties.GetOutputProperties("Var");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT_REF, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
}
TEST_F(GraphPropertiesTest, ReadVariableOpAfterEnter) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "VarHandleOp")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("Enter", "Enter")
.Attr("T", DT_RESOURCE)
.Attr("frame_name", "while_context")
.Attr("is_constant", true)
.Attr("parallel_iterations", 10)
.Input("Var", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("ReadVariableOpAfterEnter", "ReadVariableOp")
.Attr("dtype", DT_FLOAT)
.Input("Enter", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props = properties.GetOutputProperties("ReadVariableOpAfterEnter");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
TEST_F(GraphPropertiesTest, VarHandles) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Var", "VarHandleOp")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({3, 7}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("VarRead", "ReadVariableOp")
.Attr("dtype", DT_FLOAT)
.Input("Var", 0, DT_RESOURCE)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props = properties.GetOutputProperties("VarRead");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_FALSE(prop.shape().unknown_rank());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ(3, prop.shape().dim(0).size());
EXPECT_EQ(7, prop.shape().dim(1).size());
}
TEST_F(GraphPropertiesTest, WhileLoopWithVarHandleOpInput) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"while_loop_var_handle_op.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> resource_nodes{
"loop_var", "while/Enter", "while/Merge", "while/Switch",
"while/Identity", "while/NextIteration", "while/Exit"};
for (const string& node : resource_nodes) {
const auto props = properties.GetOutputProperties(node);
EXPECT_GE(props.size(), 1);
EXPECT_EQ("resource: []", PropToString(props[0]));
}
const auto props = properties.GetOutputProperties("while/ReadVariableOp");
EXPECT_EQ(1, props.size());
EXPECT_EQ("int32: []", PropToString(props[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_NoShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT});
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: ?", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_ShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT},
ops::FIFOQueue::Attrs().Shapes({{3, 7, 1}}));
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7,1]", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, QueueWithOnlyDequeue_PartialShapeAttr) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT},
ops::FIFOQueue::Attrs().Shapes({{3, 7, -1}}));
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7,-1]", PropToString(props1[0]));
}
TEST_F(GraphPropertiesTest, Queues) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q1 = ops::FIFOQueue(root.WithOpName("Queue1"), {DataType::DT_FLOAT});
Output rnd =
ops::RandomNormal(root.WithOpName("rnd"), {3, 7}, DataType::DT_FLOAT);
Output square1 = ops::Square(root.WithOpName("Square1"), rnd);
auto enqueue1 = ops::QueueEnqueue(root.WithOpName("Enqueue1"), q1, {square1});
auto dequeue1 =
ops::QueueDequeue(root.WithOpName("Dequeue1"), q1, {DataType::DT_FLOAT});
auto q2 =
ops::RandomShuffleQueue(root.WithOpName("Queue2"), {DataType::DT_FLOAT});
Output square2 = ops::Square(root.WithOpName("Square2"), dequeue1[0]);
auto enqueue2 = ops::QueueEnqueue(root.WithOpName("Enqueue2"), q2, {square2});
auto dequeue2 =
ops::QueueDequeue(root.WithOpName("Dequeue2"), q2, {DataType::DT_FLOAT});
auto q4 =
ops::RandomShuffleQueue(root.WithOpName("Queue4"), {DataType::DT_FLOAT});
auto enqueue4 = ops::QueueEnqueue(root.WithOpName("Enqueue4"), q4, {square2});
auto enqueue4_2 =
ops::QueueEnqueue(root.WithOpName("Enqueue4_2"), q4, {dequeue2[0]});
auto dequeue4 =
ops::QueueDequeue(root.WithOpName("Dequeue4"), q4, {DataType::DT_FLOAT});
auto q5 = ops::RandomShuffleQueue(
root.WithOpName("Queue5"),
{DataType::DT_FLOAT, DataType::DT_DOUBLE, DataType::DT_FLOAT});
Output rnd2 =
ops::RandomNormal(root.WithOpName("rnd2"), {10}, DataType::DT_DOUBLE);
Output rnd3 =
ops::RandomNormal(root.WithOpName("rnd3"), {1, 2, 3}, DataType::DT_FLOAT);
auto enqueue5 =
ops::QueueEnqueue(root.WithOpName("Enqueue5"), q5, {rnd, rnd2, rnd3});
auto dequeue5 = ops::QueueDequeue(
root.WithOpName("Dequeue5"), q5,
{DataType::DT_FLOAT, DataType::DT_DOUBLE, DataType::DT_FLOAT});
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props1 = properties.GetOutputProperties("Dequeue1");
ASSERT_EQ(1, props1.size());
EXPECT_EQ("float: [3,7]", PropToString(props1[0]));
const auto props2 = properties.GetOutputProperties("Dequeue2");
ASSERT_EQ(1, props2.size());
EXPECT_EQ("float: [3,7]", PropToString(props2[0]));
const auto props4 = properties.GetOutputProperties("Dequeue4");
ASSERT_EQ(1, props4.size());
EXPECT_EQ("float: [3,7]", PropToString(props4[0]));
const auto props5 = properties.GetOutputProperties("Dequeue5");
ASSERT_EQ(3, props5.size());
EXPECT_EQ("float: [3,7]", PropToString(props5[0]));
EXPECT_EQ("double: [10]", PropToString(props5[1]));
EXPECT_EQ("float: [1,2,3]", PropToString(props5[2]));
}
TEST_F(GraphPropertiesTest, MergeWithoutLoops) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"merge_without_loops.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> nodes{"cond/Merge", "cond/concat", "cond/concat_1"};
std::vector<string> expected_outputs{"float: [-1,-1,1]", "float: [2,1,1]",
"float: [1,2,1]"};
for (int i = 0; i < nodes.size(); i++) {
const auto props = properties.GetOutputProperties(nodes[i]);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(expected_outputs[i], PropToString(prop));
}
const auto props = properties.GetInputProperties("Less");
EXPECT_EQ(2, props.size());
for (int i = 0; i < props.size(); ++i) {
EXPECT_EQ(DT_INT32, props[i].dtype());
EXPECT_TRUE(props[i].has_value());
EXPECT_EQ("int32: []", PropToString(props[i]));
}
}
TEST_F(GraphPropertiesTest, WhileLoop) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"while_loop.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
for (const string& node : nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,2]", PropToString(prop));
}
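  // Dimension sizes <= -2 denote symbolic unknown dimensions; the loop input
  // ("ones") and output ("while/Exit_1") should receive distinct symbols.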
auto shape_in = properties.GetOutputProperties("ones").at(0).shape();
auto shape_out = properties.GetOutputProperties("while/Exit_1").at(0).shape();
EXPECT_GE(-2, shape_in.dim(0).size());
EXPECT_GE(-2, shape_out.dim(0).size());
EXPECT_NE(shape_in.dim(0).size(), shape_out.dim(0).size());
}
TEST_F(GraphPropertiesTest, NestedLoop) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"nested_loop.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> outer_nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
std::vector<string> inner_nodes{"while/while/Merge_1",
"while/while/NextIteration_1",
"while/while/Exit_1"};
for (const string& node : outer_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,1,1]", PropToString(prop));
}
for (const string& node : inner_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,1,-1]", PropToString(prop));
}
}
TEST_F(GraphPropertiesTest, LoopsAndQueues) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"loops_and_queues.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> outer_nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
std::vector<string> inner_nodes{"while/while/Merge_1",
"while/while/NextIteration_1",
"while/while/Exit_1"};
for (const string& node : outer_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [1,1,-1]", PropToString(prop));
}
for (const string& node : inner_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,1,-1]", PropToString(prop));
}
}
TEST_F(GraphPropertiesTest, LoopsAndResourceVars) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"loops_and_resource_vars.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> outer_nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
std::vector<string> inner_nodes{"while/while/Merge_1",
"while/while/NextIteration_1",
"while/while/Exit_1"};
for (const string& node : outer_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_INT32, prop.dtype());
EXPECT_EQ("int32: []", PropToString(prop));
}
for (const string& node : inner_nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_INT32, prop.dtype());
EXPECT_EQ("int32: []", PropToString(prop));
}
}
TEST_F(GraphPropertiesTest, QueuesAndLoops) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"queues_and_loops.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
std::vector<string> nodes{"while/Merge_1", "while/NextIteration_1",
"while/Exit_1"};
for (const string& node : nodes) {
const auto props = properties.GetOutputProperties(node);
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,2]", PropToString(prop));
}
const auto props = properties.GetOutputProperties("concat");
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [-1,4]", PropToString(prop));
}
TEST_F(GraphPropertiesTest, InferRestoreOpShape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var = ops::Variable(s.WithOpName("var"), TensorShape({128, 256}),
DataType::DT_FLOAT);
Output filename =
ops::Const(s.WithOpName("filename"), string("model"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("a"), TensorShape());
Output restore = ops::Restore(s.WithOpName("restore"), filename, tensor_name,
DataType::DT_FLOAT);
Output init_restore = ops::Assign(s.WithOpName("init_restore"), var, restore);
Output shape_and_slice = ops::Const(s.WithOpName("shape_and_slice"),
string("256 256 0,128:-"), TensorShape());
Output restore_slice =
ops::RestoreSlice(s.WithOpName("restore_slice"), filename, tensor_name,
shape_and_slice, DataType::DT_FLOAT);
Output init_restore_slice =
ops::Assign(s.WithOpName("init_restore_slice"), var, restore_slice);
Output restore_v2 =
ops::RestoreSlice(s.WithOpName("restore_v2"), filename, tensor_name,
shape_and_slice, DataType::DT_FLOAT);
Output init_restore_v2 =
ops::Assign(s.WithOpName("init_restore_v2"), var, restore_v2);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("init_restore");
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto restore_props = properties.GetOutputProperties("restore");
const OpInfo::TensorProperties& restore_prop = restore_props[0];
EXPECT_EQ(DT_FLOAT, restore_prop.dtype());
EXPECT_EQ("float: [128,256]", PropToString(restore_prop));
const auto restore_slice_props =
properties.GetOutputProperties("restore_slice");
const OpInfo::TensorProperties& restore_slice_prop = restore_slice_props[0];
EXPECT_EQ(DT_FLOAT, restore_slice_prop.dtype());
EXPECT_EQ("float: [128,256]", PropToString(restore_slice_prop));
const auto restorev2_props = properties.GetOutputProperties("restore_v2");
const OpInfo::TensorProperties& restorev2_prop = restorev2_props[0];
EXPECT_EQ(DT_FLOAT, restorev2_prop.dtype());
EXPECT_EQ("float: [128,256]", PropToString(restorev2_prop));
const auto input_props = properties.GetInputProperties("init_restore");
ASSERT_EQ(2, input_props.size());
const OpInfo::TensorProperties& input_prop = input_props[1];
EXPECT_EQ(DT_FLOAT, input_prop.dtype());
EXPECT_EQ("float: [128,256]", PropToString(input_prop));
}
TEST_F(GraphPropertiesTest, InferRestoreOpShape_WithTwoNodesShareSameOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var = ops::Variable(s.WithOpName("var"), PartialTensorShape(),
DataType::DT_FLOAT);
Output var2 = ops::Variable(s.WithOpName("var2"), TensorShape({128, 256}),
DataType::DT_FLOAT);
Output filename =
ops::Const(s.WithOpName("filename"), string("model"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("a"), TensorShape());
Output restore = ops::Restore(s.WithOpName("restore"), filename, tensor_name,
DataType::DT_FLOAT);
Output init = ops::Assign(s.WithOpName("init"), var, restore);
Output init2 = ops::Assign(s.WithOpName("init2"), var2, restore);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("init");
item.fetch.push_back("init2");
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto props = properties.GetOutputProperties("restore");
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ("float: [128,256]", PropToString(prop));
}
TEST_F(GraphPropertiesTest, TensorAsShapesPropagation) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), {5, 7}, {2});
Output a1 = ops::Identity(s.WithOpName("a1"), a);
Output b = ops::Const(s.WithOpName("b"), 99, {});
Output b1 = ops::Identity(s.WithOpName("b1"), b);
Output c = ops::Const(s.WithOpName("c"), 1, {4, 4, 4});
Output c1 = ops::Identity(s.WithOpName("c1"), c);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
EXPECT_EQ("int32: [2]", PropToString(properties.GetOutputProperties("a")[0]));
EXPECT_EQ("int32: [2]",
PropToString(properties.GetOutputProperties("a1")[0]));
EXPECT_EQ("int32: []", PropToString(properties.GetOutputProperties("b")[0]));
EXPECT_EQ("int32: []", PropToString(properties.GetOutputProperties("b1")[0]));
EXPECT_EQ("int32: [4,4,4]",
PropToString(properties.GetOutputProperties("c")[0]));
EXPECT_EQ("int32: [4,4,4]",
PropToString(properties.GetOutputProperties("c1")[0]));
EXPECT_TRUE(properties.GetOutputProperties("a")[0].has_value());
EXPECT_TRUE(properties.GetInputProperties("a1")[0].has_value());
EXPECT_TRUE(properties.GetOutputProperties("a1")[0].has_value());
EXPECT_TRUE(properties.GetOutputProperties("b")[0].has_value());
EXPECT_TRUE(properties.GetInputProperties("b1")[0].has_value());
EXPECT_TRUE(properties.GetOutputProperties("b1")[0].has_value());
EXPECT_TRUE(properties.GetOutputProperties("c")[0].has_value());
EXPECT_TRUE(properties.GetInputProperties("c1")[0].has_value());
EXPECT_TRUE(properties.GetOutputProperties("c1")[0].has_value());
ExpectTensorValues({5, 7}, properties.GetOutputProperties("a")[0].value());
ExpectTensorValues({5, 7}, properties.GetInputProperties("a1")[0].value());
ExpectTensorValues({5, 7}, properties.GetOutputProperties("a1")[0].value());
ExpectTensorValues({99}, properties.GetOutputProperties("b")[0].value());
ExpectTensorValues({99}, properties.GetInputProperties("b1")[0].value());
ExpectTensorValues({99}, properties.GetOutputProperties("b1")[0].value());
std::vector<int64_t> c_values;
for (int i = 0; i < 4 * 4 * 4; i++) {
c_values.push_back(1);
}
ExpectTensorValues({c_values},
properties.GetOutputProperties("c")[0].value());
ExpectTensorValues({c_values},
properties.GetInputProperties("c1")[0].value());
ExpectTensorValues({c_values},
properties.GetOutputProperties("c1")[0].value());
}
TEST_F(GraphPropertiesTest, IdentityPassingShape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 5, {2});
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Const(s.WithOpName("const"), 0.1f, {});
Output d = ops::Fill(s.WithOpName("fill"), b, c);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("fill");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [5,5]", PropToString(out_prop0));
}
TEST_F(GraphPropertiesTest, SkippingValueInferenceForLargeTensors) {
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 4, {2});
Output b = ops::Const(s.WithOpName("const"), 7, {});
Output c = ops::Fill(s.WithOpName("fill"), a, b);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto out_props = properties.GetOutputProperties("fill");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("int32: [4,4]", PropToString(out_prop0));
EXPECT_TRUE(out_prop0.has_value());
}
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1000, {4});
Output b = ops::Const(s.WithOpName("const"), 7, {});
Output c = ops::Fill(s.WithOpName("fill"), a, b);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto out_props = properties.GetOutputProperties("fill");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("int32: [1000,1000,1000,1000]", PropToString(out_prop0));
EXPECT_FALSE(out_prop0.has_value());
}
}
TEST_F(GraphPropertiesTest, PackWithConstInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {});
Output b = ops::Const(s.WithOpName("b"), 2, {});
Output c = ops::Const(s.WithOpName("c"), 3, {});
Output d = ops::Const(s.WithOpName("d"), 4, {});
Output e = ops::Stack(s.WithOpName("pack"), {a, b, c, d});
Output f = ops::Const(s.WithOpName("const"), 0.1f, {});
Output g = ops::Fill(s.WithOpName("fill"), e, f);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("fill");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,2,3,4]", PropToString(out_prop0));
}
TEST_F(GraphPropertiesTest, RankOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("Const"), 1, {4, 4, 4});
Output r = ops::Rank(s.WithOpName("Rank"), c);
Output i = ops::Identity(s.WithOpName("Identity"), r);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto rank_props = properties.GetOutputProperties("Rank");
const OpInfo::TensorProperties rank_prop0 = rank_props[0];
EXPECT_EQ("int32: []", PropToString(rank_prop0));
EXPECT_TRUE(rank_prop0.has_value());
ExpectTensorValues({3}, rank_prop0.value());
const auto identity_props = properties.GetOutputProperties("Identity");
const OpInfo::TensorProperties identity_props0 = identity_props[0];
EXPECT_EQ("int32: []", PropToString(identity_props0));
EXPECT_TRUE(identity_props0.has_value());
ExpectTensorValues({3}, identity_props0.value());
}
TEST_F(GraphPropertiesTest, SizeOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("Const"), 1, {1, 2, 3, 4});
Output r = ops::Size(s.WithOpName("Size"), c);
Output i = ops::Identity(s.WithOpName("Identity"), r);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto size_props = properties.GetOutputProperties("Size");
const OpInfo::TensorProperties size_props0 = size_props[0];
EXPECT_EQ("int32: []", PropToString(size_props0));
EXPECT_TRUE(size_props0.has_value());
ExpectTensorValues({24}, size_props0.value());
const auto identity_props = properties.GetOutputProperties("Identity");
const OpInfo::TensorProperties identity_props0 = identity_props[0];
EXPECT_EQ("int32: []", PropToString(identity_props0));
EXPECT_TRUE(identity_props0.has_value());
ExpectTensorValues({24}, identity_props0.value());
}
TEST_F(GraphPropertiesTest, PackWithConstMinus1AndReshapes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape0 = ops::Const(s.WithOpName("shape0"), 4, {});
Output shape1 = ops::Const(s.WithOpName("shape1"), -1, {});
Output pack = ops::Stack(s.WithOpName("pack"), {shape0, shape1});
Output x0_ = ops::Placeholder(s.WithOpName("x0_"), DataType::DT_FLOAT);
Output x1_ = ops::Placeholder(s.WithOpName("x1_"), DataType::DT_FLOAT);
Output x0 = ops::Reshape(s.WithOpName("x0"), x0_, pack);
Output x1 = ops::Reshape(s.WithOpName("x1"), x1_, pack);
Output s0 = ops::Const(s.WithOpName("s0"), true, {4, 16});
Output s1 = ops::Placeholder(s.WithOpName("s1"), DataType::DT_BOOL);
Output y0 = ops::Placeholder(s.WithOpName("y0"), DataType::DT_FLOAT);
Output y1 = ops::Placeholder(s.WithOpName("y1"), DataType::DT_FLOAT);
Output z0 = ops::SelectV2(s.WithOpName("z0"), s0, x0, y0);
Output z1 = ops::SelectV2(s.WithOpName("z1"), s1, x1, y1);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
auto* node = item.graph.mutable_node(i);
if (node->op() == "SelectV2") {
node->set_op("Select");
}
}
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
for (const auto& node_name : {"x0", "y0", "z0"}) {
const auto out_props = properties.GetOutputProperties(node_name);
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [4,16]", PropToString(out_prop0));
}
{
const auto out_props = properties.GetOutputProperties("s0");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("bool: [4,16]", PropToString(out_prop0));
}
for (const auto& node_name : {"x1", "y1", "z1"}) {
const auto out_props = properties.GetOutputProperties(node_name);
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [4,-1]", PropToString(out_prop0));
}
{
const auto out_props = properties.GetOutputProperties("s1");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("bool: ?", PropToString(out_prop0));
}
}
TEST_F(GraphPropertiesTest, PackWithIdentityInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a0 = ops::Const(s.WithOpName("a0"), 1, {});
Output b0 = ops::Const(s.WithOpName("b0"), 2, {});
Output c0 = ops::Const(s.WithOpName("c0"), 3, {});
Output d0 = ops::Const(s.WithOpName("d0"), 4, {});
Output a = ops::Identity(s.WithOpName("a"), a0);
Output b = ops::Identity(s.WithOpName("b"), b0);
Output c = ops::Identity(s.WithOpName("c"), c0);
Output d = ops::Identity(s.WithOpName("d"), d0);
Output e = ops::Stack(s.WithOpName("pack"), {a, b, c, d});
Output f = ops::Const(s.WithOpName("const"), 0.1f, {});
Output g = ops::Fill(s.WithOpName("fill"), e, f);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("fill");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,2,3,4]", PropToString(out_prop0));
}
TEST_F(GraphPropertiesTest, FunctionWithDtResourceInput) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_with_dt_resource_input.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props =
properties.GetOutputProperties("FunctionWithDtResourceInput");
EXPECT_EQ(out_props.size(), 2);
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,3]", PropToString(out_prop0));
const OpInfo::TensorProperties out_prop1 = out_props[1];
EXPECT_EQ("float: [1,3]", PropToString(out_prop1));
}
{
for (int i = 0; i < item.graph.node_size(); i++) {
auto* node = item.graph.mutable_node(i);
if (node->name() == "y") {
node->mutable_attr()->erase("_handle_dtypes");
node->mutable_attr()->erase("_handle_shapes");
break;
}
}
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props =
properties.GetOutputProperties("FunctionWithDtResourceInput");
EXPECT_EQ(out_props.size(), 2);
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: ?", PropToString(out_prop0));
const OpInfo::TensorProperties out_prop1 = out_props[1];
EXPECT_EQ("float: [1,3]", PropToString(out_prop1));
}
}
TEST_F(GraphPropertiesTest, FunctionWithConstInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(function_lib_));
Output shape = ops::Const(s.WithOpName("shape"), {1, 2, 3, 4});
Output value = ops::Const(s.WithOpName("value"), 0.1f, {});
auto builder = tensorflow::NodeBuilder("MyFillFunc", "MyFillFunc",
s.graph()->op_registry());
tensorflow::Node* func_op;
auto _shape = tensorflow::ops::AsNodeOut(s, shape);
auto _value = tensorflow::ops::AsNodeOut(s, value);
TF_ASSERT_OK(
builder.Input(_shape).Input(_value).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyFillFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,2,3,4]", PropToString(out_prop0));
}
TEST_F(GraphPropertiesTest, FunctionWithIdentityOfConstInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(function_lib_));
Output shape_ = ops::Const(s.WithOpName("shape_"), {1, 2, 3, 4});
Output shape = ops::Identity(s.WithOpName("shape"), shape_);
Output value = ops::Const(s.WithOpName("value"), 0.1f, {});
auto builder = tensorflow::NodeBuilder("MyFillFunc", "MyFillFunc",
s.graph()->op_registry());
tensorflow::Node* func_op;
auto _shape = tensorflow::ops::AsNodeOut(s, shape);
auto _value = tensorflow::ops::AsNodeOut(s, value);
TF_ASSERT_OK(
builder.Input(_shape).Input(_value).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyFillFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,2,3,4]", PropToString(out_prop0));
}
TEST_F(GraphPropertiesTest, FunctionReturnTensorValue) {
FunctionDefLibrary library;
*library.add_function() = FunctionDefHelper::Create(
"MyFunc",
{"x: int32"},
{"out: int32"},
{},
{{{"a"}, "Identity", {"x"}, {{"T", DataType::DT_INT32}}}},
{{"out", "a:output:0"}});
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(library));
Output shape = ops::Const(s.WithOpName("shape"), {5, 7}, {2});
auto _shape = tensorflow::ops::AsNodeOut(s, shape);
auto builder =
tensorflow::NodeBuilder("MyFunc", "MyFunc", s.graph()->op_registry());
tensorflow::Node* func_op;
TF_ASSERT_OK(builder.Input(_shape).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("int32: [2]", PropToString(out_prop0));
EXPECT_TRUE(out_prop0.has_value());
ExpectTensorValues({5, 7}, out_prop0.value());
ExpectTensorValues({5, 7},
properties.GetInputProperties("MyFunc")[0].value());
}
TEST_F(GraphPropertiesTest, ArithmeticFunctionReturnTensorValue) {
FunctionDefLibrary library;
*library.add_function() = FunctionDefHelper::Create(
"MyFunc",
{"x: int32", "y: int32"},
{"out: int32"},
{},
{{{"a"}, "Add", {"x", "y"}, {{"T", DataType::DT_INT32}}}},
{{"out", "a:z:0"}});
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(library));
Output shape = ops::Const(s.WithOpName("shape"), {5, 7}, {2});
auto _shape = tensorflow::ops::AsNodeOut(s, shape);
auto builder =
tensorflow::NodeBuilder("MyFunc", "MyFunc", s.graph()->op_registry());
tensorflow::Node* func_op;
TF_ASSERT_OK(
builder.Input(_shape).Input(_shape).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
false,
true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("int32: [2]", PropToString(out_prop0));
EXPECT_FALSE(out_prop0.has_value());
}
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
true,
true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("int32: [2]", PropToString(out_prop0));
EXPECT_TRUE(out_prop0.has_value());
ExpectTensorValues({10, 14}, out_prop0.value());
ExpectTensorValues({5, 7},
properties.GetInputProperties("MyFunc")[0].value());
ExpectTensorValues({5, 7},
properties.GetInputProperties("MyFunc")[1].value());
}
}
TEST_F(GraphPropertiesTest, ArithmeticFunctionReturnTensorValueFloat) {
FunctionDefLibrary library;
*library.add_function() = FunctionDefHelper::Create(
"MyFunc",
{"x: float", "y: float"},
{"out: float"},
{},
{{{"a"}, "Add", {"x", "y"}, {{"T", DataType::DT_FLOAT}}}},
{{"out", "a:z:0"}});
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(library));
Output x1 = ops::Const(s.WithOpName("x1"), {5.0f, 7.0f}, {2});
  Output x2 = ops::Identity(s.WithOpName("x2"), x1);
auto _x1 = tensorflow::ops::AsNodeOut(s, x1);
auto _x2 = tensorflow::ops::AsNodeOut(s, x2);
auto builder =
tensorflow::NodeBuilder("MyFunc", "MyFunc", s.graph()->op_registry());
tensorflow::Node* func_op;
TF_ASSERT_OK(builder.Input(_x1).Input(_x2).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
false,
true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [2]", PropToString(out_prop0));
EXPECT_FALSE(out_prop0.has_value());
}
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
true,
true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [2]", PropToString(out_prop0));
EXPECT_TRUE(out_prop0.has_value());
ExpectFloatTensorValues({10.0, 14.0}, out_prop0.value());
ExpectFloatTensorValues({5.0, 7.0},
properties.GetInputProperties("MyFunc")[0].value());
ExpectFloatTensorValues({5.0, 7.0},
properties.GetInputProperties("MyFunc")[1].value());
}
}
TEST_F(GraphPropertiesTest, FunctionWithScalarInput) {
FunctionDefLibrary library;
*library.add_function() = FunctionDefHelper::Create(
"MyFunc",
{"x: float"},
{"out: float"},
{},
{{{"a"}, "Identity", {"x"}, {{"T", DataType::DT_FLOAT}}}},
{{"out", "a:output:0"}});
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(library));
Output placeholder =
ops::Placeholder(s.WithOpName("Placeholder"), DataType::DT_FLOAT,
ops::Placeholder::Shape(TensorShape({})));
Output identity = ops::Identity(s.WithOpName("Identity"), placeholder);
auto _identity = tensorflow::ops::AsNodeOut(s, identity);
auto builder =
tensorflow::NodeBuilder("MyFunc", "MyFunc", s.graph()->op_registry());
tensorflow::Node* func_op;
TF_ASSERT_OK(builder.Input(_identity).Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
EXPECT_GT(item.graph.versions().producer(), 21);
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(true));
const auto out_props = properties.GetOutputProperties("MyFunc");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ(DT_FLOAT, out_prop0.dtype());
EXPECT_FALSE(out_prop0.shape().unknown_rank());
}
TEST_F(GraphPropertiesTest, SimpleFunctionStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"simple_function.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyAdd_55e046a8");
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ(DT_FLOAT, out_prop.dtype());
EXPECT_FALSE(out_prop.shape().unknown_rank());
EXPECT_EQ(2, out_prop.shape().dim_size());
EXPECT_EQ(1, out_prop.shape().dim(0).size());
EXPECT_EQ(2, out_prop.shape().dim(1).size());
const auto in_props = properties.GetInputProperties("MyAdd_55e046a8");
EXPECT_EQ(2, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ("float: [1,2]", PropToString(in_prop));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,2]", PropToString(in_prop1));
}
TEST_F(GraphPropertiesTest, LargeFunctionStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"large_function_graph.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("y0");
EXPECT_EQ(2, out_props.size());
const OpInfo::TensorProperties& out_prop0 = out_props[0];
EXPECT_EQ("float: [128,112,112,64]", PropToString(out_prop0));
const OpInfo::TensorProperties& out_prop1 = out_props[1];
EXPECT_EQ("float: [128,112,112,24]", PropToString(out_prop1));
const auto in_props = properties.GetInputProperties("y0");
EXPECT_EQ(4, in_props.size());
const OpInfo::TensorProperties& in_prop0 = in_props[0];
EXPECT_EQ("float: [64]", PropToString(in_prop0));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,1,24,64]", PropToString(in_prop1));
const OpInfo::TensorProperties& in_prop2 = in_props[2];
EXPECT_EQ("float: [128,224,224,3]", PropToString(in_prop2));
const OpInfo::TensorProperties& in_prop3 = in_props[3];
EXPECT_EQ("float: [7,7,3,8]", PropToString(in_prop3));
}
TEST_F(GraphPropertiesTest, LargeFunctionWithMultipleOutputs) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_functional_while.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyFunc_AenMyWWx1Us");
EXPECT_EQ(2, out_props.size());
const OpInfo::TensorProperties& out_prop0 = out_props[0];
EXPECT_EQ(DT_INT32, out_prop0.dtype());
EXPECT_FALSE(out_prop0.shape().unknown_rank());
const OpInfo::TensorProperties& out_prop1 = out_props[1];
EXPECT_EQ(DT_FLOAT, out_prop1.dtype());
EXPECT_FALSE(out_prop1.shape().unknown_rank());
}
TEST_F(GraphPropertiesTest, FunctionWithErrorStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_error.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyAdd_yabA4wXEdM4");
EXPECT_EQ(1, out_props.size());
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ(DT_FLOAT, out_prop.dtype());
EXPECT_TRUE(out_prop.shape().unknown_rank());
const auto in_props = properties.GetInputProperties("MyAdd_yabA4wXEdM4");
EXPECT_EQ(2, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ("float: [1,2]", PropToString(in_prop));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,2]", PropToString(in_prop1));
}
TEST_F(GraphPropertiesTest, FunctionSwitchStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_switch.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyAdd_MPaeanipb7o");
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ(DT_FLOAT, out_prop.dtype());
EXPECT_EQ("float: [1,2]", PropToString(out_prop));
const auto in_props = properties.GetInputProperties("MyAdd_MPaeanipb7o");
EXPECT_EQ(2, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ("float: [1,2]", PropToString(in_prop));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,2]", PropToString(in_prop1));
}
TEST_F(GraphPropertiesTest, FunctionSwitch2StaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_switch_2.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyAdd_MPaeanipb7o");
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ("float: [1,2]", PropToString(out_prop));
const auto in_props = properties.GetInputProperties("MyAdd_MPaeanipb7o");
EXPECT_EQ(2, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ("float: [1,2]", PropToString(in_prop));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,2]", PropToString(in_prop1));
}
TEST_F(GraphPropertiesTest, FunctionSwitchShapesStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"function_switch_shapes.pbtxt");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto out_props = properties.GetOutputProperties("MyAdd_lEKAAnIwI5I");
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ("float: [1,2]", PropToString(out_prop));
const auto in_props = properties.GetInputProperties("MyAdd_lEKAAnIwI5I");
EXPECT_EQ(2, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ("float: [1,2]", PropToString(in_prop));
const OpInfo::TensorProperties& in_prop1 = in_props[1];
EXPECT_EQ("float: [1,3]", PropToString(in_prop1));
}
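// In the assertions below, unknown dimension sizes appear as symbolic values
// <= -2; two dimensions carrying the same symbolic value are known to be
// equal even though their concrete size is unknown.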
TEST_F(GraphPropertiesTest, SymbolicShapes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
Output b =
ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape(PartialTensorShape({-1})));
Output c = ops::Identity(s.WithOpName("c"), a);
Output d = ops::Identity(s.WithOpName("d"), b);
Output e = ops::Add(s.WithOpName("e"), c, d);
Output f = ops::Add(s.WithOpName("f"), a, c);
Output zero = ops::Const(s.WithOpName("zero"), 0.0f, {});
Output g = ops::Shape(s.WithOpName("g"), c);
Output h = ops::Fill(s.WithOpName("h"), g, zero);
Output zero_idx = ops::Const(s.WithOpName("zero_idx"), {0}, {1});
Output j = ops::Sum(s.WithOpName("j"), a, zero_idx);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto shape_a = properties.GetOutputProperties("a").at(0).shape();
const auto shape_c = properties.GetOutputProperties("c").at(0).shape();
EXPECT_EQ(2, shape_a.dim_size());
EXPECT_EQ(shape_a.dim_size(), shape_c.dim_size());
EXPECT_GE(-2, shape_a.dim(0).size());
EXPECT_EQ(shape_a.dim(0).size(), shape_c.dim(0).size());
EXPECT_GE(-2, shape_a.dim(1).size());
EXPECT_EQ(shape_a.dim(1).size(), shape_c.dim(1).size());
PartialTensorShape shape(shape_a);
EXPECT_FALSE(shape.IsFullyDefined());
EXPECT_FALSE(shape.unknown_rank());
const auto shape_b = properties.GetOutputProperties("b").at(0).shape();
const auto shape_d = properties.GetOutputProperties("d").at(0).shape();
EXPECT_EQ(1, shape_b.dim_size());
EXPECT_EQ(shape_b.dim_size(), shape_d.dim_size());
EXPECT_GE(-2, shape_b.dim(0).size());
EXPECT_NE(shape_a.dim(0).size(), shape_b.dim(0).size());
EXPECT_EQ(shape_b.dim(0).size(), shape_d.dim(0).size());
const auto shape_e = properties.GetOutputProperties("e").at(0).shape();
ASSERT_EQ(2, shape_e.dim_size());
EXPECT_EQ(shape_e.dim(0).size(), shape_c.dim(0).size());
EXPECT_NE(shape_e.dim(1).size(), shape_c.dim(1).size());
EXPECT_NE(shape_e.dim(0).size(), shape_d.dim(0).size());
const auto shape_f = properties.GetOutputProperties("f").at(0).shape();
ASSERT_EQ(2, shape_f.dim_size());
EXPECT_EQ(shape_f.dim(0).size(), shape_a.dim(0).size());
EXPECT_EQ(shape_f.dim(1).size(), shape_a.dim(1).size());
const auto shape_h = properties.GetOutputProperties("h").at(0).shape();
  ASSERT_EQ(2, shape_h.dim_size());
EXPECT_EQ(shape_h.dim(0).size(), shape_c.dim(0).size());
EXPECT_EQ(shape_h.dim(1).size(), shape_c.dim(1).size());
const auto shape_j = properties.GetOutputProperties("j").at(0).shape();
ASSERT_EQ(1, shape_j.dim_size());
EXPECT_EQ(shape_j.dim(0).size(), shape_a.dim(1).size());
}
TEST_F(GraphPropertiesTest, DoNotValidateColocationConstraints) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1.0f, {1});
Output b = ops::Const(s.WithOpName("b"), 2.0f, {1});
Output c = ops::Const(s.WithOpName("c").ColocateWith(a), 3.0f, {1});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphDef optimized_graph;
for (const auto& node : item.graph.node()) {
if (node.name() != "a") {
*optimized_graph.add_node() = node;
}
}
item.graph.Swap(&optimized_graph);
GraphProperties properties(item);
TF_EXPECT_OK(properties.InferStatically(false));
}
TEST_F(GraphPropertiesTest, ShapeTracking) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
Output b =
ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape(PartialTensorShape({-1})));
Output zero = ops::Const(s.WithOpName("zero"), 0.0f, {});
auto shp = ops::ShapeN(s.WithOpName("shapes"), {a, b});
Output o1 = ops::Fill(s.WithOpName("o1"), shp[0], zero);
Output o2 = ops::Fill(s.WithOpName("o2"), shp[1], zero);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto shape_a = properties.GetOutputProperties("a").at(0).shape();
const auto shape_b = properties.GetOutputProperties("b").at(0).shape();
const auto shape_o1 = properties.GetOutputProperties("o1").at(0).shape();
const auto shape_o2 = properties.GetOutputProperties("o2").at(0).shape();
EXPECT_EQ(shape_a.DebugString(), shape_o1.DebugString());
EXPECT_EQ(shape_b.DebugString(), shape_o2.DebugString());
}
TEST_F(GraphPropertiesTest, FedNodes) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
{
GraphProperties properties(item);
Status s = properties.InferStatically(false);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "Const") {
continue;
}
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
const OpInfo::TensorProperties& out_prop = out_props[0];
if (node.name() == "x") {
EXPECT_FALSE(in_prop.shape().unknown_rank());
EXPECT_EQ(1, in_prop.shape().dim_size());
EXPECT_EQ(2, in_prop.shape().dim(0).size());
EXPECT_TRUE(out_prop.shape().unknown_rank());
} else if (node.op() == "Square" || node.op() == "AddN") {
EXPECT_TRUE(in_prop.shape().unknown_rank());
EXPECT_TRUE(out_prop.shape().unknown_rank());
}
}
}
{
GraphProperties properties(item);
Status s = properties.InferStatically(true);
TF_ASSERT_OK(s);
for (const auto& node : item.graph.node()) {
if (node.op() == "Square" || node.op() == "AddN") {
const auto in_props = properties.GetInputProperties(node.name());
EXPECT_EQ(1, in_props.size());
const OpInfo::TensorProperties& in_prop = in_props[0];
EXPECT_EQ(DT_FLOAT, in_prop.dtype());
EXPECT_FALSE(in_prop.shape().unknown_rank());
EXPECT_EQ(2, in_prop.shape().dim_size());
const auto out_props = properties.GetOutputProperties(node.name());
EXPECT_EQ(1, out_props.size());
const OpInfo::TensorProperties& out_prop = out_props[0];
EXPECT_EQ(in_prop.dtype(), out_prop.dtype());
EXPECT_EQ(in_prop.shape().DebugString(),
out_prop.shape().DebugString());
}
}
}
}
TEST_F(GraphPropertiesTest, Performance) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
"large_graph.pbtxt.html");
TF_ASSERT_OK(ReadGraphDefFromFile(filename, &item.graph));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(
&item.graph,
FunctionLibraryDefinition(OpRegistry::Global(), item.graph.library()), 0,
true));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
}
TEST_F(GraphPropertiesTest, StridedSlicesOfShapes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape(PartialTensorShape({-1, -1})));
auto shp = ops::Shape(s.WithOpName("shape"), {a});
Output index1 = ops::Const(s.WithOpName("index1"), 0, {1});
Output index2 = ops::Const(s.WithOpName("index2"), 1, {1});
Output index3 = ops::Const(s.WithOpName("index3"), 2, {1});
Output b = ops::StridedSlice(s.WithOpName("b"), shp, index1, index2, index2);
Output c = ops::StridedSlice(s.WithOpName("c"), shp, index2, index3, index2);
Output zero = ops::Const(s.WithOpName("zero"), 0.0f, {});
Output o1 = ops::Fill(s.WithOpName("o1"), b, zero);
Output o2 = ops::Fill(s.WithOpName("o2"), c, zero);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(false));
const auto shape_a = properties.GetOutputProperties("a").at(0).shape();
const auto shape_o1 = properties.GetOutputProperties("o1").at(0).shape();
const auto shape_o2 = properties.GetOutputProperties("o2").at(0).shape();
EXPECT_EQ(2, shape_a.dim_size());
EXPECT_EQ(1, shape_o1.dim_size());
EXPECT_EQ(1, shape_o2.dim_size());
EXPECT_EQ(shape_a.dim(0).size(), shape_o1.dim(0).size());
EXPECT_EQ(shape_a.dim(1).size(), shape_o2.dim(0).size());
}
TEST_F(GraphPropertiesTest, StridedSliceOfShapeWithShrinkAxisMask) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output placeholder =
ops::Placeholder(scope.WithOpName("input_placeholder"), DT_FLOAT,
ops::Placeholder::Shape(TensorShape({5, 480, 40, 1})));
auto input_shape = ops::Shape(scope.WithOpName("input_shape"), placeholder);
Output begin = ops::Const(scope.WithOpName("begin"), {0}, {1});
Output end = ops::Const(scope.WithOpName("end"), {3}, {1});
Output stride = ops::Const(scope.WithOpName("stride"), {1}, {1});
Output slice =
ops::StridedSlice(scope.WithOpName("slice"), input_shape, begin, end,
stride, ops::StridedSlice::ShrinkAxisMask(1));
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
false,
true));
EXPECT_FALSE(properties.GetOutputProperties("slice").at(0).has_value());
}
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
EXPECT_TRUE(properties.GetOutputProperties("slice").at(0).has_value());
const auto slice_value =
properties.GetOutputProperties("slice").at(0).value();
ExpectTensorValues({5}, slice_value);
}
}
TEST_F(GraphPropertiesTest, ValuePropagationThroughArithmeticOps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), {5, 7}, {2});
Output b = ops::Const(s.WithOpName("b"), {8, 8}, {2});
Output c = ops::Const(s.WithOpName("c"), {2, 2}, {2});
Output a1 = ops::OnesLike(s.WithOpName("a1"), a);
Output a_plus_one = ops::Add(s.WithOpName("a_plus_one"), a, a1);
Output a_plus_a = ops::Add(s.WithOpName("a_plus_a"), a, a);
Output b_plus_2a = ops::Add(s.WithOpName("b_plus_2a"), b, a_plus_a);
Output c_plus_b_plus_2a =
ops::Add(s.WithOpName("c_plus_b_plus_2a"), c, b_plus_2a);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto& a_plus_one_prop = properties.GetOutputProperties("a_plus_one")[0];
EXPECT_EQ("int32: [2]", PropToString(a_plus_one_prop));
EXPECT_TRUE(a_plus_one_prop.has_value());
ExpectTensorValues({6, 8}, a_plus_one_prop.value());
const auto& a_plus_a_prop = properties.GetOutputProperties("a_plus_a")[0];
EXPECT_EQ("int32: [2]", PropToString(a_plus_a_prop));
EXPECT_TRUE(a_plus_a_prop.has_value());
ExpectTensorValues({10, 14}, a_plus_a_prop.value());
const auto& b_plus_2a_prop = properties.GetOutputProperties("b_plus_2a")[0];
EXPECT_EQ("int32: [2]", PropToString(b_plus_2a_prop));
EXPECT_TRUE(b_plus_2a_prop.has_value());
ExpectTensorValues({18, 22}, b_plus_2a_prop.value());
const auto& c_plus_b_plus_2a_prop =
properties.GetOutputProperties("c_plus_b_plus_2a")[0];
EXPECT_EQ("int32: [2]", PropToString(c_plus_b_plus_2a_prop));
EXPECT_TRUE(c_plus_b_plus_2a_prop.has_value());
ExpectTensorValues({20, 24}, c_plus_b_plus_2a_prop.value());
}
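// The tests below rely on the _same_output_for_iterations and
// _output_shape_vector attributes, which annotate a node's output shapes;
// the annotations take effect only when InferStatically is called with
// shape annotations enabled.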
TEST_F(GraphPropertiesTest, ShapeAnnotation) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Input", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", PartialTensorShape({-1, -1}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("Identity", "Identity")
.Attr("dtype", DT_FLOAT)
.Attr("_same_output_for_iterations", true)
.Attr("_output_shape_vector", {TensorShape({5, 7})})
.Input("Input", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
false,
true));
const auto props = properties.GetOutputProperties("Identity");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ("float: [-1,-1]", PropToString(prop));
}
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto props = properties.GetOutputProperties("Identity");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ("float: [5,7]", PropToString(prop));
}
}
TEST_F(GraphPropertiesTest, ShapeAnnotationWithCompatibleShapes) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Input", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", PartialTensorShape({-1, 100}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("Identity", "Identity")
.Attr("dtype", DT_FLOAT)
.Attr("_same_output_for_iterations", true)
.Attr("_output_shape_vector", {TensorShape({10, 100})})
.Input("Input", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto props = properties.GetOutputProperties("Identity");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ("float: [10,100]", PropToString(prop));
}
TEST_F(GraphPropertiesTest, ShapeAnnotationWithIncompatibleShapes) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Input", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", PartialTensorShape({-1, 100}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("Identity", "Identity")
.Attr("dtype", DT_FLOAT)
.Attr("_same_output_for_iterations", true)
.Attr("_output_shape_vector", {TensorShape({10, 10})})
.Input("Input", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto props = properties.GetOutputProperties("Identity");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ("float: [-1,100]", PropToString(prop));
}
TEST_F(GraphPropertiesTest, ShapeAnnotationWithoutInferenceFn) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("Input", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", PartialTensorShape({-1, -1}))
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(
NodeDefBuilder("TestOpWithNoInferenceFn", "TestOpWithNoInferenceFn")
.Attr("_same_output_for_iterations", true)
.Attr("_output_shape_vector", {TensorShape({10, 100})})
.Input("Input", 0, DT_FLOAT)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto props = properties.GetOutputProperties("TestOpWithNoInferenceFn");
EXPECT_EQ(1, props.size());
const OpInfo::TensorProperties& prop = props[0];
EXPECT_EQ(DT_FLOAT, prop.dtype());
EXPECT_EQ(2, prop.shape().dim_size());
EXPECT_EQ("float: [10,100]", PropToString(prop));
}
TEST_F(GraphPropertiesTest, PartitionedCallOp) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary library;
FunctionDef called_func = FunctionDefHelper::Create(
"identity_function",
{"arg0: int32"},
{"ret0: int32"},
{},
{{{"Identity"}, "Identity", {"arg0"}, {{"T", DT_INT32}}}},
{{"ret0", "Identity:output:0"}});
*library.add_function() = called_func;
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library));
Output in = ops::Const(root, {3, 1, 2, 0});
NameAttrList b_name_attr;
b_name_attr.set_name("identity_function");
ops::PartitionedCall call(root.WithOpName("identity_call"), {in}, {DT_INT32},
b_name_attr);
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
false,
true));
EXPECT_EQ("int32: [4]",
PropToString(properties.GetOutputProperties("identity_call")[0]));
}
TEST_F(GraphPropertiesTest, NonTrivialInputPartitionedCallOp) {
auto f = FunctionDefHelper::Create(
"FunctionWhichAdds",
{"arg0: int32", "arg1: int32"},
{"ret0: int32"},
{},
{{{"a"}, "Add", {"arg0", "arg1"}, {{"T", DT_INT32}}}},
{{"ret0", "a:z:0"}});
FunctionDefLibrary function_lib;
function_lib.add_function()->Swap(&f);
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(function_lib));
PartialTensorShape input_shape({2, 2, -1});
Output in1 =
ops::Placeholder(root, DT_INT32, ops::Placeholder::Shape(input_shape));
Output in2 =
ops::Placeholder(root, DT_INT32, ops::Placeholder::Shape(input_shape));
NameAttrList b_name_attr;
b_name_attr.set_name("FunctionWhichAdds");
ops::PartitionedCall call(root.WithOpName("add_call"), {in1, in2}, {DT_INT32},
b_name_attr);
GrapplerItem item;
TF_ASSERT_OK(root.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
true,
false,
true));
EXPECT_EQ("int32: [2,2,-1]",
PropToString(properties.GetOutputProperties("add_call")[0]));
}
TEST_F(GraphPropertiesTest, ShapeAnnotatedFunctionOp) {
auto f = FunctionDefHelper::Create(
"FuncShapeCannotBeInferred",
{},
{"output: float"},
{},
{
{{"p"}, "Placeholder", {}, {{"dtype", DataType::DT_FLOAT}}},
},
{{"output", "p:output:0"}});
FunctionDefLibrary function_lib;
function_lib.add_function()->Swap(&f);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_ASSERT_OK(s.graph()->AddFunctionLibrary(function_lib));
tensorflow::Node* func_op;
TensorShapeProto output_shape;
output_shape.set_unknown_rank(false);
output_shape.add_dim()->set_size(1);
output_shape.add_dim()->set_size(2);
output_shape.add_dim()->set_size(3);
output_shape.add_dim()->set_size(4);
TF_ASSERT_OK(tensorflow::NodeBuilder("f", "FuncShapeCannotBeInferred",
s.graph()->op_registry())
.Attr("_execution_count", 1)
.Attr("_same_output_for_iterations", true)
.Attr("_output_dtype_vector", {DataType::DT_FLOAT})
.Attr("_output_shape_vector", {output_shape})
.Finalize(s.graph(), &func_op));
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
false,
false));
const auto out_props = properties.GetOutputProperties("f");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: ?", PropToString(out_prop0));
}
{
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(
false,
true,
true));
const auto out_props = properties.GetOutputProperties("f");
const OpInfo::TensorProperties out_prop0 = out_props[0];
EXPECT_EQ("float: [1,2,3,4]", PropToString(out_prop0));
}
}
TEST_F(GraphPropertiesTest,
SymbolicShapeInferenceWithReshapeOpsSharingShapeVector) {
GrapplerItem item;
TF_ASSERT_OK(NodeDefBuilder("data", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({10, 10, 10, 10}))
.Finalize(item.graph.add_node()));
Tensor num_segments(DT_INT32, TensorShape({}));
test::FillIota<int>(&num_segments, 3);
TF_ASSERT_OK(NodeDefBuilder("num_segments", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", num_segments)
.Finalize(item.graph.add_node()));
Tensor minus_one(DT_INT32, TensorShape({1}));
test::FillIota<int>(&minus_one, -1);
TF_ASSERT_OK(NodeDefBuilder("minus_one", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", minus_one)
.Finalize(item.graph.add_node()));
Tensor plus_ten(DT_INT32, TensorShape({1}));
test::FillIota<int>(&plus_ten, 10);
TF_ASSERT_OK(NodeDefBuilder("plus_ten", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", plus_ten)
.Finalize(item.graph.add_node()));
Tensor axis(DT_INT32, TensorShape({}));
test::FillIota<int>(&axis, -1);
TF_ASSERT_OK(NodeDefBuilder("axis", "Const")
.Attr("dtype", DT_INT32)
.Attr("value", axis)
.Finalize(item.graph.add_node()));
std::vector<NodeDefBuilder::NodeOut> inputs(2);
inputs[0] = NodeDefBuilder::NodeOut{"minus_one", 0, DT_INT32};
inputs[1] = NodeDefBuilder::NodeOut{"plus_ten", 0, DT_INT32};
TF_ASSERT_OK(NodeDefBuilder("concat", "ConcatV2")
.Input(inputs)
.Input("axis", 0, DT_INT32)
.Attr("N", 2)
.Attr("T", DT_INT32)
.Attr("Tidx", DT_INT32)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("segment_ids_", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("segment_ids_shape_before_reshape", "Shape")
.Input("segment_ids_", 0, DT_FLOAT)
.Attr("T", DT_FLOAT)
.Attr("out_type", DT_INT32)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("segment_ids", "Reshape")
.Input("segment_ids_", 0, DT_FLOAT)
.Input("concat", 0, DT_INT32)
.Attr("T", DT_FLOAT)
.Attr("Tshape", DT_INT32)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("y", "UnsortedSegmentSum")
.Input("data", 0, DT_FLOAT)
.Input("segment_ids", 0, DT_INT32)
.Input("num_segments", 0, DT_INT32)
.Attr("T", DT_FLOAT)
.Attr("Tindices", DT_INT32)
.Attr("Tnumsegments", DT_INT32)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("x1", "Placeholder")
.Attr("dtype", DT_FLOAT)
.Finalize(item.graph.add_node()));
TF_ASSERT_OK(NodeDefBuilder("y1", "Reshape")
.Input("x1", 0, DT_FLOAT)
.Input("concat", 0, DT_INT32)
.Attr("T", DT_FLOAT)
.Attr("Tshape", DT_INT32)
.Finalize(item.graph.add_node()));
GraphProperties properties(item);
TF_ASSERT_OK(properties.InferStatically(true));
const auto& y1_output_properties = properties.GetOutputProperties("y1");
EXPECT_EQ(y1_output_properties.size(), 1);
EXPECT_EQ(y1_output_properties[0].shape().dim_size(), 2);
EXPECT_LT(y1_output_properties[0].shape().dim(0).size(), 0);
EXPECT_EQ(y1_output_properties[0].shape().dim(1).size(), 10);
}
TEST(HelperFunctions, IsShapeFullyDefinedIntegerVectorOrScalar) {
NodeDef node_def;
OpRegistrationData op_reg_data;
OpDefBuilder b("dummy");
CHECK(b.Finalize(&op_reg_data).ok());
std::vector<std::unique_ptr<std::vector<ShapeAndType>>>
input_handle_shapes_and_types;
InferenceContext ic(0, node_def, op_reg_data.op_def,
{},
{},
{},
std::move(input_handle_shapes_and_types));
ShapeHandle fully_defined_vector = ic.MakeShape(
{ic.MakeDim(4), ic.MakeDim(5), ic.MakeDim(6), ic.MakeDim(7)});
ShapeHandle vector_with_unknown = ic.MakeShape(
{ic.MakeDim(4), ic.MakeDim(5), ic.UnknownDim(), ic.MakeDim(7)});
ShapeHandle vector_with_unknown_from_const = ic.MakeShape(
{ic.MakeDim(4), ic.MakeDim(INT64_MAX), ic.MakeDim(6), ic.MakeDim(7)});
ShapeHandle rank_1_vector = ic.MakeShape({ic.MakeDim(4)});
EXPECT_TRUE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, fully_defined_vector, DT_INT32));
EXPECT_TRUE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, fully_defined_vector, DT_INT64));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, fully_defined_vector, DT_FLOAT));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, vector_with_unknown, DT_INT32));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, vector_with_unknown_from_const, DT_INT32));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, rank_1_vector, ic.UnknownShape(), DT_INT32));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, ic.UnknownShape(), fully_defined_vector, DT_INT32));
EXPECT_FALSE(IsShapeFullyDefinedIntegerVectorOrScalar(
&ic, fully_defined_vector, vector_with_unknown_from_const, DT_INT32));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/graph_properties.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/graph_properties_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19318bbc-47c6-4b3c-a78a-c14e89b2b652 | cpp | tensorflow/tensorflow | virtual_placer | tensorflow/core/grappler/costs/virtual_placer.cc | tensorflow/core/grappler/costs/virtual_placer_test.cc | #include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
VirtualPlacer::VirtualPlacer(
const std::unordered_map<string, DeviceProperties>& devices)
: devices_(devices),
default_job_name_lowercase_("localhost") {
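  // Build a map from the lowercased fully-qualified name (LFQN) of each
  // device to its name as it appears in the cluster, so that lookups are
  // insensitive to case and to shorthand device names.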
lfqn_map_.reserve(devices_.size());
for (const auto& kv : devices_) {
const auto lfqn = to_lfqn_or_empty(kv.first);
if (lfqn.empty()) {
LOG(ERROR) << "VirtualPlacer couldn't parse device name from cluster: "
<< kv.first;
} else {
lfqn_map_[lfqn] = kv.first;
}
}
if (devices_.empty()) {
default_device_name_ = "UNKNOWN";
DeviceProperties& prop = devices_["UNKNOWN"];
prop.set_type("UNKNOWN");
} else if (devices_.size() == 1) {
default_device_name_ = devices_.begin()->first;
} else {
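    // With multiple devices, prefer the GPU with the lowest id, then the CPU
    // with the lowest id, and otherwise fall back to the first device listed.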
std::map<int, string> cpu_devices;
std::map<int, string> gpu_devices;
for (const auto& kv : lfqn_map_) {
const auto& lfqn = kv.first;
const auto& cluster_device_name = kv.second;
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(lfqn, &parsed_name);
if (parsed) {
const auto type = absl::AsciiStrToLower(parsed_name.type);
if (type == "gpu") {
gpu_devices[parsed_name.id] = cluster_device_name;
} else if (type == "cpu") {
cpu_devices[parsed_name.id] = cluster_device_name;
}
}
}
if (!gpu_devices.empty()) {
default_device_name_ = gpu_devices.begin()->second;
} else if (!cpu_devices.empty()) {
default_device_name_ = cpu_devices.begin()->second;
} else {
default_device_name_ = devices_.begin()->first;
}
}
VLOG(3) << "default device name: " << default_device_name_;
std::unordered_set<string> job_names_from_cluster;
for (const auto& device : lfqn_map_) {
const auto& lfqn = device.first;
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(lfqn, &parsed_name);
if (parsed && !parsed_name.job.empty()) {
job_names_from_cluster.insert(parsed_name.job);
if (job_names_from_cluster.size() > 1) {
break;
}
}
}
if (job_names_from_cluster.size() == 1) {
auto it = job_names_from_cluster.begin();
default_job_name_lowercase_ = *it;
}
VLOG(3) << "default job name: " << default_job_name_lowercase_;
}
const DeviceProperties& VirtualPlacer::get_device(const NodeDef& node) const {
string device = get_canonical_device_name(node);
VLOG(3) << "node.name=" << node.name() << " node.device=" << node.device()
<< " is placed on: " << device;
auto it = devices_.find(device);
DCHECK(it != devices_.end());
return it->second;
}
string VirtualPlacer::get_canonical_device_name(const NodeDef& node) const {
if (node.device().empty()) {
return default_device_name_;
}
const auto lfqn = to_lfqn_or_empty(node.device());
if (lfqn.empty()) {
return default_device_name_;
}
const auto it = lfqn_map_.find(lfqn);
if (it != lfqn_map_.end()) {
return it->second;
}
return default_device_name_;
}
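// Normalizes a device name to a lowercased fully-qualified name of the form
// /job:<j>/replica:<r>/task:<t>/device:<type>:<id>, or returns an empty
// string if the name cannot be parsed.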
string VirtualPlacer::to_lfqn_or_empty(const string& device_name) const {
DeviceNameUtils::ParsedName parsed_name;
const auto lowercase_name = absl::AsciiStrToLower(device_name);
bool parsed = DeviceNameUtils::ParseFullName(lowercase_name, &parsed_name);
if (!parsed) {
parsed = DeviceNameUtils::ParseLocalName(lowercase_name, &parsed_name);
parsed_name.job = "localhost";
}
if (!parsed) {
if (lowercase_name == "gpu" || lowercase_name == "cpu") {
parsed_name.job = "localhost";
parsed_name.type = lowercase_name;
parsed = true;
}
}
if (!parsed) {
return {};
}
if (parsed_name.job.empty()) {
parsed_name.job = default_job_name_lowercase_;
}
parsed_name.type = absl::AsciiStrToLower(parsed_name.type);
string lfqn = strings::StrCat(
"/job:", parsed_name.job, "/replica:", parsed_name.replica,
"/task:", parsed_name.task, "/device:", parsed_name.type, ":",
parsed_name.id);
return lfqn;
}
}
} | #include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
TEST(VirtualPlacerTest, LocalDevices) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, ShortNames) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/CPU:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/GPU:0", placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/CPU:0", placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/GPU:0", placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, PlacementOnNonDefaultDevice) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties tpu_device;
tpu_device.set_type("TPU");
devices["/job:localhost/replica:0/task:0/device:TPU:0"] = tpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("/device:TPU:0");
EXPECT_EQ("TPU", placer.get_device(node).type());
EXPECT_EQ("/job:localhost/replica:0/task:0/device:TPU:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, EmptyJobName) {
for (const string& job_name : {"localhost", "worker", "worker_train"}) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices[strings::StrCat("/job:", job_name, "/replica:0/task:0/cpu:0")] =
cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices[strings::StrCat("/job:", job_name,
"/replica:0/task:0/device:GPU:0")] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
node.set_device("/device:CPU:0");
EXPECT_EQ(strings::StrCat("/job:", job_name, "/replica:0/task:0/cpu:0"),
placer.get_canonical_device_name(node));
node.set_device("/device:GPU:0");
EXPECT_EQ(
strings::StrCat("/job:", job_name, "/replica:0/task:0/device:GPU:0"),
placer.get_canonical_device_name(node));
}
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:ps/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:worker/replica:0/task:0/cpu:0"] = cpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
node.set_device("/device:CPU:0");
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
}
string GetDefaultDeviceName(
const std::unordered_map<string, DeviceProperties>& devices) {
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
return placer.get_canonical_device_name(node);
}
TEST(VirtualPlacerTest, DefaultDevice) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:worker/replica:0/task:0/cpu:0"] = cpu_device;
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
GetDefaultDeviceName(devices));
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
for (int i = 0; i < 8; i++) {
devices[strings::StrCat("/job:worker/replica:0/task:0/gpu:", i)] =
gpu_device;
EXPECT_EQ("/job:worker/replica:0/task:0/gpu:0",
GetDefaultDeviceName(devices));
}
}
TEST(VirtualPlacerTest, MultiReplica) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
for (int i = 0; i < 8; i++) {
devices[strings::StrCat("/job:worker/replica:", i, "/task:0/cpu:0")] =
cpu_device;
for (int j = 0; j < 8; j++) {
devices[strings::StrCat("/job:worker/replica:", i, "/task:0/gpu:", j)] =
gpu_device;
}
}
std::unique_ptr<VirtualCluster> cluster(new VirtualCluster(devices));
std::unique_ptr<VirtualPlacer> placer(new VirtualPlacer(devices));
auto get_device_name = [&placer](const string& device) -> string {
NodeDef node;
node.set_op("Conv2D");
node.set_device(device);
return placer->get_canonical_device_name(node);
};
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
get_device_name("/replica:0/cpu:0"));
EXPECT_EQ("/job:worker/replica:2/task:0/cpu:0",
get_device_name("/replica:2/cpu:0"));
EXPECT_EQ("/job:worker/replica:7/task:0/cpu:0",
get_device_name("/replica:7/cpu:0"));
EXPECT_EQ("/job:worker/replica:3/task:0/gpu:0",
get_device_name("/replica:3/gpu:0"));
EXPECT_EQ("/job:worker/replica:5/task:0/gpu:3",
get_device_name("/replica:5/gpu:3"));
EXPECT_EQ("/job:worker/replica:4/task:0/gpu:7",
get_device_name("/replica:4/gpu:7"));
for (int i = 0; i < 4; i++) {
devices[strings::StrCat("/job:ps/replica:", i, "/task:0/cpu:0")] =
cpu_device;
}
cluster.reset(new VirtualCluster(devices));
placer.reset(new VirtualPlacer(cluster->GetDevices()));
EXPECT_EQ("/job:worker/replica:0/task:0/cpu:0",
get_device_name("/job:worker/replica:0/cpu:0"));
EXPECT_EQ("/job:worker/replica:7/task:0/gpu:3",
get_device_name("/job:worker/replica:7/gpu:3"));
EXPECT_EQ("/job:ps/replica:0/task:0/cpu:0",
get_device_name("/job:ps/replica:0/cpu:0"));
EXPECT_EQ("/job:ps/replica:1/task:0/cpu:0",
get_device_name("/job:ps/replica:1/cpu:0"));
EXPECT_EQ("/job:ps/replica:2/task:0/cpu:0",
get_device_name("/job:ps/replica:2/cpu:0"));
EXPECT_EQ("/job:ps/replica:3/task:0/cpu:0",
get_device_name("/job:ps/replica:3/cpu:0"));
}
TEST(VirtualPlacerTest, FallBackUnknown) {
std::unordered_map<string, DeviceProperties> devices;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("UNKNOWN", placer.get_device(node).type());
EXPECT_EQ("UNKNOWN", placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, FallBackCPU) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:my_job/replica:0/task:0/cpu:0"] = cpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
}
TEST(VirtualPlacerTest, RemoteDevices) {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
devices["/job:my_job/replica:0/task:0/cpu:0"] = cpu_device;
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
devices["/job:my_job/replica:0/task:0/device:GPU:0"] = gpu_device;
VirtualCluster cluster(devices);
VirtualPlacer placer(devices);
NodeDef node;
node.set_op("Conv2D");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0/cpu:0");
EXPECT_EQ("CPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/cpu:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0/device:GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("CPU");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("GPU:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
node.set_device("/job:my_job/replica:0/task:0");
EXPECT_EQ("GPU", placer.get_device(node).type());
EXPECT_EQ("/job:my_job/replica:0/task:0/device:GPU:0",
placer.get_canonical_device_name(node));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_placer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/virtual_placer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66a6130e-df50-475d-9fcc-fa07e2cdd66e | cpp | tensorflow/tensorflow | robust_stats | tensorflow/core/grappler/costs/robust_stats.cc | tensorflow/core/grappler/costs/robust_stats_test.cc | #include "tensorflow/core/grappler/costs/robust_stats.h"
#include <algorithm>
#include <cmath>
#include <utility>
namespace tensorflow {
namespace grappler {
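// Returns the median of the values in the given already-sorted vector, or
// 0.0 if the vector is empty.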
static double SortedMedian(const std::vector<double> &values) {
const int n = values.size();
if (n == 0) return 0.0;
if (n & 1) {
return values[n / 2];
} else {
return (values[n / 2] + values[n / 2 - 1]) / 2.0;
}
}
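// Returns the median of an unsorted vector; uses nth_element so only a
// partial sort of the input is performed.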
static double Median(std::vector<double> &&values) {
const size_t n = values.size();
if (n == 0) return 0;
const auto middle = values.begin() + (n / 2);
std::nth_element(values.begin(), middle, values.end());
if (n & 1) {
return *middle;
}
  const auto lower_middle = std::max_element(values.begin(), middle);
  // If the two middle values straddle (or touch) zero, their plain average
  // cannot overflow.
  if (*lower_middle <= 0 && *middle >= 0) {
    return (*lower_middle + *middle) / 2;
  }
  // Otherwise compute the midpoint in a form that avoids overflow when both
  // middle values are large and of the same sign.
  return *lower_middle + (*middle - *lower_middle) / 2;
}
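// Given a sorted vector, returns its median together with the median
// absolute deviation scaled by 1.4826, which makes the MAD a consistent
// estimator of the standard deviation for normally distributed data.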
static std::pair<double, double> ScaledMedianAbsoluteDeviation(
const std::vector<double> &sorted_values) {
double median = SortedMedian(sorted_values);
std::vector<double> deviations;
deviations.reserve(sorted_values.size());
for (double d : sorted_values) {
deviations.push_back(std::abs(d - median));
}
double mad = Median(std::move(deviations)) * 1.4826;
return std::pair<double, double>(median, mad);
}
RobustStats::RobustStats(const std::vector<double> &values)
: RobustStats(std::vector<double>(values)) {}
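// Sorts the values, records the extremes, and computes the robust mean and
// stddev via HuberMAD. Assumes a non-empty input vector.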
RobustStats::RobustStats(std::vector<double> &&values) {
std::sort(values.begin(), values.end());
lo_ = values[0];
hi_ = values.back();
HuberMAD(values);
}
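// One fixed-point iteration of a Huber-style mean estimate: values within
// `margin` of the current mean contribute their own value, while outliers
// contribute only a clipped +/- margin; the sum is averaged over the number
// of inliers.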
double UpdateHuberMean(const std::vector<double> &sorted_values, double mean,
double margin) {
int num_within = 0;
double sum = 0.0;
for (double d : sorted_values) {
if (d < mean - margin) {
sum -= margin;
} else if (d > mean + margin) {
sum += margin;
} else {
sum += d;
++num_within;
}
}
if (num_within > 0) {
return sum / num_within;
} else {
return mean;
}
}
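// Computes the robust mean and stddev: the initial estimates are the median
// and the scaled MAD, after which the mean is refined by up to 10 Huber
// iterations with margin c * stddev.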
void RobustStats::HuberMAD(const std::vector<double> &sorted_values) {
const std::pair<double, double> median_mad =
ScaledMedianAbsoluteDeviation(sorted_values);
mean_ = median_mad.first;
stddev_ = median_mad.second;
const double c = 1.5;
const double margin = c * stddev_;
if (margin > 0.0) {
for (int k = 0; k < 10; ++k) {
double old_mean = mean_;
mean_ = UpdateHuberMean(sorted_values, mean_, margin);
if (mean_ == old_mean) break;
}
}
}
}
} | #include "tensorflow/core/grappler/costs/robust_stats.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class RobustStatsTest : public ::testing::Test {
public:
void SetUp() override {
for (double d = 1.0; d <= 5.0; d += 1.0) {
values1_.push_back(5.0 - d);
values1_.push_back(5.0 + d);
values2_.push_back(25.0 - 2 * d);
values2_.push_back(25.0 + 2 * d);
values3_.push_back(-3.0 - d);
values3_.push_back(-3.0 + d);
}
values1_.push_back(5.0);
values3_.push_back(197.0);
values3_.push_back(-203.0);
}
std::vector<double> values1_;
std::vector<double> values2_;
std::vector<double> values3_;
};
TEST_F(RobustStatsTest, Simple) {
RobustStats s1(values1_);
EXPECT_EQ(5.0, s1.mean());
EXPECT_EQ(0.0, s1.lo());
EXPECT_EQ(10.0, s1.hi());
RobustStats s2(values2_);
EXPECT_EQ(25.0, s2.mean());
EXPECT_EQ(15.0, s2.lo());
EXPECT_EQ(35.0, s2.hi());
RobustStats s3(values3_);
EXPECT_EQ(-3.0, s3.mean());
EXPECT_EQ(-203.0, s3.lo());
EXPECT_EQ(197.0, s3.hi());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/robust_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/robust_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c2d5cdab-c5af-4a62-b278-95f45b56ecde | cpp | tensorflow/tensorflow | sig_node | tensorflow/core/grappler/graph_analyzer/sig_node.cc | tensorflow/core/grappler/graph_analyzer/sig_node_test.cc | #include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <algorithm>
#include "absl/strings/str_format.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
static constexpr bool debug = false;
SigNode::SigNode(const NodeDef* node) : node_(node) {}
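// Copies the links from a GenNode, keeping only the peers present in `map`
// (i.e. in the subgraph) and translating them to their SigNode counterparts.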
void SigNode::CopyLinks(const GenNode& from, const TranslationMap& map) {
hash_to_link_.clear();
hashed_peers_.clear();
std::map<LinkTag, Link> link_map;
CopyLinksPass1(from, map, &link_map);
CopyLinksPass2(&link_map);
}
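// Pass 1: bucket the links by their LinkTag and compute each tag's hash;
// links to nodes outside the subgraph are dropped.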
void SigNode::CopyLinksPass1(const GenNode& from, const TranslationMap& map,
std::map<LinkTag, Link>* link_map) {
LinkTag::Hasher link_hasher;
for (const auto& entry : from.links()) {
for (const auto& target : entry.second) {
auto nodeit = map.find(target.node);
if (nodeit == map.end()) {
continue;
}
LinkTag tag(entry.first, target.port);
size_t hval = link_hasher(tag);
Link& map_entry = (*link_map)[tag];
if (map_entry.peers.empty()) {
map_entry.tag = tag;
map_entry.unique_hash = hval;
}
map_entry.peers.push_back(nodeit->second);
}
}
}
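// Pass 2: resolve any hash collisions between distinct tags by rehashing,
// then fill in hashed_peers_ (grouped by tag, since link_map is ordered)
// and hash_to_link_.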
void SigNode::CopyLinksPass2(std::map<LinkTag, Link>* link_map) {
for (auto& entry : *link_map) {
Link* hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
while (!hl_entry_ptr->peers.empty()) {
CombineHash(1, &entry.second.unique_hash);
hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
}
for (const auto& peer : entry.second.peers) {
hashed_peers_.emplace_back(HashedPeer(entry.second.unique_hash, peer));
}
hl_entry_ptr->tag = entry.second.tag;
hl_entry_ptr->unique_hash = entry.second.unique_hash;
hl_entry_ptr->peers.swap(entry.second.peers);
}
}
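// Distance-0 topology hash: combines the opcode hash with the hashes of
// all links, and seeds the hashed-nodes masks with this node alone.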
void SigNode::ComputeTopoHash0() {
topo_hash_.clear();
last_hashed_nodes_ = next_hashed_nodes_ = node_mask_;
size_t hval = std::hash<string>()(opcode());
for (const auto& entry : hashed_peers_) {
CombineHash(entry.link_hash, &hval);
}
topo_hash_.push_back(hval);
}
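// Extends the topology hash to `distance` hops by folding in the peers'
// hashes from the previous distance. Peers on equal links are combined
// commutatively so their order doesn't matter; the mask accumulates the
// set of nodes that have influenced this hash.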
void SigNode::ComputeTopoHash(int distance) {
next_hashed_nodes_ = last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " mask=" << std::hex
<< next_hashed_nodes_;
}
if (hash_is_final_) {
return;
}
const int64_t topo_hash_size = topo_hash_.size();
CHECK(topo_hash_size == distance);
int prev = distance - 1;
size_t hval = topo_hash_[0];
if (!hashed_peers_.empty()) {
size_t last_link_hash = hashed_peers_[0].link_hash;
size_t comm_hash = 0;
for (const auto& entry : hashed_peers_) {
if (entry.link_hash != last_link_hash) {
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
comm_hash = 0;
last_link_hash = entry.link_hash;
}
CombineHashCommutative(entry.peer->GetTopoHash(prev), &comm_hash);
next_hashed_nodes_ |= entry.peer->last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " += " << entry.peer->name()
<< " mask=" << std::hex << next_hashed_nodes_;
}
}
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
}
topo_hash_.push_back(hval);
}
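// Returns the hash computed for the given distance; once the hash is
// final, any larger distance simply returns the last computed value.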
size_t SigNode::GetTopoHash(int distance) const {
CHECK(!topo_hash_.empty());
const int64_t topo_hash_size = topo_hash_.size();
if (distance >= topo_hash_size) {
CHECK(hash_is_final_);
return topo_hash_.back();
} else {
return topo_hash_[distance];
}
}
bool SigNode::operator==(const SigNode& other) const {
if (opcode() != other.opcode()) {
return false;
}
if (unique_rank_ != other.unique_rank_) {
return false;
}
if (hashed_peers_.size() != other.hashed_peers_.size()) {
return false;
}
for (auto it1 = hashed_peers_.begin(), it2 = other.hashed_peers_.begin();
it1 != hashed_peers_.end(); ++it1, ++it2) {
if (it1->link_hash != it2->link_hash) {
return false;
}
if (it1->peer->unique_rank_ != it2->peer->unique_rank_) {
return false;
}
}
return true;
}
constexpr int Signature::kMaxGraphSize;
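// Renders the signature as text: one entry per node listing its opcode
// and its inbound links, with peers identified by their unique ranks.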
string Signature::ToString() const {
string result;
for (size_t n = 0; n < nodes.size(); ++n) {
result += absl::StrFormat("%d:%s", n, nodes[n]->opcode());
for (const auto& entry : nodes[n]->hashed_peers_) {
const auto& link = nodes[n]->hash_to_link_[entry.link_hash];
if (link.tag.local.IsInbound()) {
result +=
absl::StrFormat("[%s:%s:%d]", string(link.tag.local),
string(link.tag.remote), entry.peer->unique_rank_);
}
}
result.push_back(',');
}
return result;
}
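// Computes the signature: repeatedly hashes the topology to growing
// distances and peels off the nodes whose hashes become unique,
// assigning them stable ranks, until every node has been ordered.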
Status Signature::Compute() {
if (map.size() > kMaxGraphSize) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"A graph of %d nodes is too big for signature computation, "
"the maximal supported node count is %d.",
map.size(), kMaxGraphSize));
}
size_t next_node_id = 0;
sig_short = 0;
sig_full.resize(0);
PrepareNodes();
FindUniqueHashes(&next_node_id);
while (next_node_id < map.size()) {
ComputeOneRound(next_node_id);
FindUniqueHashes(&next_node_id);
}
OrderLinks();
return absl::OkStatus();
}
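// Resets every node for a fresh computation: assigns a one-bit mask per
// node, clears the ranks, and computes the distance-0 hashes, skewing
// any hash small enough to collide with the rank values.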
void Signature::PrepareNodes() {
nodes.resize(0);
int64_t mask = 1;
for (const auto& entry : map) {
SigNode* node = entry.second.get();
node->last_hashed_nodes_ = node->node_mask_ = mask;
mask <<= 1;
node->unique_rank_ = ~0;
node->hash_is_final_ = false;
node->ComputeTopoHash0();
if (node->GetHighTopoHash() <= map.size()) {
node->ReHighTopoHash();
}
nodes.emplace_back(node);
}
}
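// Sorts the not-yet-ranked nodes by hash and assigns the next ranks to
// those whose hashes are unique; if nothing is unique, the last node of
// the trailing duplicate run is ranked anyway to guarantee progress.
// Ranked nodes get their hash frozen to rank+1 and are swapped into
// their rank position in the array.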
void Signature::FindUniqueHashes(size_t* next_node_id_p) {
std::stable_sort(nodes.begin() + *next_node_id_p, nodes.end(),
SigNode::NodeOrderLess());
bool found_unique = false;
for (size_t n = *next_node_id_p; n < nodes.size(); ++n) {
size_t cur_hash = nodes[n]->GetHighTopoHash();
if (n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash) {
for (++n;
n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash;
++n) {
}
if (found_unique || n != nodes.size() - 1) {
continue;
}
}
found_unique = true;
size_t id = (*next_node_id_p)++;
nodes[n]->unique_rank_ = id;
size_t last_hash = nodes[n]->GetHighTopoHash();
CombineHash(last_hash, &sig_short);
sig_full.push_back(last_hash);
nodes[n]->topo_hash_.resize(1);
nodes[n]->topo_hash_[0] = id + 1;
nodes[n]->hash_is_final_ = true;
nodes[n]->last_hashed_nodes_ = nodes[n]->node_mask_;
if (n != id) {
std::swap(nodes[id], nodes[n]);
}
}
}
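// Recomputes the topology hashes of all unranked nodes at increasing
// distances until every node's influence mask (the set of nodes folded
// into its hash) stops growing.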
void Signature::ComputeOneRound(size_t next_node_id) {
int debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
node->topo_hash_.resize(1);
node->last_hashed_nodes_ = node->node_mask_;
node->hash_is_final_ = false;
if (debug) {
LOG(INFO) << "DEBUG distance=" << 0 << " node " << debug_i++ << " "
<< node->name() << " mask=" << std::hex
<< node->last_hashed_nodes_;
}
}
bool stop = false;
for (int distance = 1; !stop; ++distance) {
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (node->hash_is_final_) {
continue;
}
node->ComputeTopoHash(distance);
if (node->GetHighTopoHash() <= nodes.size()) {
node->ReHighTopoHash();
}
}
stop = true;
debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (debug) {
LOG(INFO) << "DEBUG distance=" << distance << " node " << debug_i++
<< " " << node->name() << " oldmask=" << std::hex
<< node->last_hashed_nodes_ << " mask=" << std::hex
<< node->next_hashed_nodes_;
}
if (node->last_hashed_nodes_ == node->next_hashed_nodes_) {
node->hash_is_final_ = true;
} else {
node->last_hashed_nodes_ = node->next_hashed_nodes_;
stop = false;
}
}
}
}
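// Within each run of links sharing the same hash, orders the peers by
// unique rank so that the textual signature is deterministic.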
void Signature::OrderLinks() {
for (const auto& node : nodes) {
if (node->hashed_peers_.empty()) {
continue;
}
size_t cur_link_hash = node->hashed_peers_[0].link_hash + 1;
int first_idx = -1;
int idx;
for (idx = 0; idx < static_cast<int64_t>(node->hashed_peers_.size());
++idx) {
auto& entry = node->hashed_peers_[idx];
if (entry.link_hash == cur_link_hash) {
continue;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
cur_link_hash = entry.link_hash;
first_idx = idx;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
}
}
bool Signature::operator==(const Signature& other) const {
if (sig_short != other.sig_short) {
return false;
}
if (sig_full.size() != other.sig_full.size()) {
return false;
}
for (auto it1 = sig_full.begin(), it2 = other.sig_full.begin();
it1 != sig_full.end(); ++it1, ++it2) {
if (*it1 != *it2) {
return false;
}
}
if (nodes.size() != other.nodes.size()) {
return false;
}
for (auto it1 = nodes.begin(), it2 = other.nodes.begin(); it1 != nodes.end();
++it1, ++it2) {
if (**it1 != **it2) {
return false;
}
}
return true;
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/subgraph.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Gt;
using ::testing::Ne;
using ::testing::SizeIs;
TEST(SigNodeLinkTag, Compare) {
SigNode::LinkTag a(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag b(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag c(GenNode::Port(false, 2), GenNode::Port(false, 1));
SigNode::LinkTag d(GenNode::Port(false, 1), GenNode::Port(false, 3));
SigNode::LinkTag e(GenNode::Port(false, 2), GenNode::Port(false, 2));
EXPECT_TRUE(a == b);
EXPECT_FALSE(a == c);
EXPECT_FALSE(a == e);
EXPECT_FALSE(a < b);
EXPECT_FALSE(b < a);
EXPECT_TRUE(a < c);
EXPECT_FALSE(c < a);
EXPECT_TRUE(a < d);
EXPECT_FALSE(d < a);
}
class SigBaseTest : public ::testing::Test, protected TestGraphs {
protected:
void BuildSigMap(const GraphDef& graph) {
gen_map_.clear();
sig_.map.clear();
CHECK(GenNode::BuildGraphInMap(graph, &gen_map_).ok());
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
}
static void CopyLinksPass2(
std::map<SigNode::LinkTag, SigNode::Link>* link_map, SigNode* node) {
node->CopyLinksPass2(link_map);
}
static void ComputeTopoHash0(SigNode* node) { node->ComputeTopoHash0(); }
static void ComputeTopoHash(int distance, SigNode* node) {
node->ComputeTopoHash(distance);
}
static size_t GetTopoHash(int distance, SigNode* node) {
return node->GetTopoHash(distance);
}
static size_t GetHighTopoHash(SigNode* node) {
return node->GetHighTopoHash();
}
static void ReHighTopoHash(SigNode* node) { node->ReHighTopoHash(); }
static SigNode::HashedPeerVector& RefHashedPeers(SigNode* node) {
return node->hashed_peers_;
}
static size_t& RefUniqueRank(SigNode* node) { return node->unique_rank_; }
static bool& RefHashIsFinal(SigNode* node) { return node->hash_is_final_; }
static std::vector<size_t>& RefTopoHash(SigNode* node) {
return node->topo_hash_;
}
static uint64_t& RefNodeMask(SigNode* node) { return node->node_mask_; }
static uint64_t& RefLastHashedNodes(SigNode* node) {
return node->last_hashed_nodes_;
}
static uint64_t& RefNextHashedNodes(SigNode* node) {
return node->next_hashed_nodes_;
}
static void PrepareNodes(Signature* signature) { signature->PrepareNodes(); }
static void FindUniqueHashes(size_t* next_node_id_p, Signature* signature) {
signature->FindUniqueHashes(next_node_id_p);
}
static void ComputeOneRound(size_t next_node_id, Signature* signature) {
signature->ComputeOneRound(next_node_id);
}
static void OrderLinks(Signature* signature) { signature->OrderLinks(); }
GenNodeMap gen_map_;
Signature sig_;
};
class SigNodeTest : public SigBaseTest {};
TEST_F(SigNodeTest, DuplicateHash) {
NodeDef node1 = MakeNodeConst("node1");
NodeDef node2 = MakeNodeConst("node2");
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
SigNode sn1(&node1);
SigNode sn2(&node2);
SigNode sn3(&node3);
constexpr size_t kSameHash = 999;
SigNode::Link link1;
link1.tag = SigNode::LinkTag(GenNode::Port(true, 0), GenNode::Port(false, 0));
link1.unique_hash = kSameHash;
link1.peers.emplace_back(&sn1);
SigNode::Link link2;
link2.tag = SigNode::LinkTag(GenNode::Port(true, 1), GenNode::Port(false, 0));
link2.unique_hash = kSameHash;
link2.peers.emplace_back(&sn2);
SigNode::Link link3;
link3.tag = SigNode::LinkTag(GenNode::Port(true, 2), GenNode::Port(false, 0));
link3.unique_hash = kSameHash;
link3.peers.emplace_back(&sn3);
std::map<SigNode::LinkTag, SigNode::Link> link_map;
link_map[link1.tag] = link1;
link_map[link2.tag] = link2;
link_map[link3.tag] = link3;
CopyLinksPass2(&link_map, &sn3);
auto& hl = sn3.hash_to_link();
EXPECT_THAT(hl, SizeIs(3));
std::map<SigNode::LinkTag, SigNode::Link> rehashed;
auto hlit = hl.begin();
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
ASSERT_THAT(rehashed, SizeIs(3));
auto rhit = rehashed.begin();
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link1.tag);
EXPECT_THAT(rhit->second.unique_hash, Eq(kSameHash));
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn1));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link2.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
size_t hash2 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn2));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link3.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
EXPECT_THAT(rhit->second.unique_hash, Ne(hash2));
size_t hash3 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn3));
auto& peers = sn3.hashed_peers();
EXPECT_THAT(peers, SizeIs(3));
auto peerit = peers.begin();
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(kSameHash));
EXPECT_THAT(peerit->peer, Eq(&sn1));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash2));
EXPECT_THAT(peerit->peer, Eq(&sn2));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash3));
EXPECT_THAT(peerit->peer, Eq(&sn3));
}
TEST_F(SigNodeTest, GetTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
RefHashIsFinal(&sn1) = true;
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(456));
EXPECT_THAT(GetHighTopoHash(&sn1), Eq(456));
}
TEST_F(SigNodeTest, ReTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
ReHighTopoHash(&sn1);
size_t expected_hash = 456;
CombineHash(1, &expected_hash);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(expected_hash));
}
TEST_F(SigNodeTest, ComputeTopoHash0) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefUniqueRank(&sn1) = 10;
RefNodeMask(&sn1) = 0x02;
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
RefLastHashedNodes(&sn1) = 0xFF;
RefNextHashedNodes(&sn1) = 0xFF;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(2, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
ComputeTopoHash0(&sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(1));
size_t exp_hval = std::hash<string>()(sn1.opcode());
CombineHash(1, &exp_hval);
CombineHash(1, &exp_hval);
CombineHash(2, &exp_hval);
CombineHash(3, &exp_hval);
CombineHash(3, &exp_hval);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(exp_hval));
}
TEST_F(SigNodeTest, ComputeTopoHashNotFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x38));
size_t exp_hash = 123;
size_t comm_hash;
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(10, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHash(20, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(30, &exp_hash);
CombineHash(comm_hash, &exp_hash);
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(exp_hash));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(3));
}
TEST_F(SigNodeTest, ComputeTopoHashFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
RefHashIsFinal(&sn1) = true;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(321));
}
TEST_F(SigNodeTest, EqualsOpcode) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
node2.set_op("Mul");
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsRank) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
RefUniqueRank(&sn1) = 1;
RefUniqueRank(&sn2) = 2;
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsLinkSize) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GraphDef graph2;
(*graph2.add_node()) = MakeNodeConst("node1");
auto node22 = graph2.add_node();
*node22 = MakeNodeMul("node2", "node1", "node1");
node22->add_input("node2");
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph2, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
EXPECT_FALSE(*sig_map2["node2"] == *sig_map1["node2"]);
}
TEST_F(SigNodeTest, EqualsLinks) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
SigNode* sn2 = sig_map2["node2"].get();
++RefHashedPeers(sn2)[0].link_hash;
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
--RefHashedPeers(sn2)[0].link_hash;
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
++RefUniqueRank(sig_map2["node1"].get());
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
}
class SignatureTest : public SigBaseTest {
protected:
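// Permutation helpers: `countdown` acts as a mixed-radix counter over
// the remaining swap choices, so repeatedly calling CountDown() and
// BuildPermutation() visits every permutation of the node order once.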
static void InitPermutation(size_t size,
std::vector<size_t>* plain_permutation,
std::vector<size_t>* countdown) {
plain_permutation->clear();
countdown->clear();
for (size_t i = 0; i < size; ++i) {
plain_permutation->emplace_back(i);
countdown->emplace_back(size - 1 - i);
}
}
static void BuildPermutation(const std::vector<size_t>& plain_permutation,
const std::vector<size_t>& countdown,
std::vector<size_t>* result) {
*result = plain_permutation;
for (size_t i = 0; i < result->size(); ++i) {
std::swap((*result)[i], (*result)[i + countdown[i]]);
}
}
static bool CountDown(std::vector<size_t>* countdown) {
int pos;
for (pos = countdown->size() - 2; pos >= 0; --pos) {
if ((*countdown)[pos] > 0) {
--(*countdown)[pos];
break;
}
(*countdown)[pos] = (countdown->size() - 1 - pos);
}
return pos >= 0;
}
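// Computes the signature of the graph under every permutation of the
// node ordering and verifies that exactly one distinct signature
// results, i.e. that the signature is permutation-invariant.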
void TestGraphEveryWay(const GraphDef& graph) {
size_t graph_size = graph.node_size();
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
std::vector<size_t> plain_permutation;
std::vector<size_t> countdown;
InitPermutation(graph_size, &plain_permutation, &countdown);
std::set<string> signatures;
std::vector<size_t> permutation;
do {
BuildPermutation(plain_permutation, countdown, &permutation);
constexpr bool kDebugPermutation = false;
if (kDebugPermutation) {
string p;
for (size_t i = 0; i < permutation.size(); ++i) {
p.push_back('0' + permutation[i]);
}
LOG(INFO) << "Permutation: " << p;
}
std::vector<std::unique_ptr<SigNode>> hold(graph_size);
int idx;
sig_.nodes.clear();
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes before permutation:";
}
for (auto& entry : sig_.map) {
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
hold[idx++] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[permutation[idx++]]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
sig_.nodes.emplace_back(entry.second.get());
RefUniqueRank(entry.second.get()) = idx;
}
OrderLinks(&sig_);
ASSERT_THAT(sig_.Compute(), Eq(absl::OkStatus()));
signatures.insert(sig_.ToString());
EXPECT_THAT(sig_.sig_full, SizeIs(graph_size));
size_t hval = 0;
for (size_t ih : sig_.sig_full) {
EXPECT_THAT(ih, Gt(graph_size));
CombineHash(ih, &hval);
}
EXPECT_THAT(sig_.sig_short, Eq(hval));
idx = 0;
for (auto& entry : sig_.map) {
hold[permutation[idx++]] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after un-permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[idx++]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
}
} while (CountDown(&countdown));
for (const auto& s : signatures) {
LOG(INFO) << "Signature: " << s;
}
EXPECT_THAT(signatures, SizeIs(1));
}
};
TEST_F(SignatureTest, PrepareNodes) {
NodeDef node1 = MakeNodeConst("node1");
sig_.map["node1"] = std::make_unique<SigNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
sig_.map["node2"] = std::make_unique<SigNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
sig_.map["node3"] = std::make_unique<SigNode>(&node3);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(3));
int idx = 0;
for (const auto& entry : sig_.map) {
EXPECT_THAT(RefNodeMask(entry.second.get()), Eq(1 << idx))
<< " at index " << idx;
EXPECT_THAT(RefUniqueRank(entry.second.get()), Eq(static_cast<size_t>(~0)))
<< " at index " << idx;
EXPECT_THAT(RefHashIsFinal(entry.second.get()), false)
<< " at index " << idx;
EXPECT_THAT(RefTopoHash(entry.second.get()), SizeIs(1))
<< " at index " << idx;
++idx;
}
}
TEST_F(SignatureTest, FindUniqueHashesAllDifferent) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(900);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(800);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(600);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
size_t next = 1;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(4));
EXPECT_THAT(sig_.nodes[0], Eq(&sn1));
EXPECT_THAT(sig_.nodes[1], Eq(&sn4));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn2));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
ASSERT_THAT(RefTopoHash(&sn2), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn3), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn4), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn2)[0], Eq(4));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(3));
EXPECT_THAT(RefTopoHash(&sn4)[0], Eq(2));
EXPECT_THAT(sig_.sig_full, ElementsAre(600, 700, 800));
size_t exp_short_hash = 0;
CombineHash(600, &exp_short_hash);
CombineHash(700, &exp_short_hash);
CombineHash(800, &exp_short_hash);
EXPECT_THAT(sig_.sig_short, Eq(exp_short_hash));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicatesExceptOne) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(800);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(800);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn3));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn1));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn5));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(false));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(1));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicates) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(700);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(700);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn5));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn1));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn5)[0], Eq(1));
}
TEST_F(SignatureTest, ComputeOneRoundCircular) {
BuildSigMap(graph_circular_onedir_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
size_t hval = GetHighTopoHash(sig_.nodes[0]);
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(GetHighTopoHash(sig_.nodes[i]), Eq(hval)) << " at index " << i;
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefTopoHash(sig_.nodes[i]), SizeIs(4)) << " at index " << i;
}
}
TEST_F(SignatureTest, ComputeOneRoundLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
std::vector<size_t> hash_size;
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(4, 5, 5, 6, 6));
}
TEST_F(SignatureTest, ComputeOneRoundSplitLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
std::swap(sig_.nodes[0], sig_.nodes[2]);
ASSERT_THAT(RefNodeMask(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
RefHashIsFinal(sig_.nodes[0]) = true;
ComputeOneRound(1, &sig_);
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
std::vector<size_t> hash_size;
for (int i = 1; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(3, 3, 4, 4));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[4]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[4]), Eq(0x1C));
}
TEST_F(SignatureTest, OrderLinks) {
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph_for_link_order_, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
for (auto it = sig_.map.rbegin(); it != sig_.map.rend(); ++it) {
auto& entry = *it;
RefUniqueRank(entry.second.get()) = sig_.nodes.size();
sig_.nodes.emplace_back(entry.second.get());
}
string before = sig_.ToString();
EXPECT_THAT(before, Eq(
"0:Mul[i0:o0:5][i0:o0:4][i0:o1:4][i0:o2:3][i0:o2:2][i0:o3:2],"
"1:Mul[i0:o0:5][i0:o0:4][i0:o0:3][i0:o0:2],"
"2:Const,"
"3:Const,"
"4:Const,"
"5:Const,"
));
OrderLinks(&sig_);
string after = sig_.ToString();
EXPECT_THAT(after, Eq(
"0:Mul[i0:o0:4][i0:o0:5][i0:o1:4][i0:o2:2][i0:o2:3][i0:o3:2],"
"1:Mul[i0:o0:2][i0:o0:3][i0:o0:4][i0:o0:5],"
"2:Const,"
"3:Const,"
"4:Const,"
"5:Const,"
));
}
TEST_F(SignatureTest, GraphTooBig) {
GraphDef graph;
for (int i = 0; i <= Signature::kMaxGraphSize; ++i) {
(*graph.add_node()) = MakeNodeConst(absl::StrFormat("node%d", i));
}
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &gen_map_), Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
ASSERT_THAT(sig_.Compute(),
Eq(Status(absl::StatusCode::kInvalidArgument,
"A graph of 65 nodes is too big for signature "
"computation, the maximal supported node count is "
"64.")));
}
TEST_F(SignatureTest, ToString) {
BuildSigMap(graph_circular_onedir_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
for (int i = 0; i < 5; ++i) {
RefUniqueRank(sig_.nodes[i]) = i;
RefHashIsFinal(sig_.nodes[i]) = true;
}
string result = sig_.ToString();
ASSERT_THAT(result, Eq(
"0:Mul[i0:o0:4][i0:o0:4],"
"1:Mul[i0:o0:0][i0:o0:0],"
"2:Mul[i0:o0:1][i0:o0:1],"
"3:Mul[i0:o0:2][i0:o0:2],"
"4:Mul[i0:o0:3][i0:o0:3],"
));
}
TEST_F(SignatureTest, Permutation) {
std::vector<size_t> plain_permutation;
std::vector<size_t> countdown;
InitPermutation(5, &plain_permutation, &countdown);
std::set<string> results;
std::vector<size_t> permutation;
do {
BuildPermutation(plain_permutation, countdown, &permutation);
EXPECT_THAT(permutation, SizeIs(5));
string p;
for (size_t i = 0; i < permutation.size(); ++i) {
p.push_back('0' + permutation[i]);
}
LOG(INFO) << "Permutation: " << p;
results.insert(p);
} while (CountDown(&countdown));
EXPECT_THAT(results, SizeIs(5 * 4 * 3 * 2 * 1));
}
TEST_F(SignatureTest, ComputeCircularOneDir) {
TestGraphEveryWay(graph_circular_onedir_);
}
TEST_F(SignatureTest, ComputeCircularBiDir) {
TestGraphEveryWay(graph_circular_bidir_);
}
TEST_F(SignatureTest, ComputeLinear) { TestGraphEveryWay(graph_linear_); }
TEST_F(SignatureTest, ComputeMultiInput) {
TestGraphEveryWay(graph_multi_input_);
}
TEST_F(SignatureTest, ComputeAllOrNone) {
TestGraphEveryWay(graph_all_or_none_);
}
TEST_F(SignatureTest, ComputeCross) { TestGraphEveryWay(graph_small_cross_); }
TEST_F(SignatureTest, Equals) {
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph_circular_bidir_, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
Signature sig1;
sg1.ExtractForSignature(&sig1.map);
ASSERT_THAT(sig1.Compute(), Eq(absl::OkStatus()));
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph_circular_bidir_, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
Signature sig2;
sg2.ExtractForSignature(&sig2.map);
ASSERT_THAT(sig2.Compute(), Eq(absl::OkStatus()));
EXPECT_TRUE(sig1 == sig2);
++sig2.sig_short;
EXPECT_FALSE(sig1 == sig2);
--sig2.sig_short;
EXPECT_TRUE(sig1 == sig2);
++sig2.sig_full[0];
EXPECT_FALSE(sig1 == sig2);
--sig2.sig_full[0];
EXPECT_TRUE(sig1 == sig2);
std::swap(sig2.nodes[0], sig2.nodes[1]);
EXPECT_FALSE(sig1 == sig2);
std::swap(sig2.nodes[0], sig2.nodes[1]);
EXPECT_TRUE(sig1 == sig2);
sig2.nodes.emplace_back(sig2.nodes[0]);
EXPECT_FALSE(sig1 == sig2);
EXPECT_FALSE(sig2 == sig1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/sig_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/sig_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21b1cf8f-e548-408a-b9f8-3c49c1a4d992 | cpp | tensorflow/tensorflow | graph_analyzer | tensorflow/core/grappler/graph_analyzer/graph_analyzer.cc | tensorflow/core/grappler/graph_analyzer/graph_analyzer_test.cc | #include <deque>
#include <iostream>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GraphAnalyzer::GraphAnalyzer(const GraphDef& graph, int subgraph_size)
: graph_(graph), subgraph_size_(subgraph_size) {}
GraphAnalyzer::~GraphAnalyzer() {}
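// Runs the whole analysis: builds the node map, enumerates all connected
// subgraphs of the requested size, drops those that split a multi-input
// group, and collates the survivors by signature.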
Status GraphAnalyzer::Run() {
if (subgraph_size_ > Signature::kMaxGraphSize) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrFormat("Subgraphs of %d nodes are not supported, "
"the maximal supported node count is %d.",
subgraph_size_, Signature::kMaxGraphSize));
}
Status st = BuildMap();
if (!st.ok()) {
return st;
}
FindSubgraphs();
DropInvalidSubgraphs();
st = CollateResult();
if (!st.ok()) {
return st;
}
return absl::OkStatus();
}
Status GraphAnalyzer::BuildMap() {
nodes_.clear();
return GenNode::BuildGraphInMap(graph_, &nodes_);
}
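// Breadth-first enumeration: seeds a single-node subgraph per node, then
// keeps extending the partial subgraphs from the todo queue until they
// reach the target size.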
void GraphAnalyzer::FindSubgraphs() {
result_.clear();
if (subgraph_size_ < 1) {
return;
}
partial_.clear();
todo_.clear();
const Subgraph::Identity empty_parent;
for (const auto& node : nodes_) {
if (subgraph_size_ == 1) {
result_.ExtendParent(empty_parent, node.second.get());
} else {
todo_.push_back(partial_.ExtendParent(empty_parent, node.second.get()));
}
}
while (!todo_.empty()) {
ExtendSubgraph(todo_.front());
todo_.pop_front();
}
partial_.clear();
}
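// Grows `parent` by one hop in every direction. All-or-none nodes and
// multi-input ports are extended atomically (the whole peer group at
// once); ordinary links add a single neighbor.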
void GraphAnalyzer::ExtendSubgraph(Subgraph* parent) {
const int next_parent_id = parent->id().size() + 1;
bool will_complete = (next_parent_id == subgraph_size_);
SubgraphPtrSet& sg_set = will_complete ? result_ : partial_;
const GenNode* last_all_or_none_node = nullptr;
for (SubgraphIterator sit(parent); !sit.AtEnd(); sit.Next()) {
const GenNode* node = sit.GetNode();
GenNode::Port port = sit.GetPort();
const GenNode::LinkTarget& neighbor = sit.GetNeighbor();
if (node->AllInputsOrNone() && port.IsInbound() && !port.IsControl()) {
if (node != last_all_or_none_node) {
ExtendSubgraphAllOrNone(parent, node);
last_all_or_none_node = node;
}
sit.SkipPort();
} else if (neighbor.node->AllInputsOrNone() && !port.IsInbound() &&
!port.IsControl()) {
if (parent->id().find(neighbor.node) == parent->id().end()) {
ExtendSubgraphAllOrNone(parent, neighbor.node);
}
} else if (node->IsMultiInput(port)) {
ExtendSubgraphPortAllOrNone(parent, node, port);
sit.SkipPort();
} else if (neighbor.node->IsMultiInput(neighbor.port)) {
if (parent->id().find(neighbor.node) != parent->id().end()) {
continue;
}
ExtendSubgraphPortAllOrNone(parent, neighbor.node, neighbor.port);
} else {
Subgraph* sg = sg_set.ExtendParent(parent->id(), neighbor.node);
if (!will_complete && sg != nullptr) {
todo_.push_back(sg);
}
}
}
}
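// Atomically adds an all-or-none node together with every peer on its
// non-control inputs; gives up if that would exceed the subgraph size.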
void GraphAnalyzer::ExtendSubgraphAllOrNone(Subgraph* parent,
const GenNode* node) {
Subgraph::Identity id = parent->id();
id.insert(node);
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
}
AddExtendedSubgraph(parent, id);
}
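// Like above, but for a single multi-input port: adds the node plus all
// peers linked through that one port as a unit.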
void GraphAnalyzer::ExtendSubgraphPortAllOrNone(Subgraph* parent,
const GenNode* node,
GenNode::Port port) {
auto nbit = node->links().find(port);
if (nbit == node->links().end()) {
return;
}
Subgraph::Identity id = parent->id();
id.insert(node);
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
AddExtendedSubgraph(parent, id);
}
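// Records a newly built identity as a partial or complete subgraph,
// deduplicating against the existing set and queueing incomplete ones
// for further extension.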
void GraphAnalyzer::AddExtendedSubgraph(Subgraph* parent,
const Subgraph::Identity& id) {
if (id.size() == parent->id().size()) {
return;
}
auto sg = std::make_unique<Subgraph>(id);
SubgraphPtrSet& spec_sg_set =
(id.size() == subgraph_size_) ? result_ : partial_;
if (spec_sg_set.find(sg) != spec_sg_set.end()) {
return;
}
const int id_size = id.size();
if (id_size != subgraph_size_) {
todo_.push_back(sg.get());
}
spec_sg_set.insert(std::move(sg));
}
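// Removes the completed subgraphs that capture only part of some
// multi-input or all-or-none group, since those would yield misleading
// signatures.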
void GraphAnalyzer::DropInvalidSubgraphs() {
auto resit = result_.begin();
while (resit != result_.end()) {
if (HasInvalidMultiInputs(resit->get())) {
auto delit = resit;
++resit;
result_.erase(delit);
} else {
++resit;
}
}
}
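// A subgraph has invalid multi-inputs if any all-or-none node or
// multi-input port has some links staying inside the subgraph and
// others leaving it.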
bool GraphAnalyzer::HasInvalidMultiInputs(Subgraph* sg) {
for (auto const& node : sg->id()) {
if (!node->AllInputsOrNone()) {
continue;
}
bool anyIn = false;
bool anyOut = false;
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
if (sg->id().find(link.node) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
}
}
if (anyIn && anyOut) {
return true;
}
}
for (SubgraphIterator sit(sg); !sit.AtEnd(); sit.Next()) {
if (sit.GetNode()->IsMultiInput(sit.GetPort())) {
bool anyIn = false;
bool anyOut = false;
do {
GenNode* peer = sit.GetNeighbor().node;
if (sg->id().find(peer) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
} while (sit.NextIfSamePort());
if (anyIn && anyOut) {
return true;
}
}
}
return false;
}
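// Computes the signature of every surviving subgraph and counts how
// many subgraphs collate to each distinct signature.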
Status GraphAnalyzer::CollateResult() {
ordered_collation_.clear();
collation_map_.clear();
for (const auto& it : result_) {
auto sig = std::make_unique<Signature>();
it->ExtractForSignature(&sig->map);
Status status = sig->Compute();
if (!status.ok()) {
return status;
}
auto& coll_entry = collation_map_[sig.get()];
if (coll_entry.sig == nullptr) {
coll_entry.sig = std::move(sig);
}
++coll_entry.count;
}
for (auto& entry : collation_map_) {
ordered_collation_.insert(&entry.second);
}
result_.clear();
return absl::OkStatus();
}
std::vector<string> GraphAnalyzer::DumpRawSubgraphs() {
std::vector<string> result;
for (const auto& it : result_) {
result.emplace_back(it->Dump());
}
return result;
}
std::vector<string> GraphAnalyzer::DumpSubgraphs() {
std::vector<string> result;
for (auto ptr : ordered_collation_) {
result.emplace_back(
absl::StrFormat("%d %s", ptr->count, ptr->sig->ToString()));
}
return result;
}
Status GraphAnalyzer::OutputSubgraphs() {
size_t total = 0;
for (auto ptr : ordered_collation_) {
std::cout << ptr->count << ' ' << ptr->sig->ToString() << '\n';
total += ptr->count;
}
std::cout << "Total: " << total << '\n';
if (std::cout.fail()) {
return Status(absl::StatusCode::kDataLoss, "Failed to write to stdout");
} else {
return absl::OkStatus();
}
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphAnalyzerTest : public ::testing::Test, protected TestGraphs {
protected:
Status BuildMap() { return gran_->BuildMap(); }
void FindSubgraphs() { gran_->FindSubgraphs(); }
void DropInvalidSubgraphs() { gran_->DropInvalidSubgraphs(); }
Status CollateResult() { return gran_->CollateResult(); }
void ExtendSubgraph(Subgraph* parent) { gran_->ExtendSubgraph(parent); }
void ExtendSubgraphPortAllOrNone(Subgraph* parent, GenNode* node,
GenNode::Port port) {
gran_->ExtendSubgraphPortAllOrNone(parent, node, port);
}
void ExtendSubgraphAllOrNone(Subgraph* parent, GenNode* node) {
gran_->ExtendSubgraphAllOrNone(parent, node);
}
std::vector<string> DumpRawSubgraphs() { return gran_->DumpRawSubgraphs(); }
std::vector<string> DumpPartials() {
std::vector<string> result;
for (const auto& it : gran_->partial_) {
result.emplace_back(it->Dump());
}
return result;
}
const GenNodeMap& GetNodes() { return gran_->nodes_; }
GenNode* GetNode(const string& name) { return gran_->nodes_.at(name).get(); }
SubgraphPtrSet& GetResult() { return gran_->result_; }
SubgraphPtrSet& GetPartial() { return gran_->partial_; }
std::deque<Subgraph*>& GetTodo() { return gran_->todo_; }
std::unique_ptr<GraphAnalyzer> gran_;
};
TEST_F(GraphAnalyzerTest, BuildMap) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
EXPECT_THAT(st, Eq(absl::OkStatus()));
auto& map = GetNodes();
EXPECT_THAT(map.find("node1"), Ne(map.end()));
EXPECT_THAT(map.find("node2"), Ne(map.end()));
EXPECT_THAT(map.find("node3"), Ne(map.end()));
}
TEST_F(GraphAnalyzerTest, BuildMapError) {
(*graph_3n_self_control_.add_node()) = MakeNodeConst("node1");
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs0) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 0);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(0));
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs1) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(3));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: BroadcastGradientArgs(node3)",
"1: Const(node1)",
"1: Sub(node2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphsTooLarge) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseIn) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto parent = std::make_unique<Subgraph>(Subgraph::Identity());
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(parent.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsIncomplete) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("add2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsMulti) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("add1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add1"),
GetNode("add2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("add1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add1), AddN(add2), Sub(sub)",
"1: AddN(add1), Const(const1_1), Const(const1_2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsNoControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass1"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSeparateControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("pass1"),
GenNode::Port(true, -1));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("pass2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: IdentityN(pass2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)",
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsAllOrNone) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("pass1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass1"),
GetNode("pass2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("pass1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: IdentityN(pass1), IdentityN(pass2), Sub(sub)",
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/graph_analyzer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/graph_analyzer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |