ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
---|---|---|---|---|---|---|---|---|---|---|
b7c4b8a1-814a-467f-8bfa-161939660f06 | cpp | tensorflow/tensorflow | gen_node | tensorflow/core/grappler/graph_analyzer/gen_node.cc | tensorflow/core/grappler/graph_analyzer/gen_node_test.cc | #include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/graph_analyzer/hash_tools.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GenNode::GenNode(const NodeDef* node) : node_(node), op_(nullptr) {}
Status GenNode::BuildGraphInMap(const GraphDef& source, GenNodeMap* map) {
for (const auto& n : source.node()) {
const string& name = n.name();
if (map->find(name) != map->end()) {
return Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name '" + name + "'.");
}
(*map)[name] = std::make_unique<GenNode>(&n);
}
for (const auto& mapit : *map) {
Status st = mapit.second->ParseInputs(map);
if (!st.ok()) {
return st;
}
}
return absl::OkStatus();
}
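// Parses the node's inputs against its OpDef. For commutative ops all data
// inputs fold into input port 0; for non-commutative ops with a multi-input
// argument (e.g. ShapeN, IdentityN) each input keeps its own port and the
// node is flagged "all inputs or none". Control inputs map to the negative
// control port.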
Status GenNode::ParseInputs(const GenNodeMap* map) {
all_inputs_or_none_ = false;
Status st = OpRegistry::Global()->LookUpOpDef(opcode(), &op_);
if (!st.ok()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat("Node '%s' contains an undefined operation '%s': %s",
name(), opcode(), st.message()));
}
int n_inputs = node_->input_size();
int n_named_inputs = op_->input_arg_size();
int n_multi_inputs = 0;
for (const auto& inarg : op_->input_arg()) {
if (!inarg.number_attr().empty() || !inarg.type_list_attr().empty()) {
++n_multi_inputs;
}
}
bool is_commutative = grappler::IsCommutative(*node_);
if (n_multi_inputs > 1 || (n_multi_inputs > 0 && n_named_inputs > 1)) {
is_commutative = false;
}
if (is_commutative) {
n_named_inputs = 1;
all_inputs_or_none_ = false;
} else if (n_multi_inputs > 0) {
all_inputs_or_none_ = true;
}
for (int i = 0; i < n_inputs; ++i) {
int other_position;
string other_name = ParseNodeName(node_->input(i), &other_position);
auto other_it = map->find(other_name);
if (other_it == map->end()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' input %d refers to a non-existing node '%s'.", name(),
i, other_name));
}
GenNode* other_node = other_it->second.get();
int this_position = other_position < 0 ? -1 : (is_commutative ? 0 : i);
if (this_position >= 0 && n_multi_inputs == 0 &&
this_position >= n_named_inputs) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"Node '%s' has a non-control input from '%s' at index %d but its "
"operation '%s' defines only %d inputs.",
name(), other_name, this_position, op_->name(), n_named_inputs));
}
Port this_port(true, this_position);
Port other_port(false, other_position);
links_[this_port].emplace_back(LinkTarget(other_node, other_port));
other_node->links_[other_port].emplace_back(LinkTarget(this, this_port));
}
return absl::OkStatus();
}
bool GenNode::IsMultiInput(Port port) const {
if (!port.IsInbound()) {
return false;
}
auto it = links_.find(port);
if (it == links_.end()) {
return false;
}
return (it->second.size() > 1);
}
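// Renders a port as "i" (inbound) or "o" (outbound), followed by either "C"
// for a control port or the decimal port id for a data port.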
GenNode::Port::operator string() const {
string result = this->IsInbound() ? "i" : "o";
if (this->IsControl()) {
result.append("C");
} else {
result.append(absl::StrFormat("%d", this->Id()));
}
return result;
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
TEST(GenNodeTest, Port) {
{
GenNode::Port p(true, 100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(100));
}
{
GenNode::Port p(false, 0);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(false));
EXPECT_THAT(p.Id(), Eq(0));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(false));
EXPECT_THAT(p2.Id(), Eq(0));
}
{
GenNode::Port p(true, -100);
EXPECT_THAT(p.IsInbound(), Eq(true));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-100));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(true));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-100));
}
{
GenNode::Port p(false, -1);
EXPECT_THAT(p.IsInbound(), Eq(false));
EXPECT_THAT(p.IsControl(), Eq(true));
EXPECT_THAT(p.Id(), Eq(-1));
GenNode::Port p2 = GenNode::Port::Decode(p.Encoded());
EXPECT_THAT(p2.IsInbound(), Eq(false));
EXPECT_THAT(p2.IsControl(), Eq(true));
EXPECT_THAT(p2.Id(), Eq(-1));
}
}
TEST(GenNodeTest, ParseNodeNoInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre());
}
TEST(GenNodeTest, ParseNodeWithControl) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
node3.add_input("^node1");
node3.add_input("^node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]",
"oC: node3[iC]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"iC: node1[oC], node2[oC]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, -1)), Eq(true));
EXPECT_FALSE(gn1->AllInputsOrNone());
EXPECT_FALSE(gn2->AllInputsOrNone());
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeMul("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeAddN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0], node2[o0]"
));
EXPECT_THAT(gn2->IsMultiInput(GenNode::Port(false, 0)), Eq(false));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(true));
EXPECT_FALSE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputNotCommutative) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiInputList) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeIdentityN("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
ASSERT_THAT(gn3->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]"
));
EXPECT_THAT(gn3->IsMultiInput(GenNode::Port(true, 0)), Eq(false));
EXPECT_TRUE(gn3->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiMultiInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeConst("node4");
map["node4"] = std::make_unique<GenNode>(&node4);
NodeDef node5 =
MakeNodeQuantizedConcat("node5", "node1", "node2", "node3", "node4");
map["node5"] = std::make_unique<GenNode>(&node5);
auto gn1 = map["node1"].get();
auto gn2 = map["node2"].get();
auto gn3 = map["node3"].get();
auto gn4 = map["node4"].get();
auto gn5 = map["node5"].get();
ASSERT_THAT(gn5->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"o0: node5[i0]"
));
EXPECT_THAT(DumpLinkMap(gn2->links()), ElementsAre(
"o0: node5[i1]"
));
EXPECT_THAT(DumpLinkMap(gn3->links()), ElementsAre(
"o0: node5[i2]"
));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"o0: node5[i3]"
));
EXPECT_THAT(DumpLinkMap(gn5->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"i2: node3[o0]",
"i3: node4[o0]"
));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 1)), Eq(false));
EXPECT_THAT(gn5->IsMultiInput(GenNode::Port(true, 2)), Eq(false));
EXPECT_TRUE(gn5->AllInputsOrNone());
}
TEST(GenNodeTest, ParseNodeMultiOutput) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
NodeDef node4 = MakeNodeSub("node4", "node3:1", "node3:0");
map["node4"] = std::make_unique<GenNode>(&node4);
auto gn4 = map["node4"].get();
ASSERT_THAT(gn4->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn4->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]"
));
}
TEST(GenNodeTest, ParseNodeUndefinedOp) {
GenNodeMap map;
NodeDef node1;
node1.set_name("node1");
node1.set_op("Zzzx");
map["node1"] = std::make_unique<GenNode>(&node1);
const OpDef* opdef;
Status nested_error = OpRegistry::Global()->LookUpOpDef("Zzzx", &opdef);
auto gn = map["node1"].get();
ASSERT_THAT(
gn->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Node 'node1' contains an undefined operation 'Zzzx': ",
nested_error.message()))));
}
TEST(GenNodeTest, ParseNodeUnexpectedInputs) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
EXPECT_THAT(gn1->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node1' has a non-control "
"input from 'node1' at index 0 but its operation "
"'Const' defines only 0 inputs.")));
NodeDef node2 = MakeNodeConst("node2");
map["node2"] = std::make_unique<GenNode>(&node2);
NodeDef node3 = MakeNodeSub("node3", "node1", "node2");
map["node3"] = std::make_unique<GenNode>(&node3);
node3.add_input("node1");
auto gn3 = map["node3"].get();
EXPECT_THAT(gn3->ParseInputs(&map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Node 'node3' has a non-control "
"input from 'node1' at index 2 but its operation "
"'Sub' defines only 2 inputs.")));
}
TEST(GenNodeTest, ParseNodeControlInputsAlwaysOk) {
GenNodeMap map;
NodeDef node1 = MakeNodeConst("node1");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("^node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(gn1->ParseInputs(&map), Eq(absl::OkStatus()));
EXPECT_THAT(DumpLinkMap(gn1->links()), ElementsAre(
"iC: node1[oC]",
"oC: node1[iC]"
));
}
TEST(GenNodeTest, ParseNodeInvalidInput) {
GenNodeMap map;
NodeDef node1 = MakeNodeAddN("node1", "node2", "node3");
map["node1"] = std::make_unique<GenNode>(&node1);
node1.add_input("node1");
auto gn1 = map["node1"].get();
ASSERT_THAT(
gn1->ParseInputs(&map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node1' input 0 refers to a non-existing node 'node2'.")));
}
TEST(GenNodeTest, BuildGraphInMap) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
(*graph.add_node()) =
MakeNodeBroadcastGradientArgs("node3", "node1", "node2");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map), Eq(absl::OkStatus()));
ASSERT_THAT(map.find("node1"), Ne(map.end()));
ASSERT_THAT(map.find("node2"), Ne(map.end()));
ASSERT_THAT(map.find("node3"), Ne(map.end()));
EXPECT_THAT(map["node1"]->name(), Eq("node1"));
EXPECT_THAT(map["node2"]->name(), Eq("node2"));
EXPECT_THAT(map["node3"]->name(), Eq("node3"));
EXPECT_THAT(DumpLinkMap(map["node1"]->links()), ElementsAre(
"o0: node3[i0]"
));
EXPECT_THAT(DumpLinkMap(map["node2"]->links()), ElementsAre(
"i0: node3[o1]",
"i1: node3[o0]",
"o0: node3[i1]"
));
EXPECT_THAT(DumpLinkMap(map["node3"]->links()), ElementsAre(
"i0: node1[o0]",
"i1: node2[o0]",
"o0: node2[i1]",
"o1: node2[i0]"
));
}
TEST(GenNodeTest, BuildGraphInMapDuplicateNode) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeConst("node1");
GenNodeMap map;
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &map),
Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST(GenNodeTest, BuildGraphInMapParseError) {
GraphDef graph;
(*graph.add_node()) = MakeNodeConst("node1");
(*graph.add_node()) = MakeNodeSub("node2", "node3:1", "node3:0");
GenNodeMap map;
ASSERT_THAT(
GenNode::BuildGraphInMap(graph, &map),
Eq(Status(
absl::StatusCode::kInvalidArgument,
"Node 'node2' input 0 refers to a non-existing node 'node3'.")));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/gen_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/gen_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
08c7ac11-785d-49c7-8f9f-282729b8967d | cpp | tensorflow/tensorflow | single_machine | tensorflow/core/grappler/clusters/single_machine.cc | tensorflow/core/grappler/clusters/single_machine_test.cc | #include "tensorflow/core/grappler/clusters/single_machine.h"
#include <atomic>
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id.h"
#include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace grappler {
static std::atomic<bool> already_provisioned(false);
SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus)
: Cluster(timeout_s), expected_init_time_s_(0), closing_(false) {
VLOG(1) << "Number of CPU cores: " << num_cpu_cores
<< " Number of GPUs: " << num_gpus;
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
(*options_.config.mutable_device_count())["CPU"] = 1;
if (num_gpus > 0) {
(*options_.config.mutable_device_count())["GPU"] = num_gpus;
}
CHECK_GE(num_cpu_cores, 1);
options_.config.set_intra_op_parallelism_threads(num_cpu_cores);
options_.config.add_session_inter_op_thread_pool()->set_num_threads(
num_cpu_cores);
if (timeout_s > 0) {
options_.config.set_operation_timeout_in_ms(timeout_s * 1000);
}
}
SingleMachine::~SingleMachine() {
CloseSession(/*use_timeout=*/false).IgnoreError();
thread_pool_.reset();
}
Status SingleMachine::Provision() {
if (already_provisioned) {
return absl::UnavailableError(
"Can't provision more than one single cluster at a time");
}
TF_RETURN_IF_ERROR(ResetSession());
std::vector<DeviceAttributes> devices;
TF_RETURN_IF_ERROR(session_->ListDevices(&devices));
for (const auto& dev : devices) {
DeviceProperties attr;
if (dev.device_type() == "CPU") {
attr = GetLocalCPUInfo();
} else if (dev.device_type() == "GPU") {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(dev.name(), &parsed)) {
return absl::InvalidArgumentError(
absl::StrCat("Not able to parse GPU device name: ", dev.name()));
}
TfDeviceId tf_device_id(parsed.id);
PlatformDeviceId platform_device_id;
Status s =
GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id);
if (!s.ok()) {
return absl::UnavailableError(
absl::StrCat("Unknown TF GPU device with id ", tf_device_id.value(),
": ", s.message()));
}
attr = GetLocalGPUInfo(platform_device_id);
} else if (dev.device_type().find("XLA") == string::npos) {
attr.set_type(dev.device_type());
}
attr.set_memory_size(dev.memory_limit());
devices_[dev.name()] = attr;
}
already_provisioned = true;
if (cpu_allocator_stats_enabled_) {
TF_RETURN_IF_ERROR(ClearAllocatorStats());
}
return absl::OkStatus();
}
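// At most one SingleMachine cluster may be provisioned at a time (enforced
// via the already_provisioned flag). Provisioning resets the session, lists
// its devices, and records their properties and memory limits.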
Status SingleMachine::Initialize(const GrapplerItem& item) {
mutex_lock l(this->last_graph_mu_);
if (last_graph_ != &item.graph || last_graph_id_ != item.id) {
init_ops_ = item.init_ops;
expected_init_time_s_ = item.expected_init_time;
last_graph_ = nullptr;
queue_runner_defs_ = item.queue_runners;
last_graph_id_ = item.id;
}
return absl::OkStatus();
}
Status SingleMachine::Shutdown() {
TF_RETURN_IF_ERROR(ShutdownSession());
mutex_lock l(this->last_graph_mu_);
last_graph_ = nullptr;
already_provisioned = false;
return absl::OkStatus();
}
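// Running a new graph resets the session, executes the init ops under an
// extended timeout, restarts the queue runners (downgrading hardware tracing
// to software tracing), and performs the configured warmup steps; only then
// is the measured run executed and its cost graph merged with the init-op
// and queue-runner costs.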
Status SingleMachine::Run(const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* metadata) {
mutex_lock l(this->last_graph_mu_);
if (last_graph_ != &graph_def) {
TF_RETURN_IF_ERROR(ResetSession());
TF_RETURN_IF_ERROR(session_->Create(graph_def));
if (!init_ops_.empty()) {
init_metadata_ = RunMetadata();
int64_t timeout_s = timeout_s_ + expected_init_time_s_;
TF_RETURN_IF_ERROR(
RunWithTimeout({}, init_ops_, &init_metadata_, timeout_s));
for (auto node : *init_metadata_.mutable_cost_graph()->mutable_node()) {
node.clear_compute_cost();
}
init_metadata_.clear_step_stats();
}
RunOptions queue_options = run_options_;
if (queue_options.trace_level() >= RunOptions::HARDWARE_TRACE) {
queue_options.set_trace_level(RunOptions::SOFTWARE_TRACE);
}
for (size_t i = 0; i < queue_runner_defs_.size(); ++i) {
std::unique_ptr<QueueRunner> queue_runner;
TF_RETURN_IF_ERROR(QueueRunner::New(queue_runner_defs_[i],
coordinator_.get(), &queue_runner));
TF_RETURN_IF_ERROR(queue_runner->StartAndCollectCostGraph(session_.get(),
queue_options));
TF_RETURN_IF_ERROR(coordinator_->RegisterRunner(std::move(queue_runner)));
TF_RETURN_IF_ERROR(coordinator_->GetStatus());
}
for (int i = 0; i < NumWarmupSteps(); ++i) {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
}
}
if (metadata) {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, metadata));
CostGraphDef queue_costs;
TF_RETURN_IF_ERROR(coordinator_->ExportCostGraph(&queue_costs));
MergeCosts(metadata->mutable_cost_graph(), init_metadata_.cost_graph(),
queue_costs);
} else {
TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr));
}
last_graph_ = &graph_def;
return absl::OkStatus();
}
Status SingleMachine::EnablePeakMemoryStats() {
EnableCPUAllocatorStats();
cpu_allocator_stats_enabled_ = true;
return absl::OkStatus();
}
Status SingleMachine::GetPeakMemoryUsage(
std::unordered_map<string, uint64>* device_peak_memory) const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
device_peak_memory->clear();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
absl::optional<AllocatorStats> stats = allocator->GetStats();
(*device_peak_memory)[device->name()] =
(stats ? stats->peak_bytes_in_use : 0);
}
return absl::OkStatus();
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata) {
return RunWithTimeout(feed, fetch, run_metadata, timeout_s_);
}
Status SingleMachine::RunWithTimeout(
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch, RunMetadata* run_metadata,
int64_t timeout_s) {
{
mutex_lock l(close_mu_);
CHECK(!closing_);
}
auto status = std::make_shared<Status>();
auto local_metadata = std::make_shared<RunMetadata>();
const bool executed_in_time = ExecuteWithTimeout(
[this, status, local_metadata, feed, fetch]() {
*status = session_->Run(run_options_, feed, {}, fetch, nullptr,
local_metadata.get());
},
timeout_s * 1000, thread_pool_.get());
if (!executed_in_time) {
return absl::DeadlineExceededError(absl::StrCat(
"Failed to run the graph after ", timeout_s, " seconds, aborting"));
} else if (run_metadata && status->ok()) {
*run_metadata = *local_metadata;
}
return *status;
}
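// Stops the queue runners, waits for them to finish, and closes the session;
// with use_timeout set, gives up after timeout_s_ seconds instead of blocking
// indefinitely.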
Status SingleMachine::CloseSession(bool use_timeout) {
if (!session_ || !thread_pool_) {
return absl::OkStatus();
}
{
mutex_lock l(close_mu_);
if (!closing_) {
closing_ = true;
}
}
const bool executed_in_time = ExecuteWithTimeout(
[&]() {
if (this->coordinator_) {
this->coordinator_->RequestStop().IgnoreError();
while (!this->coordinator_->AllRunnersStopped()) {
Env::Default()->SleepForMicroseconds(1000000);
}
this->session_->Close().IgnoreError();
this->coordinator_.reset();
} else {
this->session_->Close().IgnoreError();
}
mutex_lock l2(close_mu_);
closing_ = false;
},
use_timeout ? timeout_s_ * 1000 : -1, thread_pool_.get());
if (!executed_in_time) {
return absl::UnavailableError(
absl::StrCat("Failed to close the previous session after ", timeout_s_,
" seconds, aborting"));
}
return absl::OkStatus();
}
Status SingleMachine::ShutdownSession() {
TF_RETURN_IF_ERROR(CloseSession(/*use_timeout=*/true));
auto n = std::make_shared<Notification>();
Env::Default()->SchedClosure([this, n]() {
thread_pool_.reset();
n->Notify();
});
int64_t timeout_us = 1000000ll * timeout_s_;
const bool notified = WaitForNotificationWithTimeout(n.get(), timeout_us);
if (!notified) {
return absl::UnavailableError(absl::StrCat(
"The session is still running graphs after ", timeout_s_, " seconds"));
}
return absl::OkStatus();
}
Status SingleMachine::ResetSession() {
if (session_) {
LOG(INFO) << "Cleaning up previous session";
TF_RETURN_IF_ERROR(ShutdownSession());
session_.reset();
}
LOG(INFO) << "Starting new session";
thread_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), SanitizeThreadSuffix("single_machine"), 2);
session_.reset(NewSession(options_));
if (!session_) {
return absl::UnknownError("Failed to create session");
}
coordinator_ = std::make_unique<Coordinator>();
device_set_ = std::make_unique<DeviceSet>();
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
for (auto d : device_mgr->ListDevices()) {
device_set_->AddDevice(d);
}
return absl::OkStatus();
}
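// Merges the init-op and queue-runner cost graphs into the main run's cost
// graph, offsetting node ids and rewriting input/control references so ids
// remain unique, and skipping nodes already present in the main graph.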
void SingleMachine::MergeCosts(CostGraphDef* graph_costs,
const CostGraphDef& init_costs,
const CostGraphDef& queue_costs) {
graph_costs->mutable_node()->Reserve(graph_costs->node_size() +
init_costs.node_size() +
queue_costs.node_size());
std::unordered_set<string> nodes_seen;
int queue_costs_id_offset = graph_costs->node_size();
for (const auto& node : graph_costs->node()) {
nodes_seen.insert(node.name());
if (node.id() >= queue_costs_id_offset) {
queue_costs_id_offset = node.id() + 1;
}
}
int init_costs_id_offset = queue_costs_id_offset + queue_costs.node_size();
for (const auto& node : queue_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + queue_costs_id_offset);
if (new_node->id() >= init_costs_id_offset) {
init_costs_id_offset = new_node->id() + 1;
}
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
queue_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += queue_costs_id_offset;
}
}
for (const auto& node : init_costs.node()) {
if (nodes_seen.find(node.name()) != nodes_seen.end()) {
continue;
}
auto* new_node = graph_costs->add_node();
new_node->MergeFrom(node);
new_node->set_id(node.id() + init_costs_id_offset);
for (auto& input_info : *new_node->mutable_input_info()) {
input_info.set_preceding_node(input_info.preceding_node() +
init_costs_id_offset);
}
for (auto& control_input : *new_node->mutable_control_input()) {
control_input += init_costs_id_offset;
}
}
}
Status SingleMachine::ClearAllocatorStats() const {
if (!cpu_allocator_stats_enabled_) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation for CPU is not enabled.");
}
const DeviceMgr* device_mgr;
TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr));
std::vector<Device*> devices = device_mgr->ListDevices();
for (Device* device : devices) {
auto* allocator = device->GetAllocator(AllocatorAttributes());
if (!allocator->TracksAllocationSizes()) {
return Status(absl::StatusCode::kInvalidArgument,
"Tracking allocation is not enabled.");
}
if (!allocator->ClearStats()) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Clearing allocation stats is not supported for ",
device->name()));
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/single_machine.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class SingleMachineTest : public ::testing::Test {
public:
void SetUp() override {
#if TENSORFLOW_USE_ROCM
int timeout_s = 10;
#else
int timeout_s = 5;
#endif
#ifdef THREAD_SANITIZER
timeout_s *= 5;
#endif
cluster_ = std::make_unique<SingleMachine>(timeout_s, /*num_cpu_cores=*/3,
/*num_gpus=*/0);
TF_CHECK_OK(cluster_->EnablePeakMemoryStats());
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override {
if (cluster_) {
TF_CHECK_OK(cluster_->Shutdown());
}
cluster_.reset();
}
protected:
std::unique_ptr<SingleMachine> cluster_;
};
TEST_F(SingleMachineTest, ClusterType) {
CHECK_EQ("single_machine", cluster_->type());
}
TEST_F(SingleMachineTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
const int64_t start_micros = Env::Default()->NowMicros();
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
const int64_t run_duration_micros =
Env::Default()->NowMicros() - start_micros;
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
EXPECT_LE(8, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
EXPECT_LE(0, node.compute_cost());
EXPECT_GE(run_duration_micros, node.compute_cost());
}
}
TEST_F(SingleMachineTest, Queue) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, true,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
}
TEST_F(SingleMachineTest, MultipleItems) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
for (int i = 0; i < 3; ++i) {
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata1;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata1));
RunMetadata metadata2;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata2));
EXPECT_LE(6, metadata1.cost_graph().node_size());
for (const auto& node : metadata1.cost_graph().node()) {
if (node.name()[0] == '_' || node.name().find("/_") != string::npos ||
node.name() == "queue") {
continue;
}
#ifndef INTEL_MKL
EXPECT_EQ(1, node.output_info_size());
#endif
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
}
for (int i = 0; i < metadata1.cost_graph().node_size(); ++i) {
metadata1.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata1.clear_step_stats();
}
for (int i = 0; i < metadata2.cost_graph().node_size(); ++i) {
metadata2.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0);
metadata2.clear_step_stats();
}
string s1;
::tensorflow::protobuf::TextFormat::PrintToString(metadata1, &s1);
string s2;
::tensorflow::protobuf::TextFormat::PrintToString(metadata2, &s2);
EXPECT_EQ(s1, s2);
}
}
TEST_F(SingleMachineTest, GraphOptimizations) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Const(root.WithOpName("zero"), 0.0f, {2, 3});
auto one = ops::Const(root.WithOpName("one"), 1.0f, {2, 3});
auto add = ops::Add(root.WithOpName("add"), zero, one);
auto square = ops::Square(root.WithOpName("square"), add);
auto new_shape = ops::Const(root.WithOpName("new_shape"), {3, -1}, {2});
auto reshaped = ops::Reshape(root.WithOpName("reshaped"), square, new_shape);
auto final_shape = ops::Shape(root.WithOpName("final_shape"), reshaped);
auto expected_shape =
ops::Const(root.WithOpName("expected_shape"), {3, 2}, {2});
auto valid =
ops::Equal(root.WithOpName("valid"), final_shape, expected_shape);
auto all_dims = ops::Const(root.WithOpName("all_dims"), {0}, {1});
auto all_valid = ops::All(root.WithOpName("all_valid"), valid, all_dims);
auto assert_valid = ops::Assert(root.WithOpName("assert_valid"), all_valid,
{final_shape.output});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("assert_valid");
for (auto& node : *item.graph.mutable_node()) {
node.set_device("/cpu:0");
}
TF_CHECK_OK(cluster_->Shutdown());
cluster_->DisableOptimizer(true);
TF_CHECK_OK(cluster_->Provision());
RunMetadata metadata;
TF_CHECK_OK(cluster_->Initialize(item));
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::set<string> cost_nodes;
for (const auto& node : metadata.cost_graph().node()) {
#ifdef INTEL_MKL
if (node.name()[0] == '_' || node.name().find("/_") != string::npos) {
continue;
}
cost_nodes.insert(node.name());
#else
if (node.name()[0] != '_') {
cost_nodes.insert(node.name());
}
#endif
}
const std::set<string> expected_cost_nodes = {
"zero", "one", "add", "square",
"new_shape", "reshaped", "final_shape", "expected_shape",
"valid", "all_dims", "all_valid", "assert_valid"};
EXPECT_EQ(expected_cost_nodes, cost_nodes);
}
TEST_F(SingleMachineTest, TimeOuts) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto q = ops::FIFOQueue(root.WithOpName("queue"), {DataType::DT_INT32});
auto dequeue =
ops::QueueDequeue(root.WithOpName("dequeue"), q, {DataType::DT_INT32});
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("dequeue");
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
Status s1 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s1));
Status s2 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata);
EXPECT_TRUE(errors::IsDeadlineExceeded(s2));
}
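// Builds a raw while loop (Enter/Merge/LoopCond/Switch/Identity/
// NextIteration/Exit) whose condition is always true, so Run() must hit the
// deadline and Shutdown() must report that the graph is still running; the
// exit codes are checked by EXPECT_EXIT in the InfiniteLoops test below.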
static void RunInfiniteTFLoop() {
GrapplerItem item;
NodeDef* shp = item.graph.add_node();
shp->set_name("shape");
shp->set_op("Const");
(*shp->mutable_attr())["dtype"].set_type(DT_INT32);
Tensor shp_tensor(DT_INT32, TensorShape({1}));
shp_tensor.flat<int32>()(0) = 1;
shp_tensor.AsProtoTensorContent(
(*shp->mutable_attr())["value"].mutable_tensor());
NodeDef* r = item.graph.add_node();
r->set_name("random");
r->set_op("RandomUniform");
(*r->mutable_attr())["dtype"].set_type(DT_FLOAT);
(*r->mutable_attr())["T"].set_type(DT_INT32);
*r->add_input() = "shape";
NodeDef* e = item.graph.add_node();
e->set_name("while/Enter");
e->set_op("Enter");
(*e->mutable_attr())["T"].set_type(DT_FLOAT);
(*e->mutable_attr())["frame_name"].set_s("while/while/");
*e->add_input() = "random";
NodeDef* m = item.graph.add_node();
m->set_name("while/Merge");
m->set_op("Merge");
(*m->mutable_attr())["T"].set_type(DT_FLOAT);
(*m->mutable_attr())["N"].set_i(2);
*m->add_input() = "while/Enter";
*m->add_input() = "while/NextIteration";
NodeDef* t = item.graph.add_node();
t->set_name("always_true");
t->set_op("Const");
(*t->mutable_attr())["dtype"].set_type(DT_BOOL);
*t->add_input() = "^while/Merge";
Tensor true_tensor(DT_BOOL, TensorShape());
true_tensor.flat<bool>()(0) = true;
true_tensor.AsProtoTensorContent(
(*t->mutable_attr())["value"].mutable_tensor());
NodeDef* c = item.graph.add_node();
c->set_name("while/LoopCond");
c->set_op("LoopCond");
*c->add_input() = "always_true";
NodeDef* s = item.graph.add_node();
s->set_name("while/Switch");
(*s->mutable_attr())["T"].set_type(DT_FLOAT);
s->set_op("Switch");
*s->add_input() = "while/Merge";
*s->add_input() = "while/LoopCond";
NodeDef* i = item.graph.add_node();
i->set_name("while/Identity");
i->set_op("Identity");
(*i->mutable_attr())["T"].set_type(DT_FLOAT);
*i->add_input() = "while/Switch:1";
NodeDef* n = item.graph.add_node();
n->set_name("while/NextIteration");
n->set_op("NextIteration");
(*n->mutable_attr())["T"].set_type(DT_FLOAT);
*n->add_input() = "while/Identity";
NodeDef* x = item.graph.add_node();
x->set_name("while/Exit");
x->set_op("Exit");
(*x->mutable_attr())["T"].set_type(DT_FLOAT);
*x->add_input() = "while/Switch";
item.fetch.push_back("while/Exit");
SingleMachine cluster(/*timeout_s=*/5, /*num_cpu_cores=*/3, /*num_gpus=*/0);
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
Status s1 = cluster.Run(item.graph, item.feed, item.fetch, nullptr);
if (!errors::IsDeadlineExceeded(s1)) {
LOG(ERROR) << "Expected 'deadline exceeded' error, got " << s1;
_exit(1);
}
Status s2 = cluster.Shutdown();
if (!errors::IsUnavailable(s2)) {
LOG(ERROR) << "Expected 'unavailable' error, got " << s2;
_exit(2);
}
_exit(0);
}
TEST_F(SingleMachineTest, InfiniteLoops) {
#if !(TENSORFLOW_USE_ROCM)
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EXIT(RunInfiniteTFLoop(), ::testing::ExitedWithCode(0), ".*");
#endif
}
TEST_F(SingleMachineTest, InitializationMemory) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
int batch_size = 10;
Output x =
ops::RandomNormal(s.WithOpName("x"), {batch_size, 1}, DataType::DT_FLOAT);
Output v = ops::Variable(s.WithOpName("v"), TensorShape({batch_size, 1}),
DataType::DT_FLOAT);
Output init = ops::Assign(s.WithOpName("init"), v, x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.init_ops.push_back(init.name());
item.fetch.push_back(v.name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found = false;
for (const auto& node : metadata.cost_graph().node()) {
found |= (node.name() == NodeName(init.name()));
}
EXPECT_TRUE(found);
}
namespace {
template <class T>
inline void SetNodeAttr(const string& key, const T& value, NodeDef* node) {
AttrValue attr_value;
SetAttrValue(value, &attr_value);
auto* attr_map = node->mutable_attr();
(*attr_map)[key] = attr_value;
}
template <>
inline void SetNodeAttr(const string& key, const Tensor& tensor,
NodeDef* node) {
TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
SetNodeAttr(key, tensor_proto, node);
}
}
TEST_F(SingleMachineTest, PersistentMemory) {
GrapplerItem item;
const DataType key_dtype = DT_INT64;
const DataType data_dtype = DT_INT64;
NodeDef* hashtable_node = item.graph.add_node();
hashtable_node->set_op("HashTable");
hashtable_node->set_name("hash_table");
SetNodeAttr("key_dtype", key_dtype, hashtable_node);
SetNodeAttr("value_dtype", data_dtype, hashtable_node);
NodeDef* keys_node = item.graph.add_node();
keys_node->set_op("Const");
keys_node->set_name("table_keys");
SetNodeAttr("dtype", key_dtype, keys_node);
Tensor keys(key_dtype, TensorShape{2});
keys.vec<int64_t>()(0) = 123;
keys.vec<int64_t>()(1) = 321;
SetNodeAttr("value", keys, keys_node);
NodeDef* values_node = item.graph.add_node();
values_node->set_op("Const");
values_node->set_name("table_values");
SetNodeAttr("dtype", data_dtype, values_node);
Tensor values(data_dtype, TensorShape{2});
values.vec<int64_t>()(0) = 789;
values.vec<int64_t>()(1) = 987;
SetNodeAttr("value", values, values_node);
NodeDef* init_table_node = item.graph.add_node();
init_table_node->set_op("InitializeTable");
init_table_node->set_name("initialize_table");
SetNodeAttr("Tkey", key_dtype, init_table_node);
SetNodeAttr("Tval", data_dtype, init_table_node);
*init_table_node->add_input() = "hash_table";
*init_table_node->add_input() = "table_keys";
*init_table_node->add_input() = "table_values";
item.init_ops.push_back(init_table_node->name());
NodeDef* query_node = item.graph.add_node();
query_node->set_op("Const");
query_node->set_name("query");
SetNodeAttr("dtype", key_dtype, query_node);
Tensor query(key_dtype, TensorShape({}));
query.flat<int64_t>()(0) = 0;
SetNodeAttr("value", query, query_node);
NodeDef* default_value_node = item.graph.add_node();
default_value_node->set_op("Const");
default_value_node->set_name("default_table_value");
SetNodeAttr("dtype", data_dtype, default_value_node);
Tensor dflt(data_dtype, TensorShape({}));
dflt.flat<int64_t>()(0) = 456;
SetNodeAttr("value", dflt, default_value_node);
NodeDef* lookup_node = item.graph.add_node();
lookup_node->set_op("LookupTableFind");
lookup_node->set_name("table_lookup");
SetNodeAttr("Tin", key_dtype, lookup_node);
SetNodeAttr("Tout", data_dtype, lookup_node);
*lookup_node->add_input() = "hash_table";
*lookup_node->add_input() = "query";
*lookup_node->add_input() = "default_table_value";
item.fetch.push_back(lookup_node->name());
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
bool found_table_init = false;
bool found_hashtable = false;
for (const auto& node : metadata.cost_graph().node()) {
if (node.name() == "hash_table") {
found_hashtable = true;
EXPECT_EQ(0, node.persistent_memory_size());
} else if (node.name() == "initialize_table") {
found_table_init = true;
EXPECT_LE(4 * sizeof(int64_t), node.persistent_memory_size());
}
}
EXPECT_TRUE(found_table_init);
EXPECT_TRUE(found_hashtable);
}
GrapplerItem CreateGrapplerItemWithResourceMemory() {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), TensorShape({128, 256}),
DataType::DT_FLOAT);
Output a_init =
ops::RandomNormal(s.WithOpName("a/init"), {128, 256}, DataType::DT_FLOAT);
Output a_init_assign = ops::Assign(s.WithOpName("a/init/assign"), a, a_init);
Output b =
ops::VarHandleOp(s.WithOpName("b"), DataType::DT_FLOAT, {256, 512});
Output b_read =
ops::ReadVariableOp(s.WithOpName("b/read"), b, DataType::DT_FLOAT);
Output b_init =
ops::RandomNormal(s.WithOpName("b/init"), {256, 512}, DataType::DT_FLOAT);
auto b_init_assign =
ops::AssignVariableOp(s.WithOpName("b/init/assign"), b, b_init);
ops::FIFOQueue queue(s.WithOpName("queue"), {DataType::DT_STRING});
Output some_string =
ops::Const(s.WithOpName("some_string"), string("nothing"));
ops::QueueEnqueue enqueue(s.WithOpName("enqueue"), queue, {some_string});
ops::QueueDequeue dequeue(s.WithOpName("dequeue"), queue,
{DataType::DT_STRING});
ops::IdentityReader reader(s.WithOpName("identity_reader"));
ops::ReaderRead read(s.WithOpName("read_from_queue"), reader, queue);
Output var_mul = ops::MatMul(s.WithOpName("var_matmul"), a, b_read);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
QueueRunnerDef queue_runner;
queue_runner.set_queue_name("queue");
*queue_runner.add_enqueue_op_name() = "enqueue";
item.queue_runners.push_back(queue_runner);
item.init_ops.push_back("a/init/assign");
item.init_ops.push_back("b/init/assign");
item.fetch.push_back("var_matmul");
item.fetch.push_back("dequeue");
return item;
}
#if defined(PLATFORM_GOOGLE)
TEST_F(SingleMachineTest, ReleaseMemoryAfterDestruction) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
std::unordered_map<string, uint64> device_peak_memory_before;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_before));
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
EXPECT_EQ(device_peak_memory.size(), 1);
EXPECT_GT(device_peak_memory.begin()->second, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
std::unordered_map<string, uint64> device_peak_memory_after;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_after));
TF_CHECK_OK(cluster_->Shutdown());
EXPECT_EQ(device_peak_memory_before.size(), 1);
EXPECT_EQ(device_peak_memory_after.size(), 1);
EXPECT_LT(device_peak_memory_before.begin()->second, 400);
EXPECT_LT(device_peak_memory_after.begin()->second, 400);
}
TEST_F(SingleMachineTest, PeakMemory) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
std::unordered_map<string, uint64> device_peak_memory;
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
uint64 cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_GT(cpu_memory, 0);
TF_CHECK_OK(cluster_->Shutdown());
TF_CHECK_OK(cluster_->Provision());
device_peak_memory.clear();
TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory));
TF_CHECK_OK(cluster_->Shutdown());
ASSERT_NE(
device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"),
device_peak_memory.end());
cpu_memory =
device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"];
EXPECT_LT(cpu_memory, 200);
}
TEST_F(SingleMachineTest, PeakMemoryStatsNotEnabled) {
GrapplerItem item = CreateGrapplerItemWithResourceMemory();
TF_CHECK_OK(cluster_->Shutdown());
cluster_.reset();
SingleMachine cluster(/*timeout_s=*/60, /*num_cpu_cores=*/3, /*num_gpus=*/0);
TF_CHECK_OK(cluster.Provision());
TF_CHECK_OK(cluster.Initialize(item));
std::unordered_map<string, uint64> device_peak_memory;
Status s = cluster.GetPeakMemoryUsage(&device_peak_memory);
TF_CHECK_OK(cluster.Shutdown());
ASSERT_FALSE(s.ok());
EXPECT_TRUE(errors::IsInvalidArgument(s));
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/single_machine.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/single_machine_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b6c598b-463d-44f0-8dff-85cc24436344 | cpp | tensorflow/tensorflow | virtual_cluster | tensorflow/core/grappler/clusters/virtual_cluster.cc | tensorflow/core/grappler/clusters/virtual_cluster_test.cc | #include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/clusters/utils.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
namespace tensorflow {
namespace grappler {
VirtualCluster::VirtualCluster(
const std::unordered_map<string, DeviceProperties>& devices)
: VirtualCluster(devices, std::make_unique<OpLevelCostEstimator>(),
ReadyNodeManagerFactory("FirstReady")) {}
VirtualCluster::VirtualCluster(
const std::unordered_map<string, DeviceProperties>& devices,
std::unique_ptr<OpLevelCostEstimator> node_estimator,
std::unique_ptr<ReadyNodeManager> node_manager)
: Cluster(0) {
devices_ = devices;
estimator_ = std::make_unique<AnalyticalCostEstimator>(
this, std::move(node_estimator), std::move(node_manager),
/*use_static_shapes=*/true, /*use_aggressive_shape_inference=*/false);
}
VirtualCluster::VirtualCluster(const DeviceSet* device_set)
: VirtualCluster(std::unordered_map<string, DeviceProperties>()) {
device_set_ = device_set;
for (const auto& device : device_set_->devices()) {
DeviceProperties props = GetDeviceInfo(device->parsed_name());
if (props.type() == "UNKNOWN") continue;
auto attrs = device->attributes();
props.set_memory_size(attrs.memory_limit());
devices_[device->name()] = props;
}
}
VirtualCluster::~VirtualCluster() {}
Status VirtualCluster::Provision() { return absl::OkStatus(); }
Status VirtualCluster::Initialize(const GrapplerItem& item) {
return absl::OkStatus();
}
Status VirtualCluster::Run(const GraphDef& graph,
const std::vector<std::pair<string, Tensor>>& feed,
const std::vector<string>& fetch,
RunMetadata* metadata) {
GrapplerItem item;
item.graph = graph;
item.feed = feed;
item.fetch = fetch;
return Run(item, metadata);
}
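// Simulates the item on the virtual devices: costs are predicted
// analytically, and the run fails with RESOURCE_EXHAUSTED if the estimated
// peak memory on any device exceeds that device's declared memory size.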
Status VirtualCluster::Run(const GrapplerItem& item, RunMetadata* metadata) {
if (metadata) {
metadata->clear_step_stats();
metadata->clear_cost_graph();
metadata->clear_partition_graphs();
}
TF_RETURN_IF_ERROR(estimator_->Initialize(item));
TF_RETURN_IF_ERROR(
estimator_->PredictCosts(item.graph, metadata, /*costs=*/nullptr));
const std::unordered_map<string, DeviceProperties>& device = GetDevices();
std::unordered_map<string, int64_t> peak_mem_usage =
estimator_->GetScheduler()->GetPeakMemoryUsage();
for (const auto& mem_usage : peak_mem_usage) {
const string& device_name = mem_usage.first;
auto it = device.find(device_name);
if (it == device.end()) {
continue;
}
const DeviceProperties& dev = it->second;
if (dev.memory_size() <= 0) {
continue;
}
int64_t peak_mem = mem_usage.second;
if (peak_mem >= dev.memory_size()) {
return errors::ResourceExhausted(
"Graph requires ", peak_mem, " bytes of memory on device ",
device_name, " to run ", " but device only has ", dev.memory_size(),
" available.");
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include <memory>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class VirtualClusterTest : public ::testing::Test {
public:
void SetUp() override {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
cpu_device.set_memory_size(1024 * 1024);
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override {
TF_CHECK_OK(cluster_->Shutdown());
cluster_.reset();
}
protected:
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(VirtualClusterTest, ClusterType) {
CHECK_EQ("virtual", cluster_->type());
}
TEST_F(VirtualClusterTest, CostModel) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_CHECK_OK(cluster_->Initialize(item));
RunMetadata metadata;
TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata));
EXPECT_LE(4, metadata.cost_graph().node_size());
for (const auto& node : metadata.cost_graph().node()) {
if (node.name().find("Const/Const") != string::npos) {
continue;
}
EXPECT_EQ(1, node.output_info_size());
EXPECT_EQ(40, node.output_info(0).size());
const TensorShapeProto& shape = node.output_info(0).shape();
EXPECT_EQ(2, shape.dim_size());
EXPECT_EQ(10, shape.dim(0).size());
EXPECT_EQ(1, shape.dim(1).size());
if (node.name() == "x") {
EXPECT_EQ(1500, node.compute_cost());
} else {
EXPECT_EQ(2500, node.compute_cost());
}
}
for (const auto& dev_stat : metadata.step_stats().dev_stats()) {
EXPECT_EQ("/job:localhost/replica:0/task:0/cpu:0", dev_stat.device());
for (const auto& node : dev_stat.node_stats()) {
if (node.node_name() == "AddN") {
EXPECT_EQ(2500, node.op_end_rel_micros());
}
}
}
}
TEST_F(VirtualClusterTest, OutOfMemory) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
auto zero = ops::Variable(root.WithOpName("zero"), {1024, 1024}, DT_FLOAT);
auto identity = ops::Identity(root.WithOpName("i"), zero);
auto identity2 = ops::Identity(root.WithOpName("i2"), identity);
GrapplerItem item;
TF_CHECK_OK(root.ToGraphDef(&item.graph));
item.fetch.push_back("i2");
TF_CHECK_OK(cluster_->Initialize(item));
Status s = cluster_->Run(item.graph, item.feed, item.fetch, nullptr);
EXPECT_EQ(error::RESOURCE_EXHAUSTED, s.code());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/virtual_cluster.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/virtual_cluster_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8516a4c9-8e79-4fdf-822d-a8ebae3ce68e | cpp | tensorflow/tensorflow | structure_verifier | tensorflow/core/grappler/verifiers/structure_verifier.cc | tensorflow/core/grappler/verifiers/structure_verifier_test.cc | #include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <string>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/verifiers/graph_verifier.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
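// Validates the graph's structure: every op must exist in the registry, node
// names must be unique, and the graph must admit a topological order (no
// cycles). All failures are accumulated into one concatenated status.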
Status StructureVerifier::Verify(const GraphDef& graph) {
StatusGroup status_group;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.library());
status_group.Update(tensorflow::graph::ValidateGraphDefAgainstOpRegistry(
graph, function_library));
status_group.Update(tensorflow::graph::VerifyNoDuplicateNodeNames(graph));
std::vector<const NodeDef*> topo_order;
status_group.Update(ComputeTopologicalOrder(graph, &topo_order));
return status_group.as_concatenated_status();
}
}
} | #include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <memory>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/parsing_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class StructureVerifierTest : public ::testing::Test {
protected:
StructureVerifierTest() { verifier_ = std::make_unique<StructureVerifier>(); }
void SetGraph(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &graph_));
}
GraphDef graph_;
std::unique_ptr<StructureVerifier> verifier_;
};
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
TEST_F(StructureVerifierTest, ValidGraphs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GraphDef graph;
TF_CHECK_OK(s.ToGraphDef(&graph));
TF_EXPECT_OK(verifier_->Verify(graph));
SetGraph(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
TF_EXPECT_OK(verifier_->Verify(graph_));
}
TEST_F(StructureVerifierTest, OpNotRegistered) {
SetGraph(
"node { name: 'input' op: 'OpNotRegistered' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsNotFound(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Op type not registered"));
}
TEST_F(StructureVerifierTest, DuplicateNodeNames) {
SetGraph(
"node { name: 'A' op: 'TestParams' }"
"node { name: 'A' op: 'TestInput' }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsAlreadyExists(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Node already exists:"));
}
TEST_F(StructureVerifierTest, GraphWithInvalidCycle) {
SetGraph(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StrContains(
status.message(), "The graph couldn't be sorted in topological order"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/verifiers/structure_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/verifiers/structure_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
410e7e92-82a6-4242-b4e3-ef2747fc3700 | cpp | tensorflow/tensorflow | canonicalizer | tensorflow/core/grappler/utils/canonicalizer.cc | tensorflow/core/grappler/utils/canonicalizer_test.cc | #include "tensorflow/core/grappler/utils/canonicalizer.h"
#include <algorithm>
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
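// Canonicalizes a single node: the regular inputs of commutative ops are
// sorted so that equivalent nodes compare equal, and the control inputs
// (which always follow the regular inputs) are sorted and deduplicated.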
void CanonicalizeNode(NodeDef* node) {
if (node->input_size() < 2) return;
int index = 0;
for (; index < node->input_size(); ++index) {
if (IsControlInput(node->input(index))) {
break;
}
}
auto* input = node->mutable_input();
if (IsCommutative(*node) && index > 0) {
std::sort(input->begin(), input->begin() + index);
}
if (index < node->input_size()) {
std::sort(input->begin() + index, input->end());
input->erase(std::unique(input->begin() + index, input->end()),
input->end());
}
}
void CanonicalizeGraph(GraphDef* graph) {
for (int i = 0; i < graph->node_size(); ++i) {
CanonicalizeNode(graph->mutable_node(i));
}
}
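// Shrinks the graph's serialized size by compressing the "value" tensor
// proto of every Const/HostConst node in place (e.g. constants with highly
// repetitive content can be stored in a far more compact encoding).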
void CompressConstants(GraphDef* graph) {
for (int i = 0; i < graph->node_size(); ++i) {
NodeDef* node = graph->mutable_node(i);
if ((IsConstant(*node) || IsHostConstant(*node)) &&
HasNodeAttr(*node, "value")) {
AttrValue& attr_val = (*node->mutable_attr())["value"];
if (attr_val.has_tensor()) {
tensor::CompressTensorProtoInPlace(attr_val.mutable_tensor());
}
}
}
}
}
} | #include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
NodeDef MakeNode(const string& op) {
NodeDef node;
node.set_name("node");
node.set_op(op);
*node.add_input() = "b";
*node.add_input() = "a";
*node.add_input() = "^z";
*node.add_input() = "^y";
*node.add_input() = "^x";
*node.add_input() = "^z";
return node;
}
void Verify(const NodeDef& node) {
EXPECT_EQ(node.name(), "node");
ASSERT_EQ(node.input_size(), 5);
if (node.op() == "Div") {
EXPECT_EQ(node.input(0), "b");
EXPECT_EQ(node.input(1), "a");
} else {
EXPECT_EQ(node.input(0), "a");
EXPECT_EQ(node.input(1), "b");
}
EXPECT_EQ(node.input(2), "^x");
EXPECT_EQ(node.input(3), "^y");
EXPECT_EQ(node.input(4), "^z");
}
TEST(CanonicalizeNode, NonCommutative) {
NodeDef node = MakeNode("Div");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeNode, Commutative) {
NodeDef node = MakeNode("Mul");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeGraph, Simple) {
GraphDef graph;
*graph.add_node() = MakeNode("Div");
*graph.add_node() = MakeNode("Mul");
CanonicalizeGraph(&graph);
for (const auto& node : graph.node()) {
Verify(node);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/canonicalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/canonicalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fadef9d-22c1-478a-82f1-11ef5eabfa35 | cpp | tensorflow/tensorflow | symbolic_shapes | tensorflow/core/grappler/utils/symbolic_shapes.cc | tensorflow/core/grappler/utils/symbolic_shapes_test.cc | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include <algorithm>
#include <set>
#include <unordered_map>
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace grappler {
namespace {
BCast::Vec ShapeDims(const TensorShapeProto& shape) {
BCast::Vec dims;
dims.reserve(shape.dim_size());
for (int i = 0; i < shape.dim_size(); ++i)
dims.push_back(shape.dim(i).size());
return dims;
}
}
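// Dimension size convention used below: size >= 0 is a statically known
// dimension, size == -1 is fully unknown, and size <= -2 is an unknown
// dimension that is known symbolically (two dimensions carrying the same
// negative id are guaranteed to have the same runtime value).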
bool IsKnown(const TensorShapeProto::Dim& dim) { return dim.size() >= 0; }
bool IsKnownSymbolically(const TensorShapeProto::Dim& dim) {
return dim.size() <= -2;
}
bool IsUnknown(const TensorShapeProto::Dim& dim) { return dim.size() == -1; }
bool ShapeIsSymbolicallyDefined(const TensorShapeProto& shape) {
return !shape.unknown_rank() &&
std::all_of(
shape.dim().begin(), shape.dim().end(),
[](const TensorShapeProto::Dim& dim) { return !IsUnknown(dim); });
}
bool ShapeIsSymbolicallyDefined(const OpInfo::TensorProperties& properties) {
return ShapeIsSymbolicallyDefined(properties.shape());
}
int Rank(const TensorShapeProto& shape) {
if (shape.unknown_rank()) {
return -1;
}
return shape.dim_size();
}
int64_t NumCoefficients(const TensorShapeProto& shape) {
if (shape.unknown_rank()) {
return -1;
}
int64_t num_coefficients = 1;
for (const auto& dim : shape.dim()) {
if (dim.size() < 0) {
return -1;
}
num_coefficients *= dim.size();
}
return num_coefficients;
}
bool ShapesSymbolicallyEqual(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (left.unknown_rank() || right.unknown_rank() ||
left.dim_size() != right.dim_size()) {
return false;
}
for (int i = 0; i < left.dim_size(); ++i) {
const auto& ldim = left.dim(i);
const auto& rdim = right.dim(i);
if (IsUnknown(ldim) || IsUnknown(rdim) || ldim.size() != rdim.size()) {
return false;
}
}
return true;
}
bool ShapesSymbolicallyEqual(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return ShapesSymbolicallyEqual(left.shape(), right.shape());
}
bool ShapesBroadcastable(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
            /*fewer_dims_optimization=*/false);
return bcast.IsValid();
}
bool ShapesBroadcastable(const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return ShapesBroadcastable(left.shape(), right.shape());
}
bool ShapeAfterBroadcast(const TensorShapeProto& left,
const TensorShapeProto& right,
TensorShapeProto* output_shape) {
if (!ShapeIsSymbolicallyDefined(left) || !ShapeIsSymbolicallyDefined(right)) {
return false;
}
BCast bcast(ShapeDims(left), ShapeDims(right),
            /*fewer_dims_optimization=*/false);
if (!bcast.IsValid()) {
return false;
}
output_shape->set_unknown_rank(false);
output_shape->clear_dim();
for (const auto& dim : bcast.output_shape()) {
output_shape->add_dim()->set_size(dim);
}
return true;
}
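// Returns true only when the size of `left` is provably smaller than the
// size of `right`: known dimensions are multiplied out, symbolic dimensions
// with the same id cancel across the two shapes, and any leftover symbolic
// or fully unknown dimension makes the comparison inconclusive (false).
// Note that every -1 dimension receives a fresh id, so unknown dimensions
// never cancel against each other.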
bool CompareSymbolicallyShapedTensorSizes(const TensorShapeProto& left,
const TensorShapeProto& right) {
if (left.unknown_rank() || right.unknown_rank()) {
return false;
}
int64_t left_defined_size = 1;
int64_t right_defined_size = 1;
std::unordered_map<int64_t, int64_t> left_unknown_dims;
std::unordered_map<int64_t, int64_t> right_unknown_dims;
int64_t unknown_dim_id = 1;
auto process_dimensions =
    [&unknown_dim_id](const TensorShapeProto& shape, int64_t* defined_size,
                      std::unordered_map<int64_t, int64_t>* unknown_dims) {
for (int i = 0; i < shape.dim_size(); ++i) {
const auto& dim = shape.dim(i);
int64_t dim_size = dim.size();
if (dim_size > 0) {
*defined_size *= dim_size;
} else if (IsUnknown(dim)) {
++(*unknown_dims)[unknown_dim_id++];
} else if (IsKnownSymbolically(dim)) {
++(*unknown_dims)[dim_size];
}
}
};
process_dimensions(left, &left_defined_size, &left_unknown_dims);
process_dimensions(right, &right_defined_size, &right_unknown_dims);
std::set<int64_t> unknown_dims;
for (const auto& el : left_unknown_dims) unknown_dims.insert(el.first);
for (const auto& el : right_unknown_dims) unknown_dims.insert(el.first);
for (int64_t unknown_dim : unknown_dims) {
int64_t co_occurrence = std::min(left_unknown_dims[unknown_dim],
right_unknown_dims[unknown_dim]);
left_unknown_dims[unknown_dim] -= co_occurrence;
right_unknown_dims[unknown_dim] -= co_occurrence;
}
int64_t left_unbalanced_unknown_dims = 0;
int64_t right_unbalanced_unknown_dims = 0;
for (const auto& el : left_unknown_dims)
left_unbalanced_unknown_dims += el.second;
for (const auto& el : right_unknown_dims)
right_unbalanced_unknown_dims += el.second;
if (left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims == 0) {
return left_defined_size < right_defined_size;
}
if (left_defined_size <= right_defined_size &&
left_unbalanced_unknown_dims == 0 && right_unbalanced_unknown_dims > 0) {
return true;
}
return false;
}
bool CompareSymbolicallyShapedTensorSizes(
const OpInfo::TensorProperties& left,
const OpInfo::TensorProperties& right) {
return CompareSymbolicallyShapedTensorSizes(left.shape(), right.shape());
}
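// Returns the exact ratio numerator_size / denominator_size when it can be
// computed: no dimension may be fully unknown (-1), every symbolic dimension
// on either side must cancel against an identical one on the other side, and
// the denominator product must be non-zero. Returns -1 otherwise.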
int64_t ComputeSizeRatio(const TensorShapeProto& numerator,
const TensorShapeProto& denominator) {
if (numerator.unknown_rank() || denominator.unknown_rank()) {
return -1;
}
std::multiset<int> symbolic_dims;
int64_t num = 1;
for (const auto& dim : numerator.dim()) {
if (dim.size() == -1) {
return -1;
} else if (dim.size() < -1) {
symbolic_dims.insert(dim.size());
} else {
num *= dim.size();
}
}
int64_t denom = 1;
for (const auto& dim : denominator.dim()) {
if (dim.size() == -1) {
return -1;
} else if (dim.size() < -1) {
auto it = symbolic_dims.find(dim.size());
if (it == symbolic_dims.end()) {
return -1;
}
symbolic_dims.erase(it);
} else {
denom *= dim.size();
}
}
if (denom == 0) {
return -1;
}
if (!symbolic_dims.empty()) {
return -1;
}
return num / denom;
}
}
} | #include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class SymbolicShapesTest : public ::testing::Test {
protected:
TensorShapeProto MakeUnknown() {
TensorShapeProto shape;
shape.set_unknown_rank(true);
return shape;
}
TensorShapeProto MakeShape(std::vector<int> dims) {
TensorShapeProto shape;
for (int dim_size : dims) {
TensorShapeProto::Dim dim;
dim.set_size(dim_size);
*shape.add_dim() = dim;
}
return shape;
}
};
bool operator<(const TensorShapeProto& lhs, const TensorShapeProto& rhs) {
return CompareSymbolicallyShapedTensorSizes(lhs, rhs);
}
TEST_F(SymbolicShapesTest, ShapeIsSymbolicallyDefined) {
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeUnknown()));
EXPECT_FALSE(ShapeIsSymbolicallyDefined(MakeShape({-1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({1, 2})));
EXPECT_TRUE(ShapeIsSymbolicallyDefined(MakeShape({-2, 2})));
}
TEST_F(SymbolicShapesTest, ShapesSymbolicallyEqual) {
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeUnknown(), MakeUnknown()));
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-1, 2}), MakeShape({-1, 2})));
EXPECT_FALSE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-3, 2})));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), MakeShape({1, 2})));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), MakeShape({-2, 2})));
}
TEST_F(SymbolicShapesTest, ShapesBroadcastable) {
EXPECT_FALSE(ShapesBroadcastable(MakeUnknown(), MakeUnknown()));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2}), MakeShape({1, -3})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-1, 2}), MakeShape({-1, 2})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-3, 2})));
EXPECT_FALSE(ShapesBroadcastable(MakeShape({-2, 4}), MakeShape({-2, 8})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({1, 2}), MakeShape({1, 2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 2}), MakeShape({-2, 2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 32}), MakeShape({-2, 1})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -2})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-2, 1}), MakeShape({1, -3})));
EXPECT_TRUE(ShapesBroadcastable(MakeShape({-3}), MakeShape({-2, -3})));
TensorShapeProto output_shape;
EXPECT_TRUE(
ShapeAfterBroadcast(MakeShape({1, 2}), MakeShape({1, 2}), &output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({1, 2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 2}), MakeShape({-2, 2}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 32}), MakeShape({-2, 1}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, 32}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -2}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -2}), output_shape));
EXPECT_TRUE(ShapeAfterBroadcast(MakeShape({-2, 1}), MakeShape({1, -3}),
&output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
EXPECT_TRUE(
ShapeAfterBroadcast(MakeShape({-3}), MakeShape({-2, -3}), &output_shape));
EXPECT_TRUE(ShapesSymbolicallyEqual(MakeShape({-2, -3}), output_shape));
}
TEST_F(SymbolicShapesTest, CompareSymbolicallyShapedTensorSizes) {
EXPECT_TRUE(MakeShape({1, 1, 32}) < MakeShape({32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({2048}));
EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, 32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-2, 32, 32}));
EXPECT_TRUE(MakeShape({1, 32, 32}) < MakeShape({-1, 32, 32}));
EXPECT_TRUE(MakeShape({1, -2, 32}) < MakeShape({-2, -2, 32}));
EXPECT_FALSE(MakeShape({1, -2, 32}) < MakeShape({-3, 32, 32}));
EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({1, -1, 32}));
EXPECT_FALSE(MakeShape({1, -1, 32}) < MakeShape({-1, -1, 32}));
EXPECT_FALSE(MakeShape({-1, -1, 32}) < MakeShape({1, -1, 32}));
}
TEST_F(SymbolicShapesTest, RankAndNumCoeff) {
EXPECT_EQ(2, Rank(MakeShape({32, 32})));
EXPECT_EQ(32 * 32, NumCoefficients(MakeShape({32, 32})));
EXPECT_EQ(2, Rank(MakeShape({-2, 32})));
EXPECT_EQ(-1, NumCoefficients(MakeShape({-2, 32})));
TensorShapeProto shape;
shape.set_unknown_rank(true);
EXPECT_EQ(-1, Rank(shape));
EXPECT_EQ(-1, NumCoefficients(shape));
}
TEST_F(SymbolicShapesTest, SizeRatio) {
EXPECT_EQ(16, ComputeSizeRatio(MakeShape({32, 32}), MakeShape({32, 2})));
EXPECT_EQ(16, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, 2})));
EXPECT_EQ(16,
ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, -2})));
EXPECT_EQ(-1,
ComputeSizeRatio(MakeShape({-2, -2, 32}), MakeShape({-2, 2, 2})));
EXPECT_EQ(-1,
ComputeSizeRatio(MakeShape({-2, 2, 32}), MakeShape({-2, 2, -2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, -2}), MakeShape({-2, 2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-2, 32}), MakeShape({-2, -2})));
EXPECT_EQ(1, ComputeSizeRatio(MakeShape({-2, -3}), MakeShape({-3, -2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 2})));
EXPECT_EQ(-1, ComputeSizeRatio(MakeShape({-1, 32}), MakeShape({-2, 0})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/symbolic_shapes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/symbolic_shapes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84dc13a8-40c4-46d9-9c1a-bea4f801ff37 | cpp | tensorflow/tensorflow | grappler_test | tensorflow/c/experimental/grappler/grappler_test.cc | tensorflow/core/grappler/utils/grappler_test_test.cc | #include "tensorflow/c/experimental/grappler/grappler.h"
#include "absl/log/check.h"
#include "tensorflow/c/experimental/grappler/grappler_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
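// No-op optimize callback: the registration tests below only exercise the
// plugin registration machinery, not actual graph rewriting.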
void optimize_func(void* optimizer, const TF_Buffer* graph_buf,
const TF_GrapplerItem* item, TF_Buffer* optimized_graph_buf,
TF_Status* tf_status) {}
void PopulateDefaultParam(TP_OptimizerRegistrationParams* params) {
params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE;
params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE;
params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE;
params->optimizer->create_func = nullptr;
params->optimizer->optimize_func = optimize_func;
params->optimizer->destroy_func = nullptr;
}
TEST(Grappler, SuccessfulRegistration) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Success";
params->optimizer_configs->remapping = TF_TriState_Off;
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Success"})
.size(),
1);
ConfigList config = PluginGraphOptimizerRegistry::GetPluginConfigs(
true, std::set<string>{"Success"});
ASSERT_EQ(config.toggle_config["remapping"], RewriterConfig::OFF);
}
TEST(Grappler, MultiplePluginRegistration) {
auto plugin_init_0 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device0";
};
auto plugin_init_1 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device1";
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init_0));
TF_ASSERT_OK(InitGraphPlugin(plugin_init_1));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Device0", "Device1"})
.size(),
2);
}
TEST(Grappler, DeviceTypeNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.message(),
"'device_type' field in TP_OptimizerRegistrationParams must be set.");
}
TEST(Grappler, OptimizeFuncNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "FuncNotSet";
params->optimizer->optimize_func = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.message(),
"'optimize_func' field in TP_Optimizer must be set.");
}
TEST(TF_GrapplerItem, NodesToPreserve) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
std::unordered_set<string> nodes_preserved = item.NodesToPreserve();
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : nodes_preserved) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetNodesToPreserveListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(nodes_preserved.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[nodes_preserved.size()]);
std::unique_ptr<size_t[]> lens(new size_t[nodes_preserved.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetNodesToPreserveList(c_item, values.get(), lens.get(),
nodes_preserved.size(), storage.get(), storage_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < nodes_preserved.size(); ++i) {
EXPECT_EQ(nodes_preserved.find(string(static_cast<const char*>(values[i]),
lens[i])) != nodes_preserved.end(),
true);
}
TF_DeleteStatus(status);
}
TEST(TF_GrapplerItem, FetchNodes) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : item.fetch) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetFetchNodesListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(item.fetch.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[item.fetch.size()]);
std::unique_ptr<size_t[]> lens(new size_t[item.fetch.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetFetchNodesList(c_item, values.get(), lens.get(), item.fetch.size(),
storage.get(), storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < item.fetch.size(); ++i) {
EXPECT_EQ(item.fetch[i].size(), lens[i]) << i;
EXPECT_EQ(item.fetch[i],
string(static_cast<const char*>(values[i]), lens[i]))
<< i;
}
TF_DeleteStatus(status);
}
TEST(TF_GraphProperties, InputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetInputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> in_props_buf(num_values, TF_NewBuffer());
TF_GetInputPropertiesList(graph_properties, node.name().c_str(),
in_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties in_props;
Status s = tensorflow::BufferToMessage(in_props_buf[0], &in_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, in_props.dtype());
EXPECT_FALSE(in_props.shape().unknown_rank());
EXPECT_EQ(2, in_props.shape().dim_size());
EXPECT_EQ(10, in_props.shape().dim(0).size());
EXPECT_EQ(1, in_props.shape().dim(1).size());
for (int i = 0; i < in_props_buf.size(); i++)
TF_DeleteBuffer(in_props_buf[i]);
}
}
TF_DeleteGraphProperties(graph_properties);
TF_DeleteStatus(status);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_GraphProperties, OutputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetOutputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> out_props_buf(num_values, TF_NewBuffer());
TF_GetOutputPropertiesList(graph_properties, node.name().c_str(),
out_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties out_props;
Status s = tensorflow::BufferToMessage(out_props_buf[0], &out_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, out_props.dtype());
EXPECT_FALSE(out_props.shape().unknown_rank());
EXPECT_EQ(2, out_props.shape().dim_size());
EXPECT_EQ(10, out_props.shape().dim(0).size());
EXPECT_EQ(1, out_props.shape().dim(1).size());
for (int i = 0; i < out_props_buf.size(); i++)
TF_DeleteBuffer(out_props_buf[i]);
}
}
TF_DeleteStatus(status);
TF_DeleteGraphProperties(graph_properties);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_FunctionLibraryDefinition, LookUpOpDef) {
TF_Buffer* g_buf = TF_NewBuffer();
TF_Buffer* op_buf = TF_NewBuffer();
TF_Status* status = TF_NewStatus();
GraphDef g_def;
Status s = MessageToBuffer(g_def, g_buf);
TF_ASSERT_OK(s);
TF_FunctionLibraryDefinition* func =
TF_NewFunctionLibraryDefinition(g_buf, status);
TF_LookUpOpDef(func, "Add", op_buf, status);
string actual_string(reinterpret_cast<const char*>(op_buf->data),
op_buf->length);
ASSERT_EQ(TF_OK, TF_GetCode(status));
const OpDef* expected_op_def;
TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef("Add", &expected_op_def));
string expected_serialized;
expected_op_def->SerializeToString(&expected_serialized);
EXPECT_EQ(expected_serialized, actual_string);
TF_DeleteBuffer(g_buf);
TF_DeleteBuffer(op_buf);
TF_DeleteStatus(status);
TF_DeleteFunctionLibraryDefinition(func);
}
}
}
} | #include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerTestTest : public GrapplerTest {};
TEST_F(GrapplerTestTest, CompareIdenticalGraphs) {
tensorflow::Scope s1 = tensorflow::Scope::NewRootScope();
auto s1_a = ops::Variable(s1.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s1_b = ops::Variable(s1.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s1_add = ops::Add(s1.WithOpName("Add_1"), s1_a, s1_b);
tensorflow::Scope s2 = tensorflow::Scope::NewRootScope();
auto s2_a = ops::Variable(s2.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s2_b = ops::Variable(s2.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s2_add = ops::Add(s2.WithOpName("Add_1"), s2_a, s2_b);
GraphDef graph1;
TF_ASSERT_OK(s1.ToGraphDef(&graph1));
GraphDef graph2;
TF_ASSERT_OK(s2.ToGraphDef(&graph2));
CompareGraphs(graph1, graph2);
}
TEST_F(GrapplerTestTest, CheckNodesConnectivity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add_1 = ops::Add(s.WithOpName("Add_1"), a, b);
auto add_2 = ops::Add(s.WithOpName("Add_2"), add_1, b);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
NodeMap node_map(&graph);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "a", "Add_1", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_1", 1));
EXPECT_FALSE(IsNodesDirectlyConnected(node_map, "a", "Add_2", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_2", 1));
}
TEST_F(GrapplerTestTest, CountOpNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
auto mul_ab = ops::Mul(s.WithOpName("Mull_ab"), a, b);
auto mul_bc = ops::Mul(s.WithOpName("Mull_bc"), a, b);
InputList inputs{
Output(add_ab),
Output(add_bc),
Output(mul_ab),
Output(mul_bc),
};
auto add_all = ops::AddN(s.WithOpName("Add_all"), inputs);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
EXPECT_EQ(2, CountOpNodes(graph, "Add"));
EXPECT_EQ(2, CountOpNodes(graph, "Mul"));
EXPECT_EQ(1, CountOpNodes(graph, "AddN"));
EXPECT_EQ(0, CountOpNodes(graph, "Transpose"));
}
TEST_F(GrapplerTestTest, EvaluateNodes) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"mul"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors.size(), 1);
EXPECT_EQ(tensors[0].flat<float>()(0), 3.0f);
EXPECT_EQ(tensors[0].flat<float>()(1), 8.0f);
}
TEST_F(GrapplerTestTest, EvaluateNodesInvalidFetch) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"no_such_node"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_DEATH(EvaluateNodes(item.graph, item.fetch),
"Tensor no_such_node:0, specified in either "
"feed_devices or fetch_devices was not found in the Graph");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler_test.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/grappler_test_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad0c7e00-9ae2-4443-ae99-f6f4b58dc22e | cpp | tensorflow/tensorflow | scc | tensorflow/core/grappler/utils/scc.cc | tensorflow/core/grappler/utils/scc_test.cc | #include "tensorflow/core/grappler/utils/scc.h"
#include <algorithm>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
struct SCCNodeData {
SCCNodeData()
: node(nullptr),
index(-1),
lowlink(-1),
onstack(false),
caller(nullptr),
caller_loop_location(-1) {}
void ResetStack(int new_index, SCCNodeData* new_caller) {
index = new_index;
lowlink = new_index;
onstack = true;
caller = new_caller;
caller_loop_location = 0;
}
const NodeDef* node;
int index;
int lowlink;
bool onstack;
std::vector<SCCNodeData*> children;
SCCNodeData* caller;
int caller_loop_location;
};
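// One pass of Tarjan's strongly connected components algorithm, implemented
// iteratively: the usual recursion is emulated through the per-node `caller`
// back-pointer and `caller_loop_location` resume index, so deeply nested
// graphs cannot overflow the call stack.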
void StrongConnect(SCCNodeData* v, std::stack<SCCNodeData*>* stack, int* index,
std::unordered_map<const NodeDef*, int>* components,
int* scc_index) {
v->ResetStack(*index /*new_index*/, nullptr /*new_caller*/);
++*index;
stack->push(v);
v->caller = nullptr;
v->caller_loop_location = 0;
SCCNodeData* last = v;
while (true) {
if (last->caller_loop_location < last->children.size()) {
SCCNodeData* w = last->children[last->caller_loop_location];
++(last->caller_loop_location);
if (w->index == -1) {
w->ResetStack(*index /*new_index*/, last /*new_caller*/);
++*index;
stack->push(w);
last = w;
} else if (w->onstack == true) {
last->lowlink = std::min(last->lowlink, w->index);
}
} else {
if (last->lowlink == last->index) {
SCCNodeData* top;
while (true) {
top = stack->top();
stack->pop();
top->onstack = false;
(*components)[top->node] = *scc_index;
if (top == last) {
break;
}
}
++*scc_index;
}
SCCNodeData* next_last = last->caller;
if (next_last == nullptr) {
break;
} else {
next_last->lowlink = std::min(next_last->lowlink, last->lowlink);
last = next_last;
}
}
}
}
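// Assigns a component id to every node of the graph. Nodes that end up in a
// strongly connected component of size one are all remapped to the special
// id -1 and together count as a single component in *num_components.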
void StronglyConnectedComponents(
const GraphDef& graph, std::unordered_map<const NodeDef*, int>* components,
int* num_components) {
std::stack<SCCNodeData*> stack;
std::unordered_map<string, SCCNodeData*> name_to_data;
std::vector<SCCNodeData> node_data_container;
node_data_container.reserve(graph.node_size());
std::unordered_map<const NodeDef*, SCCNodeData*> node_to_data;
for (const NodeDef& node : graph.node()) {
SCCNodeData node_data;
node_data.node = &node;
node_data_container.push_back(node_data);
name_to_data[node.name()] = &(*node_data_container.rbegin());
node_to_data[&node] = &(*node_data_container.rbegin());
}
for (const NodeDef& node : graph.node()) {
for (const string& input : node.input()) {
auto it = name_to_data.find(NodeName(input));
if (it != name_to_data.end()) {
it->second->children.push_back(node_to_data[&node]);
}
}
}
components->clear();
*num_components = 0;
int index = 0;
for (auto& v : node_data_container) {
if (v.index == -1) {
StrongConnect(&v, &stack, &index, components, num_components);
}
}
std::vector<int> counts_per_component(*num_components, 0);
for (auto& component : *components) {
DCHECK(component.second >= 0);
DCHECK(component.second < *num_components);
counts_per_component[component.second]++;
}
bool has_single_element_component = false;
for (auto& component : *components) {
if (counts_per_component[component.second] == 1) {
component.second = -1;
(*num_components)--;
has_single_element_component = true;
}
}
if (has_single_element_component) {
(*num_components) += 1;
}
}
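// Enumerates the loops in a graph built from control-flow ops. Every
// non-trivial strongly connected component yields at least one loop; a
// component containing several NextIteration nodes is split into individual
// loops by re-running the SCC analysis on a copy of the component with all
// but one NextIteration back edge cut. Fills `loops` with the ids of every
// loop each node participates in and returns the number of loops found.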
int IdentifyLoops(const GraphDef& graph,
std::unordered_map<const NodeDef*, std::vector<int>>* loops) {
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
if (num_components <= 1) {
if (!components.empty() && components.begin()->second == -1) {
return 0;
}
}
std::unordered_map<int, std::vector<const NodeDef*>> component_ids;
for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
component_ids[id].push_back(it.first);
}
int loop_id = 0;
for (const auto& component : component_ids) {
const std::vector<const NodeDef*>& component_nodes = component.second;
std::vector<std::pair<NodeDef*, string>> next_iter_nodes;
GraphDef subgraph;
std::unordered_map<const NodeDef*, const NodeDef*> subgraph_mapping;
for (const auto& component_node : component_nodes) {
NodeDef* node = subgraph.add_node();
*node = *component_node;
subgraph_mapping[node] = component_node;
if (IsNextIteration(*node)) {
CHECK_EQ(1, node->input_size());
next_iter_nodes.emplace_back(node, node->input(0));
}
}
if (next_iter_nodes.size() == 1) {
for (const auto& component_node : component_nodes) {
(*loops)[component_node].push_back(loop_id);
}
++loop_id;
} else {
for (int i = 0; i < next_iter_nodes.size(); ++i) {
for (int j = 0; j < next_iter_nodes.size(); ++j) {
next_iter_nodes[j].first->clear_input();
if (i == j) {
*next_iter_nodes[j].first->add_input() = next_iter_nodes[j].second;
}
}
int num_components = 0;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(subgraph, &components, &num_components);
CHECK_GE(num_components, 1);
for (const auto& it : components) {
int id = it.second;
if (id < 0) {
continue;
}
(*loops)[subgraph_mapping[it.first]].push_back(loop_id);
}
++loop_id;
}
}
}
return loop_id;
}
}
} | #include "tensorflow/core/grappler/utils/scc.h"
#include <memory>
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class SCCTest : public ::testing::Test {
public:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties unknown_device;
devices["MY_DEVICE"] = unknown_device;
cluster_ = std::make_unique<VirtualCluster>(devices);
TF_CHECK_OK(cluster_->Provision());
}
void TearDown() override { cluster_.reset(); }
protected:
static NodeDef CreateNode(const string& name,
absl::Span<const string> inputs) {
NodeDef node;
node.set_name(name);
for (const string& input : inputs) {
node.add_input(input);
}
return node;
}
std::unique_ptr<VirtualCluster> cluster_;
};
TEST_F(SCCTest, NoLoops) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster_->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 1);
for (const auto& node : item.graph.node()) {
EXPECT_EQ(-1, components[&node]);
}
}
TEST_F(SCCTest, DisjointCycleAndPath) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"d"});
*graph.add_node() = CreateNode("b", {"a"});
*graph.add_node() = CreateNode("c", {"b"});
*graph.add_node() = CreateNode("d", {"c"});
*graph.add_node() = CreateNode("e", {});
*graph.add_node() = CreateNode("f", {"e"});
*graph.add_node() = CreateNode("g", {"f"});
*graph.add_node() = CreateNode("h", {"g"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& pair : {std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("a", "d")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& node : {"e", "f", "g", "h"})
EXPECT_EQ(-1, components[name_to_node[node]]);
}
}
TEST_F(SCCTest, WikipediaExample) {
GraphDef graph;
*graph.add_node() = CreateNode("a", {"c"});
*graph.add_node() = CreateNode("b", {"a", "d"});
*graph.add_node() = CreateNode("c", {"b", "d", "f"});
*graph.add_node() = CreateNode("d", {"e"});
*graph.add_node() = CreateNode("e", {"d"});
*graph.add_node() = CreateNode("f", {"e", "g"});
*graph.add_node() = CreateNode("g", {"f", "h"});
*graph.add_node() = CreateNode("h", {"h"});
std::vector<const NodeDef*> nodes;
std::unordered_map<string, const NodeDef*> name_to_node;
for (const auto& n : graph.node()) {
nodes.push_back(&n);
name_to_node[n.name()] = &n;
}
int num_components;
std::unordered_map<const NodeDef*, int> components;
StronglyConnectedComponents(graph, &components, &num_components);
EXPECT_EQ(num_components, 4);
for (const auto& pair :
{std::make_pair("a", "b"), std::make_pair("a", "c"),
std::make_pair("d", "e"), std::make_pair("f", "g")}) {
EXPECT_EQ(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
for (const auto& pair :
{std::make_pair("a", "d"), std::make_pair("a", "f"),
std::make_pair("a", "h"), std::make_pair("d", "f"),
std::make_pair("d", "h"), std::make_pair("f", "h")}) {
EXPECT_NE(components[name_to_node[pair.first]],
components[name_to_node[pair.second]]);
}
}
TEST_F(SCCTest, TensorFlowLoop) {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 0
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 10
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/Add"
op: "Add"
input: "while/Identity"
input: "while/Add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/Add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 11
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
std::unordered_map<const NodeDef*, int> components;
int num_components;
StronglyConnectedComponents(item.graph, &components, &num_components);
EXPECT_EQ(num_components, 2);
for (const auto& node : item.graph.node()) {
if (node.name() == "Const" || node.name() == "while/Enter" ||
node.name() == "while/Exit") {
EXPECT_EQ(-1, components[&node]);
} else {
EXPECT_LE(0, components[&node]);
}
}
}
TEST_F(SCCTest, NestedLoops) {
GrapplerItem item;
string filename = io::JoinPath(
testing::TensorFlowSrcRoot(),
"core/grappler/costs/graph_properties_testdata/nested_loop.pbtxt");
TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
for (const auto& node : item.graph.node()) {
std::cout << node.DebugString() << std::endl;
}
std::unordered_map<const NodeDef*, std::vector<int>> loops;
int num_loops = IdentifyLoops(item.graph, &loops);
EXPECT_EQ(4, num_loops);
for (const auto& node_info : loops) {
std::cout << node_info.first->name() << " [";
for (int i : node_info.second) {
std::cout << " " << i;
}
std::cout << "]" << std::endl;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/scc.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/scc_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe2c227f-65c5-4a70-b35a-da2c942eea18 | cpp | tensorflow/tensorflow | colocation | tensorflow/core/grappler/utils/colocation.cc | tensorflow/core/grappler/utils/colocation_test.cc | #include "tensorflow/core/grappler/utils/colocation.h"
#include <cstring>
#include <list>
#include <unordered_map>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
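// Union-find "find" with path compression over colocation group names:
// returns the root of the group containing node_name (inserting the node as
// its own root if it was never seen) and re-points the nodes visited on the
// way directly at that root to flatten the chain.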
string GetColocationGroupRoot(std::unordered_map<string, string>* map,
const string& node_name) {
if (map->find(node_name) == map->end()) {
map->insert({node_name, node_name});
return node_name;
}
std::list<string> nodes_to_root;
string cur = node_name;
while ((*map)[cur] != cur) {
nodes_to_root.push_back(cur);
cur = (*map)[cur];
}
if (!nodes_to_root.empty()) {
nodes_to_root.pop_back();
for (const string& node : nodes_to_root) {
(*map)[node] = cur;
}
}
return cur;
}
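// Union-find "union": makes `left` the root of `right`'s group. Both names
// are expected to already be group roots; unseen names are ignored.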
void MergeColocationGroup(std::unordered_map<string, string>* map,
const string& left, const string& right) {
if (map->find(left) == map->end() || map->find(right) == map->end()) {
return;
}
if (left != right) {
map->at(right) = left;
}
}
}
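// Rewrites the "_class" colocation attributes so that every non-root member
// of a colocation group points directly at the group's root node, while the
// root node itself ends up carrying no colocation constraint at all.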
void ReassignColocation(GraphDef* graph) {
constexpr char kClassAttr[] = "_class";
constexpr char kColocPrefix[] = "loc:@";
std::unordered_map<string, string> coloc_groups;
NodeMap node_map(graph);
for (const auto& node : graph->node()) {
auto iter = node.attr().find(kClassAttr);
if (iter != node.attr().end() && iter->second.has_list()) {
for (const auto& str : iter->second.list().s()) {
size_t pos = str.find(kColocPrefix);
if (pos == 0) {
string colocate_node = str.substr(pos + strlen(kColocPrefix));
MergeColocationGroup(
&coloc_groups, GetColocationGroupRoot(&coloc_groups, node.name()),
GetColocationGroupRoot(&coloc_groups, colocate_node));
}
}
}
}
for (const auto& pair : coloc_groups) {
if (pair.first != pair.second) {
NodeDef* node = node_map.GetNode(pair.first);
if (node) {
AttrValue new_value;
new_value.mutable_list()->add_s(
kColocPrefix + GetColocationGroupRoot(&coloc_groups, pair.first));
node->mutable_attr()->erase(kClassAttr);
node->mutable_attr()->insert({kClassAttr, new_value});
}
} else {
NodeDef* node = node_map.GetNode(pair.first);
if (node) {
node->mutable_attr()->erase(kClassAttr);
}
}
}
}
}
} | #include "tensorflow/core/grappler/utils/colocation.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class ColocationTest : public ::testing::Test {};
bool VerifyNodeHasColocation(const NodeDef& ndef, const string& coloc) {
if (ndef.attr().empty()) {
return false;
}
if (ndef.attr().find("_class") == ndef.attr().end()) {
return false;
}
return ndef.attr().at("_class").list().s(0) == coloc;
}
TEST(ColocationTest, ReassignColocation_SingleNode) {
NodeDef ndef;
const Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@B"}).Finalize(&ndef);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef({ndef});
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(1, gdef.node(0).attr_size());
ReassignColocation(&gdef);
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(0, gdef.node(0).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_SingleGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
GraphDef gdef =
test::function::GDef({ndef_a, ndef_b, ndef_c, ndef_d, ndef_e});
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
ReassignColocation(&gdef);
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_MultiGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("U", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_u);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("V", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_v);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef(
{ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v});
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@W"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(6), "loc:@W"));
ReassignColocation(&gdef);
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@V"));
EXPECT_EQ(0, gdef.node(6).attr_size());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/colocation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/colocation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99856370-42e8-403a-a6aa-3269bbcf2b79 | cpp | tensorflow/tensorflow | tpu | tensorflow/core/grappler/utils/tpu.cc | tensorflow/core/grappler/utils/tpu_test.cc | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
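// Returns true if the graph, or any function in its library, contains a
// TPUCompile or TPUPartitionedCall node, i.e. it was produced by the legacy
// TPU bridge.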
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def) {
for (const auto& node : def.node()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
if (!def.has_library()) return false;
for (const auto& function_def : def.library().function()) {
for (const auto& node : function_def.node_def()) {
if (node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall") {
return true;
}
}
}
return false;
}
}
} | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class TpuTest : public ::testing::Test {};
TEST_F(TpuTest, NotTpuGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("Add");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("Mul");
EXPECT_FALSE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuMainGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
TEST_F(TpuTest, TpuLibraryGraph) {
{
GraphDef tpu_graph;
tpu_graph.add_node()->set_op("BatchFunction");
FunctionDefLibrary* library = tpu_graph.mutable_library();
FunctionDef* function_def = library->add_function();
function_def->add_node_def()->set_op("TPUPartitionedCall");
EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/tpu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/tpu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7cf8e0a3-4cf1-437a-bcf9-712f019c16e5 | cpp | tensorflow/tensorflow | pattern_utils | tensorflow/core/grappler/utils/pattern_utils.cc | tensorflow/core/grappler/utils/pattern_utils_test.cc | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include <algorithm>
#include <memory>
#include "absl/container/flat_hash_set.h"
namespace tensorflow {
namespace grappler {
namespace utils {
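// `op` may be a single op name or a '|'-separated list of alternatives;
// returns true if any of them names a commutative binary op. Note that
// SquaredDifference is commutative since (a-b)^2 == (b-a)^2.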
const bool IsCommutativeOp(const string& op) {
std::vector<string> op_list = str_util::Split(op, '|');
static const auto* commutative_ops = new absl::flat_hash_set<string>(
{"Add", "AddV2", "Mul", "Maximum", "SquaredDifference"});
for (const string& op_ : op_list) {
if (commutative_ops->contains(op_)) return true;
}
return false;
}
bool IsSame(string op1, string op2) {
if (op1 == "*") return true;
std::vector<string> op1_list = str_util::Split(op1, '|');
for (const string& op_1 : op1_list) {
if (op_1 == op2) return true;
}
return false;
}
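// Recursively matches `pattern` against the fanin tree rooted at node_view.
// A pattern op of "*" matches any op and '|' separates alternatives. Nodes
// the pattern will replace or remove must have no incoming control edges,
// nodes to be removed must also have no outgoing control edges, and a given
// pattern label must always resolve to the same graph node. For commutative
// two-input ops the two pattern children may match the graph fanins in
// swapped order.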
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::DoesOpTypePatternMatch(
const OpTypePattern& pattern, MutableNodeView* node_view,
NodeViewMatch* match) {
if ((node_view->NumControllingFanins() > 0 &&
pattern.node_status != NodeStatus::kRemain) ||
(node_view->NumControlledFanouts() > 0 &&
pattern.node_status == NodeStatus::kRemove))
return false;
bool op_type_matched = false;
if (pattern.op == "*") {
op_type_matched = true;
} else {
std::vector<string> op_list = str_util::Split(pattern.op, '|');
for (const string& op : op_list) {
if (node_view->node()->op() == op) {
op_type_matched = true;
break;
}
}
}
if (op_type_matched) {
if (node_label_to_index_.find(pattern.label) ==
node_label_to_index_.end()) {
node_label_to_index_[pattern.label] = node_view->node_index();
matched_node_indices_.insert(node_view->node_index());
if (pattern.node_status == NodeStatus::kRemove) {
remove_node_indices_.insert(node_view->node_index());
}
} else if (node_label_to_index_[pattern.label] != node_view->node_index()) {
return false;
} else {
DCHECK(node_label_to_index_[pattern.label] == node_view->node_index());
}
} else {
return false;
}
match->node_view = node_view;
if (!pattern.children.empty()) {
auto graph_children = node_view->GetRegularFanins();
int num_children = graph_children.size();
if (num_children != pattern.children.size()) {
return false;
} else {
std::vector<int> pattern_child_indices(num_children);
std::iota(pattern_child_indices.begin(), pattern_child_indices.end(), 0);
string op_name = pattern.op;
if (IsCommutativeOp(op_name) && num_children == 2) {
MutableNodeView* graph_child0_node_view =
graph_view_->GetNode(graph_children[0].node_index());
MutableNodeView* graph_child1_node_view =
graph_view_->GetNode(graph_children[1].node_index());
if ((!IsSame(pattern.children[0].op, graph_child0_node_view->GetOp()) &&
IsSame(pattern.children[1].op, graph_child0_node_view->GetOp())) ||
(!IsSame(pattern.children[1].op, graph_child1_node_view->GetOp()) &&
IsSame(pattern.children[0].op, graph_child1_node_view->GetOp())))
std::swap(pattern_child_indices[0], pattern_child_indices[1]);
}
for (int i = 0; i < num_children; ++i) {
auto child_node_index = graph_children[i].node_index();
MutableNodeView* child_node_view =
graph_view_->GetNode(child_node_index);
const OpTypePattern& child_pattern =
pattern.children[pattern_child_indices[i]];
match->children.push_back(NodeViewMatch());
NodeViewMatch* child_match = &(match->children.back());
if (!DoesOpTypePatternMatch(child_pattern, child_node_view,
child_match)) {
return false;
}
}
}
}
return true;
}
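// Entry point for matching: attempts a structural match rooted at node_view
// and, on success, additionally requires IsSafeNodesToRemove (which, among
// other things, honors nodes_to_preserve) to pass before exporting the
// label-to-node-index map and the indices of the nodes slated for removal.
// All per-match state is cleared before returning.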
template <>
bool SubGraphMatcher<MatchingDirection::kFollowInputs>::GetMatchedNodes(
const OpTypePattern& pattern,
const std::unordered_set<string>& nodes_to_preserve,
MutableNodeView* node_view, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
bool found_match = false;
match_ = std::make_unique<NodeViewMatch>();
if (DoesOpTypePatternMatch(pattern, node_view, match_.get())) {
if (IsSafeNodesToRemove(nodes_to_preserve)) {
found_match = true;
*matched_nodes_map = this->node_label_to_index_;
*remove_node_indices = this->remove_node_indices_;
}
} else {
found_match = false;
}
match_->Clear();
match_.reset(nullptr);
matched_node_indices_.clear();
node_label_to_index_.clear();
remove_node_indices_.clear();
return found_match;
}
}
}
} | #include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace {
using ::tensorflow::ops::Placeholder;
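// Builds input -> MatMul -> BiasAdd followed by a GELU expanded into its
// erf form, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))); the constant 0.707f
// approximates 1 / sqrt(2). When add_external_dependent is set, an extra
// Identity consumer of the BiasAdd is added so tests can check matching in
// the presence of an external dependent.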
void GetMatMulBiasAddGeluGraph(GraphDef* graph,
bool add_external_dependent = false) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32});
auto weight_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto weight = Placeholder(s.WithOpName("weight"), DT_FLOAT, weight_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), input, weight);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
if (add_external_dependent) {
auto external_dependent =
ops::Identity(s.WithOpName("external_dependent"), bias_add);
}
auto one_over_square_root_two =
ops::Const(s.WithOpName("one_over_square_root_two"), {0.707f}, {});
auto bias_add_times_const = ops::Mul(s.WithOpName("bias_add_times_const"),
bias_add, one_over_square_root_two);
auto erf = ops::Erf(s.WithOpName("erf"), bias_add_times_const);
auto one = ops::Const(s.WithOpName("one"), {1.0f}, {});
auto erf_plus_one = ops::AddV2(s.WithOpName("erf_plus_one"), erf, one);
auto one_half = ops::Const(s.WithOpName("one_half"), {0.5f}, {});
auto one_half_times_erf_plus_one = ops::Mul(
s.WithOpName("one_half_times_erf_plus_one"), one_half, erf_plus_one);
auto gelu =
ops::Mul(s.WithOpName("gelu"), one_half_times_erf_plus_one, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), gelu);
TF_ASSERT_OK(s.ToGraphDef(graph));
}
OpTypePattern GetMatMulBiasAddGeluPattern() {
OpTypePattern pattern_syntax{"Mul", "my_gelu", NodeStatus::kReplace,
{
{"Mul", "my_one_half_times_erf_plus_one", NodeStatus::kRemove,
{
{"Const", "my_one_half", NodeStatus::kRemain},
{"AddV2", "my_erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "my_erf", NodeStatus::kRemove,
{
{"Mul", "my_bias_add_times_const", NodeStatus::kRemove,
{
{"BiasAdd", "my_bias_add", NodeStatus::kRemove},
{"Const", "my_one_over_square_root_two", NodeStatus::kRemain}
}
}
}
},
{"Const", "my_one", NodeStatus::kRemain}
}
}
}
},
{"BiasAdd", "my_bias_add", NodeStatus::kRemove,
{
{"MatMul", "my_matmul", NodeStatus::kRemove},
{"*", "my_bias", NodeStatus::kRemain}
}
}
}
};
return pattern_syntax;
}
class PatternMatcherTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(PatternMatcherTest, Tree) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove},
{"D", "my_d", NodeStatus::kRemove}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
TEST_F(PatternMatcherTest, DAG) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::unordered_set<string> nodes_to_preserve = {"foo"};
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
nodes_to_preserve.insert({"c", "d"});
matched_nodes_map.clear();
remove_node_indices.clear();
found_match =
graph_matcher.GetMatchedNodes(pattern, nodes_to_preserve, root_node_view,
&matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, DAGExternalDependent) {
::tensorflow::Status status;
GraphDef graph = CreateGraph({{"f", "F", {"d"}},
{"e", "E", {"c", "d"}},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{"E", "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, MatMulBiasAddGelu) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
TEST_F(PatternMatcherTest, MatMulBiasAddGeluExternalDependent) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph, true);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_FALSE(found_match);
EXPECT_TRUE(matched_nodes_map.empty());
EXPECT_TRUE(remove_node_indices.empty());
}
TEST_F(PatternMatcherTest, MatMulBiasAddGeluMutation) {
::tensorflow::Status status;
GraphDef graph;
GetMatMulBiasAddGeluGraph(&graph);
OpTypePattern pattern = GetMatMulBiasAddGeluPattern();
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("gelu");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map, &remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
int num_nodes_before = graph_view.NumNodes();
std::vector<string> remove_node_names;
for (auto const& node_idx : remove_node_indices) {
remove_node_names.push_back(graph_view.GetNode(node_idx)->GetName());
}
Mutation* mutation = graph_view.GetMutationBuilder();
NodeDef fused_node;
fused_node.set_name("gelu");
fused_node.set_op("_FusedMatMul");
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(0));
fused_node.add_input(graph_view.GetNode("matmul")->node()->input(1));
fused_node.add_input(graph_view.GetNode("bias_add")->node()->input(1));
mutation->AddNode(std::move(fused_node), &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(mutation->Apply());
for (auto const& node_idx : remove_node_indices) {
mutation->RemoveNode(graph_view.GetNode(node_idx));
}
TF_EXPECT_OK(mutation->Apply());
int num_nodes_after = graph_view.NumNodes();
EXPECT_EQ(num_nodes_before - remove_node_indices.size(), num_nodes_after);
bool remove_nodes_deleted = true;
for (auto const& node_name : remove_node_names) {
if (graph_view.GetNode(node_name) != nullptr) {
remove_nodes_deleted = false;
break;
}
}
EXPECT_TRUE(remove_nodes_deleted);
bool replace_node_exist = graph_view.HasNode("gelu");
EXPECT_TRUE(replace_node_exist);
}
TEST_F(PatternMatcherTest, CommutativeInputs) {
::tensorflow::Status status;
std::vector<string> commutative_ops = {"Mul", "Add", "AddV2"};
for (string op : commutative_ops) {
for (bool should_swap : {false, true}) {
std::vector<string> commutative_operands =
(should_swap ? std::vector<string>{"d", "c"}
: std::vector<string>{"c", "d"});
GraphDef graph = CreateGraph({{"e", op, commutative_operands},
{"c", "C", {"b"}},
{"d", "D", {"b"}},
{"b", "B", {"a"}},
{"a", "A", {}}});
OpTypePattern pattern{op, "my_e", NodeStatus::kReplace,
{
{"C", "my_c", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
},
{"D", "my_d", NodeStatus::kRemove,
{
{"B", "my_b", NodeStatus::kRemove}
}
}
}
};
MutableGraphView graph_view(&graph, &status);
TF_ASSERT_OK(status);
TF_EXPECT_OK(graph_view.SortTopologically(false, {}));
auto root_node_view = graph_view.GetNode("e");
SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&graph_view);
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool found_match = graph_matcher.GetMatchedNodes(
pattern, {}, root_node_view, &matched_nodes_map,
&remove_node_indices);
EXPECT_TRUE(found_match);
EXPECT_FALSE(matched_nodes_map.empty());
EXPECT_FALSE(remove_node_indices.empty());
bool all_indices_matched = true;
for (auto it = matched_nodes_map.begin(); it != matched_nodes_map.end();
it++) {
auto label = absl::StripPrefix(it->first, "my_");
int matched_node_idx = it->second;
int expected_node_idx = graph_view.GetNode(label)->node_index();
if (matched_node_idx != expected_node_idx) {
all_indices_matched = false;
break;
}
}
EXPECT_TRUE(all_indices_matched);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/pattern_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/pattern_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cc7cc66-874f-47a7-a4f0-c1960ee89b92 | cpp | tensorflow/tensorflow | transitive_fanin | tensorflow/core/grappler/utils/transitive_fanin.cc | tensorflow/core/grappler/utils/transitive_fanin_test.cc | #include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <queue>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace grappler {
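// Collects every node reachable from 'terminal_nodes' by walking node inputs
// (plus the implicit _Recv -> _Send edges). On success 'fanin_nodes' holds
// the reachable nodes; 'name_to_fanin_node', if non-null, maps their names.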
Status ComputeTransitiveFanin(
const GraphDef& graph, const std::vector<string>& terminal_nodes,
std::unordered_map<string, const NodeDef*>* name_to_fanin_node,
std::vector<const NodeDef*>* fanin_nodes) {
std::unordered_map<string, const NodeDef*> name_to_node;
std::unordered_map<string, const NodeDef*> name_to_send;
for (const auto& node : graph.node()) {
name_to_node[node.name()] = &node;
if (node.op() == "_Send") {
const auto& attr = node.attr();
name_to_send[attr.at("tensor_name").s()] = &node;
}
}
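// Note: despite the name, this vector is used as a LIFO stack
// (push_back/pop_back), so the fanin is explored depth-first.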
std::vector<const NodeDef*> queue;
for (const string& root : terminal_nodes) {
const NodeDef* node = name_to_node[NodeName(root)];
if (!node) {
return errors::InvalidArgument("Graph does not contain terminal node ",
root, ".");
}
queue.push_back(node);
}
std::unordered_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.back();
queue.pop_back();
if (!visited.insert(node).second) {
continue;
}
fanin_nodes->push_back(node);
if (name_to_fanin_node) {
name_to_fanin_node->insert(
std::pair<string, const NodeDef*>(node->name(), node));
}
for (const string& input : node->input()) {
const NodeDef* in = name_to_node[NodeName(input)];
if (!in) {
return errors::InvalidArgument("Graph does not contain input ",
NodeName(input), " of node ",
node->name(), ".");
}
queue.push_back(in);
}
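// A _Recv node implicitly depends on the _Send node that shares its
// "tensor_name" attribute, so follow that edge as well.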
if (node->op() == "_Recv") {
const auto& attr = node->attr();
const NodeDef* send = name_to_send[attr.at("tensor_name").s()];
if (send) {
queue.push_back(send);
}
}
}
return absl::OkStatus();
}
Status ComputeTransitiveFanin(const GraphDef& graph,
const std::vector<string>& terminal_nodes,
std::vector<const NodeDef*>* fanin_nodes) {
return ComputeTransitiveFanin(graph, terminal_nodes, nullptr, fanin_nodes);
}
Status SetTransitiveFaninGraph(const GraphDef& input_graph,
GraphDef* output_graph,
const std::vector<string>& terminal_nodes) {
std::vector<const NodeDef*> keep;
TF_RETURN_IF_ERROR(
ComputeTransitiveFanin(input_graph, terminal_nodes, &keep));
output_graph->mutable_node()->Reserve(keep.size());
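// 'keep' was filled from the terminal nodes toward their fanins; copy it in
// reverse so producers generally precede their consumers in the output.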
for (int i = keep.size() - 1; i >= 0; --i) {
*output_graph->add_node() = *keep[i];
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class TransitiveFaninTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, std::vector<string> inputs)
: name(std::move(name)), inputs(std::move(inputs)) {}
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(TransitiveFaninTest, NoPruning) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromSingleTerminalNode) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_FALSE(node_map.NodeExists("5"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromMultipleTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"2"}},
{"6", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_TRUE(node_map.NodeExists("5"));
ASSERT_FALSE(node_map.NodeExists("6"));
}
TEST_F(TransitiveFaninTest, InvalidGraphOrTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"6"}},
{"7", {"8"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
auto s = SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain input 6 of node 5.");
const std::vector<string> invalid_terminal_nodes = {"0", "1", "5"};
s = SetTransitiveFaninGraph(graph, &output_graph, invalid_terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain terminal node 0.");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/transitive_fanin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/transitive_fanin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a4a1614-9a0c-4bf7-975a-a434a63a9822 | cpp | tensorflow/tensorflow | frame | tensorflow/core/grappler/utils/frame.cc | tensorflow/core/grappler/utils/frame_test.cc | #include "tensorflow/core/grappler/utils/frame.h"
#include <deque>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
template <typename GraphViewT>
inline Status FrameView::InferFromGraphViewT(const GraphViewT& graph_view) {
if (is_inferred_) {
return errors::Internal("FrameView was already inferred from the graph");
}
is_inferred_ = true;
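// Breadth-first traversal starting from the source nodes (nodes with no
// fanins). Each node inherits the frame stack of the fanin that reached it:
// an Enter fanout pushes a new frame id, an Exit fanin pops one.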
std::deque<int> ready_node_indices;
for (const auto& node : graph_view.GetNodes()) {
if (node.NumRegularFanins() + node.NumControllingFanins() == 0) {
ready_node_indices.push_back(node.node_index());
node_to_frames_[node.node()] = node_has_no_frames_;
}
}
const auto* graph = graph_view.graph();
absl::flat_hash_map<string, int> frame_name_to_id;
auto process_fanout = [this, graph](
absl::flat_hash_map<string, int>* frame_name_to_id,
std::deque<int>* ready_node_indices,
const NodeDef* ready_node, int fanout_node_index) {
const NodeDef* fanout_node = &graph->node(fanout_node_index);
if (!node_to_frames_.contains(fanout_node)) {
std::vector<int> frame_ids = node_to_frames_[ready_node];
if (IsExit(*ready_node)) {
frame_ids.pop_back();
}
if (IsEnter(*fanout_node)) {
const AttrValue* frame_name_attr =
AttrSlice(*fanout_node).Find("frame_name");
if (!frame_name_attr) {
return errors::InvalidArgument(
"Missing frame name for the Enter node: ",
SummarizeNodeDef(*fanout_node));
}
const string& frame_name = frame_name_attr->s();
int frame_id;
if (frame_name_to_id->contains(frame_name)) {
frame_id = (*frame_name_to_id)[frame_name];
} else {
frame_id = static_cast<int>(frame_name_to_id->size());
(*frame_name_to_id)[frame_name] = frame_id;
}
frame_ids.push_back(frame_id);
}
ready_node_indices->push_back(fanout_node_index);
node_to_frames_[fanout_node] = std::move(frame_ids);
} else {
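// The fanout already has frames assigned along another path; verify that
// both paths imply the same frame stack, ignoring the frame pushed by an
// Enter fanout or popped by an Exit fanin.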
std::vector<int> frame_ids_fanout = node_to_frames_[fanout_node];
std::vector<int> frame_ids_node = node_to_frames_[ready_node];
if (IsEnter(*fanout_node)) {
frame_ids_fanout.pop_back();
}
if (IsExit(*ready_node)) {
frame_ids_node.pop_back();
}
if (frame_ids_node != frame_ids_fanout) {
return errors::InvalidArgument(
"Invalid graph: Frame ids for node ", ready_node->name(),
" does not match frame ids for it's fanout ", fanout_node->name());
}
}
return absl::OkStatus();
};
while (!ready_node_indices.empty()) {
const int ready_node_index = ready_node_indices.front();
ready_node_indices.pop_front();
const auto* ready_node_view = graph_view.GetNode(ready_node_index);
const NodeDef* ready_node_def = ready_node_view->node();
for (const auto& regular_fanouts_port_i :
ready_node_view->GetRegularFanouts()) {
for (const auto& regular_fanout : regular_fanouts_port_i) {
TF_RETURN_IF_ERROR(process_fanout(&frame_name_to_id,
&ready_node_indices, ready_node_def,
regular_fanout.node_index()));
}
}
for (const auto& controlled_fanout :
ready_node_view->GetControlledFanouts()) {
TF_RETURN_IF_ERROR(process_fanout(&frame_name_to_id, &ready_node_indices,
ready_node_def,
controlled_fanout.node_index()));
}
}
num_frames_ = static_cast<int>(frame_name_to_id.size());
return absl::OkStatus();
}
Status FrameView::InferFromGraphView(const utils::GraphView& graph_view) {
return InferFromGraphViewT(graph_view);
}
Status FrameView::InferFromGraphView(
const utils::MutableGraphView& graph_view) {
return InferFromGraphViewT(graph_view);
}
Status FrameView::InferFromGraph(const GraphDef& graph) {
Status status;
utils::GraphView graph_view(&graph, &status);
TF_RETURN_IF_ERROR(status);
return InferFromGraphViewT(graph_view);
}
const std::vector<int>& FrameView::Frames(const NodeDef& node) const {
DCHECK(is_inferred_) << "FrameView is not initialized";
auto frames = node_to_frames_.find(&node);
if (frames == node_to_frames_.end()) {
LOG(WARNING) << "Node '" << node.name()
<< "' doesn't belong to the graph used for initialization";
return node_has_no_frames_;
} else {
return frames->second;
}
}
bool FrameView::IsInFrame(const NodeDef& node) const {
return !Frames(node).empty();
}
}
} | #include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using GraphTypes =
::testing::Types<GraphDef, utils::GraphView, utils::MutableGraphView>;
template <typename T>
class FrameViewTest : public ::testing::Test {
protected:
NodeDef CreateNode(const string& name, const std::vector<string>& inputs) {
return CreateNode(name, "", "", inputs);
}
NodeDef CreateNode(const string& name, const string& op,
const std::vector<string>& inputs) {
return CreateNode(name, op, "", inputs);
}
NodeDef CreateNode(const string& name, const string& op, const string& frame,
const std::vector<string>& inputs) {
NodeDef node;
node.set_name(name);
if (!op.empty()) {
node.set_op(op);
}
if (!frame.empty()) {
AttrValue frame_name;
frame_name.set_s(frame);
node.mutable_attr()->insert({"frame_name", frame_name});
}
for (const string& input : inputs) {
node.add_input(input);
}
return node;
}
};
TYPED_TEST_SUITE(FrameViewTest, GraphTypes);
template <typename T>
void InferFromGraph(FrameView* frame_view, GraphDef* graph, bool valid) {
Status status;
T graph_view(graph, &status);
TF_ASSERT_OK(status);
status = frame_view->InferFromGraphView(graph_view);
if (valid) {
TF_ASSERT_OK(status);
} else {
ASSERT_FALSE(status.ok());
}
}
template <>
void InferFromGraph<GraphDef>(FrameView* frame_view, GraphDef* graph,
bool valid) {
Status status = frame_view->InferFromGraph(*graph);
if (valid) {
TF_ASSERT_OK(status);
} else {
ASSERT_FALSE(status.ok());
}
}
TYPED_TEST(FrameViewTest, NestedLoop) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context1", {"0"});
*graph.add_node() = this->CreateNode("2", {"1"});
*graph.add_node() = this->CreateNode("3", "Merge", {"2", "14"});
*graph.add_node() = this->CreateNode("4", {"3"});
*graph.add_node() = this->CreateNode("5", "Switch", {"4"});
*graph.add_node() = this->CreateNode("6", {"5"});
*graph.add_node() = this->CreateNode("7", "Enter", "while/context2", {"6"});
*graph.add_node() = this->CreateNode("8", {"7"});
*graph.add_node() = this->CreateNode("9", "Merge", {"8", "12"});
*graph.add_node() = this->CreateNode("10", {"9"});
*graph.add_node() = this->CreateNode("11", "Switch", {"10"});
*graph.add_node() = this->CreateNode("12", "NextIteration", {"11"});
*graph.add_node() = this->CreateNode("13", "Exit", {"11"});
*graph.add_node() = this->CreateNode("14", "NextIteration", {"13"});
*graph.add_node() = this->CreateNode("15", {"5"});
*graph.add_node() = this->CreateNode("16", "Exit", {"15"});
*graph.add_node() = this->CreateNode("17", {"16"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {0}},
{"4", {0}}, {"5", {0}}, {"6", {0}}, {"7", {0, 1}},
{"8", {0, 1}}, {"9", {0, 1}}, {"10", {0, 1}}, {"11", {0, 1}},
{"12", {0, 1}}, {"13", {0, 1}}, {"14", {0}}, {"15", {0}},
{"16", {0}}, {"17", {}}};
EXPECT_EQ(frame_view.num_frames(), 2);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, MultipleInputsToEnter) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", {});
*graph.add_node() =
this->CreateNode("2", "Enter", "while/context", {"0", "1"});
*graph.add_node() = this->CreateNode("3", "Exit", {"2"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {}}, {"2", {0}}, {"3", {0}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, ExitOutput) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context", {"0"});
*graph.add_node() = this->CreateNode("2", "Exit", {"1"});
*graph.add_node() = this->CreateNode("3", {});
*graph.add_node() = this->CreateNode("4", {"2", "3"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {}}, {"4", {}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, MultipleEnterNodes) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context", {"0"});
*graph.add_node() = this->CreateNode("2", {"1"});
*graph.add_node() = this->CreateNode("5", {});
*graph.add_node() = this->CreateNode("4", "Enter", "while/context", {"5"});
*graph.add_node() = this->CreateNode("3", {"4", "2"});
*graph.add_node() = this->CreateNode("6", "Merge", {"3", "8"});
*graph.add_node() = this->CreateNode("7", "Switch", {"6"});
*graph.add_node() = this->CreateNode("8", "NextIteration", {"7"});
*graph.add_node() = this->CreateNode("9", "Exit", {"7"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, true);
std::unordered_map<string, std::vector<int>> expected = {
{"0", {}}, {"1", {0}}, {"2", {0}}, {"3", {0}}, {"4", {0}},
{"5", {}}, {"6", {0}}, {"7", {0}}, {"8", {0}}, {"9", {0}}};
EXPECT_EQ(frame_view.num_frames(), 1);
for (const NodeDef& node : graph.node()) {
std::vector<int> expected_frames = expected[node.name()];
std::vector<int> node_frames = frame_view.Frames(node);
EXPECT_EQ(expected_frames, node_frames);
}
}
TYPED_TEST(FrameViewTest, ConflictingFrames) {
GraphDef graph;
*graph.add_node() = this->CreateNode("0", {});
*graph.add_node() = this->CreateNode("1", "Enter", "while/context1", {"0"});
*graph.add_node() = this->CreateNode("2", "Enter", "while/context2", {"1"});
*graph.add_node() = this->CreateNode("3", {"1", "2"});
FrameView frame_view;
InferFromGraph<TypeParam>(&frame_view, &graph, false);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/frame.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/frame_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0a7e1071-a884-4f38-a6e5-24b3584f062c | cpp | tensorflow/tensorflow | traversal | tensorflow/core/grappler/utils/traversal.cc | tensorflow/core/grappler/utils/traversal_test.cc | #include "tensorflow/core/grappler/utils/traversal.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
namespace tensorflow {
namespace grappler {
namespace {
struct DfsStackElem {
DfsStackElem(int node, bool children_visited, int src)
: node(node), children_visited(children_visited), src(src) {}
explicit DfsStackElem(int node) : DfsStackElem(node, false, -1) {}
int node;
bool children_visited;
int src;
};
enum class NodeState { kNotVisited, kVisiting, kDone };
}
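// Depth-first traversal from 'from', following either fanins or fanouts.
// 'predicates.enter' gates whether a node is visited at all, while
// 'predicates.advance' gates whether its neighbors are expanded.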
void DfsTraversal(const GraphTopologyView& graph_view,
const absl::Span<const NodeDef* const> from,
const TraversalDirection direction,
const DfsPredicates& predicates,
const DfsCallbacks& callbacks) {
std::vector<DfsStackElem> stack;
stack.reserve(from.size());
for (const NodeDef* node : from) {
const absl::optional<int> node_idx = graph_view.GetNodeIndex(*node);
DCHECK(node_idx.has_value()) << "Illegal start node: " << node->name();
if (node_idx.has_value()) {
stack.emplace_back(node_idx.value());
}
}
absl::flat_hash_map<int, NodeState> node_state;
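// Iterative DFS: each node is pushed with children_visited=false for the
// pre-order visit and neighbor expansion, then re-pushed with
// children_visited=true so the post-order callback fires once its subtree
// is done. Seeing a node in kVisiting state again means a back edge.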
while (!stack.empty()) {
DfsStackElem w = stack.back();
stack.pop_back();
NodeState& state = node_state[w.node];
if (state == NodeState::kDone) continue;
if (predicates.enter && !predicates.enter(graph_view.GetNode(w.node))) {
state = NodeState::kDone;
continue;
}
if (w.children_visited) {
state = NodeState::kDone;
if (callbacks.post_order) {
callbacks.post_order(graph_view.GetNode(w.node));
}
continue;
}
if (state == NodeState::kVisiting) {
if (callbacks.on_back_edge) {
callbacks.on_back_edge(graph_view.GetNode(w.src),
graph_view.GetNode(w.node));
}
continue;
}
state = NodeState::kVisiting;
if (callbacks.pre_order) {
callbacks.pre_order(graph_view.GetNode(w.node));
}
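// Re-push the node so the post-order callback runs after its neighbors.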
stack.emplace_back(w.node, true, w.src);
if (predicates.advance && !predicates.advance(graph_view.GetNode(w.node))) {
continue;
}
if (direction == TraversalDirection::kFollowInputs) {
for (const int fanin : graph_view.GetFanin(w.node)) {
stack.emplace_back(fanin, false, w.node);
}
} else {
for (const int fanout : graph_view.GetFanout(w.node)) {
stack.emplace_back(fanout, false, w.node);
}
}
}
}
void DfsTraversal(const GraphTopologyView& graph_view,
const absl::Span<const NodeDef* const> from,
TraversalDirection direction, const DfsCallbacks& callbacks) {
DfsTraversal(graph_view, from, direction, {}, callbacks);
}
}
} | #include "tensorflow/core/grappler/utils/traversal.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::NDef;
DfsCallbacks MkCallbacks(std::vector<string>* pre_order,
std::vector<string>* post_order,
std::vector<string>* back_edges) {
return {[pre_order](const NodeDef* n) { pre_order->push_back(n->name()); },
[post_order](const NodeDef* n) { post_order->push_back(n->name()); },
[back_edges](const NodeDef* src, const NodeDef* dst) {
back_edges->push_back(absl::StrCat(src->name(), "->", dst->name()));
}};
}
TEST(TraversalTest, OutputsDfsNoLoop) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", op, {"5"}, {}),
NDef("0", op, {"5", "4"}, {}),
NDef("1", op, {"4", "3"}, {}),
NDef("3", op, {"2"}, {}),
NDef("5", op, {}, {}),
NDef("4", op, {}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(4), &graph.node(5)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"4", "1", "0", "5", "2", "3"};
const std::vector<string> expected_post = {"1", "0", "4", "3", "2", "5"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
TEST(TraversalTest, InputsDfsNoLoop) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", op, {"5"}, {}),
NDef("0", op, {"5", "4"}, {}),
NDef("1", op, {"4", "3"}, {}),
NDef("3", op, {"2"}, {}),
NDef("5", op, {}, {}),
NDef("4", op, {}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(1), &graph.node(2)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowInputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "3", "2", "5", "0"};
const std::vector<string> expected_post = {"4", "5", "2", "3", "1", "0"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
TEST(TraversalTest, InputsDfsWithLoop) {
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", "Merge", {"1", "5"}, {}),
NDef("3", "Switch", {"2"}, {}),
NDef("4", "Identity", {"3"}, {}),
NDef("5", "NextIteration", {"4"}, {}),
NDef("1", "Enter", {}, {}),
NDef("6", "Exit", {"3"}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(5)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowInputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"6", "3", "2", "1", "5", "4"};
const std::vector<string> expected_post = {"1", "4", "5", "2", "3", "6"};
const std::vector<string> expected_edges = {"4->3"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_EQ(back_edges, expected_edges);
}
TEST(TraversalTest, OutputDfsWithLoop) {
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("2", "Merge", {"1", "5"}, {}),
NDef("3", "Switch", {"2"}, {}),
NDef("4", "Identity", {"3"}, {}),
NDef("5", "NextIteration", {"4"}, {}),
NDef("1", "Enter", {}, {}),
NDef("6", "Exit", {"3"}, {})},
{});
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"2", "3", "6", "4", "5"};
const std::vector<string> expected_post = {"6", "5", "4", "3", "2"};
const std::vector<string> expected_edges = {"5->2"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_EQ(back_edges, expected_edges);
}
TEST(TraversalTest, DfsWithEnterPredicate) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("1", op, {}, {}),
NDef("2", op, {"1"}, {}),
NDef("3", op, {"2"}, {}),
NDef("4", op, {"1"}, {}),
NDef("5", op, {"4"}, {}),
NDef("6", op, {"3", "5"}, {})},
{});
const auto enter = [](const NodeDef* node) {
return node->name() != "2" && node->name() != "3";
};
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
DfsPredicates::Enter(enter),
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "5", "6"};
const std::vector<string> expected_post = {"6", "5", "4", "1"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
TEST(TraversalTest, DfsWithAdvancePredicate) {
const string op = "OpIsNotImportantInThisTest";
GraphDef graph = ::tensorflow::test::function::GDef(
{NDef("1", op, {}, {}),
NDef("2", op, {"1"}, {}),
NDef("3", op, {"2"}, {}),
NDef("4", op, {"1"}, {}),
NDef("5", op, {"4"}, {}),
NDef("6", op, {"3", "5"}, {})},
{});
const auto advance = [](const NodeDef* node) {
return node->name() != "2" && node->name() != "3";
};
std::vector<const NodeDef*> start_nodes = {&graph.node(0)};
std::vector<string> pre_order;
std::vector<string> post_order;
std::vector<string> back_edges;
GraphTopologyView graph_view;
TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
DfsTraversal(graph_view, start_nodes, TraversalDirection::kFollowOutputs,
DfsPredicates::Advance(advance),
MkCallbacks(&pre_order, &post_order, &back_edges));
const std::vector<string> expected_pre = {"1", "4", "5", "6", "2"};
const std::vector<string> expected_post = {"6", "5", "4", "2", "1"};
EXPECT_EQ(pre_order, expected_pre);
EXPECT_EQ(post_order, expected_post);
EXPECT_TRUE(back_edges.empty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/traversal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/traversal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
334818dd-5c64-4cee-a523-41f3195eb4c3 | cpp | tensorflow/tensorflow | functions | tensorflow/core/grappler/utils/functions.cc | tensorflow/core/grappler/utils/functions_test.cc | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/strings/scanner.h"
namespace tensorflow {
namespace grappler {
GrapplerFunctionItem::GrapplerFunctionItem(
string func_name, string description, AttrSlice func_attr,
std::vector<const FunctionDef::ArgAttrs*> arg_attr,
std::vector<InputArgInstantiation> input_args,
std::vector<OutputArgInstantiation> output_args,
std::vector<ControlOutput> control_outputs, const int graph_def_version,
const bool is_stateful, GraphDef&& function_body)
: description_(std::move(description)),
func_attr_(func_attr),
arg_attr_(std::move(arg_attr)),
input_args_(std::move(input_args)),
output_args_(std::move(output_args)),
control_outputs_(std::move(control_outputs)),
is_stateful_(is_stateful) {
id = std::move(func_name);
graph = std::move(function_body);
graph.mutable_versions()->set_producer(graph_def_version);
for (const InputArgInstantiation& input_arg : input_args_) {
feed.push_back({input_arg.node_name, Tensor()});
}
for (const OutputArgInstantiation& output_arg : output_args_) {
fetch.push_back(output_arg.node_name);
}
for (const ControlOutput& control_output : control_outputs_) {
keep_ops.push_back(control_output.node_name);
}
optimization_options().allow_pruning_stateful_and_dataset_ops = false;
}
const string& GrapplerFunctionItem::description() const { return description_; }
const std::vector<InputArgInstantiation>& GrapplerFunctionItem::inputs() const {
return input_args_;
}
const InputArgInstantiation& GrapplerFunctionItem::input(int i) const {
return input_args_[i];
}
const std::size_t GrapplerFunctionItem::input_size() const {
return input_args_.size();
}
const std::vector<OutputArgInstantiation>& GrapplerFunctionItem::outputs()
const {
return output_args_;
}
const OutputArgInstantiation& GrapplerFunctionItem::output(int i) const {
return output_args_[i];
}
const std::size_t GrapplerFunctionItem::output_size() const {
return output_args_.size();
}
const std::vector<ControlOutput>& GrapplerFunctionItem::control_outputs()
const {
return control_outputs_;
}
const std::size_t GrapplerFunctionItem::control_output_size() const {
return control_outputs_.size();
}
const AttrSlice& GrapplerFunctionItem::func_attr() const { return func_attr_; }
const std::vector<const FunctionDef::ArgAttrs*>&
GrapplerFunctionItem::arg_attr() const {
return arg_attr_;
}
const GraphDef& GrapplerFunctionItem::function_body() const { return graph; }
GraphDef& GrapplerFunctionItem::mutable_function_body() { return graph; }
bool GrapplerFunctionItem::is_stateful() const { return is_stateful_; }
GrapplerFunctionItem& GrapplerFunctionItem::SwapFunctionBody(GraphDef&& other) {
graph = std::move(other);
return *this;
}
bool HasParametrizedType(const FunctionDef& func) {
const auto is_type_parametrized = [](const OpDef::ArgDef& arg) {
return !arg.type_attr().empty() || !arg.number_attr().empty() ||
!arg.type_list_attr().empty();
};
const auto& input = func.signature().input_arg();
const auto& output = func.signature().output_arg();
return std::any_of(input.begin(), input.end(), is_type_parametrized) ||
std::any_of(output.begin(), output.end(), is_type_parametrized);
}
bool HasParametrizedBody(const FunctionDef& func) {
const auto is_parametrized = [&](const NodeDef& node) {
for (const auto& attr : node.attr()) {
if (!attr.second.placeholder().empty()) return true;
}
return false;
};
return std::any_of(func.node_def().begin(), func.node_def().end(),
is_parametrized);
}
bool IsParametrized(const FunctionDef& func) {
return HasParametrizedType(func) || HasParametrizedBody(func);
}
Status InstantiationTypeParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, DataType>* type_parameters) {
if (!type_parameters->empty()) {
return absl::InvalidArgumentError(
"Type parameters output map must be empty");
}
const auto resolve_type_attr = [&](const OpDef::ArgDef& arg) -> Status {
if (!arg.type_attr().empty()) {
DataType dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(func_instantiation_attr, arg.type_attr(), &dtype));
type_parameters->emplace(arg.type_attr(), dtype);
} else if (!arg.type_list_attr().empty()) {
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(
GetNodeAttr(func_instantiation_attr, arg.type_list_attr(), &dtypes));
int index = 0;
for (const DataType& dtype : dtypes) {
type_parameters->emplace(absl::StrCat(arg.type_list_attr(), ":", index),
dtype);
++index;
}
}
return absl::OkStatus();
};
for (const auto& input : func.signature().input_arg())
TF_RETURN_IF_ERROR(resolve_type_attr(input));
for (const auto& output : func.signature().output_arg())
TF_RETURN_IF_ERROR(resolve_type_attr(output));
return absl::OkStatus();
}
Status InstantiationBodyParameters(
const FunctionDef& func, const AttrSlice& func_instantiation_attr,
absl::flat_hash_map<string, AttrValue>* body_parameters) {
if (!body_parameters->empty()) {
return absl::InvalidArgumentError(
"Body parameters output map must be empty");
}
for (const NodeDef& func_body_node : func.node_def()) {
for (auto& attr : func_body_node.attr()) {
const string& placeholder = attr.second.placeholder();
if (placeholder.empty() || body_parameters->contains(placeholder)) {
continue;
}
const AttrValue* placeholder_value =
func_instantiation_attr.Find(placeholder);
if (placeholder_value) {
body_parameters->insert({placeholder, *placeholder_value});
} else {
return absl::InvalidArgumentError(
absl::StrCat("Can't resolve placeholder: ", placeholder));
}
}
}
return absl::OkStatus();
}
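// Instantiates 'func' into a GrapplerFunctionItem: resolves its type
// parameters from 'func_instantiation_attr', materializes the body as a
// GraphDef with _Arg/_Retval nodes, prunes unreachable library functions,
// and records the instantiated inputs, outputs and control outputs.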
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionLibraryDefinition& flib,
const int graph_def_version,
GrapplerFunctionItem* item) {
const OpDef& signature = func.signature();
if (signature.name().empty()) {
return absl::InvalidArgumentError("Function name must be specified");
}
for (const OpDef::AttrDef& attr : signature.attr()) {
if (attr.type() != "type") {
return absl::InvalidArgumentError(
"Function signature must have only type attributes");
}
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(func, func_instantiation_attr, &flib, &fbody));
GraphDef function_body;
fbody->graph->ToGraphDef(&function_body);
*function_body.mutable_library() = flib.ReachableDefinitions(func).ToProto();
VLOG(3) << absl::Substitute(
"Deleted $0 unreachable functions from the Grappler function item "
"instantiation of $1 (library size = $2)",
flib.num_functions() - function_body.library().function_size(),
signature.name(), function_body.library().function_size());
const int num_instantiated_inputs = fbody->arg_types.size();
const int num_instantiated_outputs = fbody->ret_types.size();
std::vector<InputArgInstantiation> inputs;
inputs.reserve(num_instantiated_inputs);
for (int in_id = 0; in_id < num_instantiated_inputs; ++in_id) {
const Node* node = fbody->arg_nodes[in_id];
const DataType& dtype = fbody->arg_types[in_id];
inputs.emplace_back(node->name(), dtype);
}
std::vector<OutputArgInstantiation> outputs;
outputs.reserve(num_instantiated_outputs);
for (int out_id = 0; out_id < num_instantiated_outputs; ++out_id) {
const Node* node = fbody->ret_nodes[out_id];
const DataType& dtype = fbody->ret_types[out_id];
outputs.emplace_back(node->name(), dtype);
}
std::vector<ControlOutput> control_outputs;
control_outputs.reserve(func.control_ret_size());
for (const auto& control_ret : func.control_ret()) {
control_outputs.push_back({control_ret.first, control_ret.second});
}
std::sort(control_outputs.begin(), control_outputs.end());
std::vector<const FunctionDef::ArgAttrs*> arg_attr(inputs.size(), nullptr);
for (const auto& attr : func.arg_attr()) {
if (attr.first >= inputs.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid attribute index, got ", attr.first,
" but expected less than ", inputs.size()));
}
arg_attr.at(attr.first) = &attr.second;
}
*item = GrapplerFunctionItem(
signature.name(),
signature.description(),
AttrSlice(&func.attr()), std::move(arg_attr),
std::move(inputs), std::move(outputs), std::move(control_outputs),
graph_def_version, signature.is_stateful(), std::move(function_body));
return absl::OkStatus();
}
Status MakeGrapplerFunctionItem(const FunctionDef& func,
const FunctionLibraryDefinition& flib,
const int graph_def_version,
GrapplerFunctionItem* item) {
return MakeGrapplerFunctionItem(func, AttrSlice(), flib, graph_def_version,
item);
}
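// Replaces the function input at 'input_index' with 'input_const': the
// corresponding _Arg node is rewritten into the constant, and remaining
// _Arg indices are shifted down accordingly.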
Status ReplaceInputWithConst(const NodeDef& input_const, int input_index,
GrapplerFunctionItem* item) {
if (!IsConstant(input_const)) {
return absl::InvalidArgumentError(absl::StrCat(
"Input node is not a constant: ", SummarizeNodeDef(input_const)));
}
const int item_input_size = item->input_size();
if (input_index < 0 || input_index >= item_input_size) {
return absl::InvalidArgumentError(absl::StrCat(
"Function input index is out of bound: index=", input_index,
" input_size=", item->input_size()));
}
const InputArgInstantiation& input_arg = item->input(input_index);
for (NodeDef& node : *item->graph.mutable_node()) {
if (node.name() == input_arg.node_name) {
node = input_const;
node.set_name(input_arg.node_name);
node.clear_input();
node.clear_device();
}
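// Decrement the "index" attribute of every _Arg node that followed the
// replaced input (the replaced node itself is no longer an _Arg).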
if (IsArg(node)) {
auto attrs = AttrSlice(node);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
if (index >= input_index) {
(*node.mutable_attr())["index"].set_i(index - 1);
}
}
}
item->input_args_.erase(item->input_args_.begin() + input_index);
item->arg_attr_.erase(item->arg_attr_.begin() + input_index);
return absl::OkStatus();
}
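// Drops the requested outputs from the function and records how surviving
// outputs were renumbered in 'output_mapping' (old index -> new index),
// updating the "index" attribute of the remaining _Retval nodes to match.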
Status RemoveFunctionOutputs(const absl::flat_hash_set<int>& remove_outputs,
GrapplerFunctionItem* item,
std::vector<std::pair<int, int>>* output_mapping) {
DCHECK(output_mapping->empty());
for (int remove_output : remove_outputs) {
const int item_output_size = item->output_size();
if (remove_output < 0 || remove_output >= item_output_size) {
return absl::InvalidArgumentError(absl::StrCat(
"Function output index is out of bound: index=", remove_output,
" output_size=", item->output_size()));
}
}
absl::flat_hash_set<const OutputArgInstantiation*> remove_output_args;
const auto is_remove_output_arg = [&](const OutputArgInstantiation& output) {
return remove_output_args.find(&output) != remove_output_args.end();
};
for (int i = 0, end = item->output_size(); i < end; ++i) {
const OutputArgInstantiation& output = item->output(i);
if (remove_outputs.contains(i)) {
VLOG(3) << "Remove functions output: name=" << output.node_name
<< "(index = " << i << ")";
remove_output_args.insert(&output);
} else if (!remove_output_args.empty()) {
output_mapping->push_back({i, i - remove_output_args.size()});
}
}
for (NodeDef& node : *item->graph.mutable_node()) {
if (IsRetval(node)) {
auto attrs = AttrSlice(node);
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "index", &index));
for (const auto& mapping : *output_mapping) {
const int from = mapping.first;
const int to = mapping.second;
if (index == from) {
(*node.mutable_attr())["index"].set_i(to);
}
}
}
}
auto& o = item->output_args_;
o.erase(std::remove_if(o.begin(), o.end(), is_remove_output_arg), o.end());
return absl::OkStatus();
}
namespace {
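// Helper for lowering a function-body GraphDef back into a FunctionDef.
// Tracks which body nodes are instantiated inputs/outputs and rewrites
// GraphDef tensor names ("node:index") into FunctionDef input strings
// ("node:output_arg_name:index") using each node's output name ranges.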
class MakeFunctionDefHelper {
public:
MakeFunctionDefHelper() = default;
Status Initialize(const GrapplerFunctionItem& item,
const FunctionLibraryDefinition& flib);
Status AsFunctionDefInput(const string& graph_def_input,
string* func_def_input) const;
Status AsFunctionDefNode(NodeDef* function_body_node) const;
bool IsInputNode(const NodeDef& node) const {
return input_nodes_.contains(node.name());
}
bool IsOutputNode(const NodeDef& node) const {
return output_nodes_.contains(node.name());
}
private:
absl::flat_hash_set<absl::string_view> input_nodes_;
absl::flat_hash_set<absl::string_view> output_nodes_;
absl::flat_hash_map<string, tensorflow::NameRangeMap> function_body_outputs_;
};
Status MakeFunctionDefHelper::Initialize(
const GrapplerFunctionItem& item, const FunctionLibraryDefinition& flib) {
for (const InputArgInstantiation& input_arg : item.inputs()) {
input_nodes_.insert(input_arg.node_name);
}
for (const OutputArgInstantiation& output_arg : item.outputs()) {
output_nodes_.insert(output_arg.node_name);
}
for (const NodeDef& node : item.function_body().node()) {
const OpRegistrationData* registration;
TF_RETURN_IF_ERROR(flib.LookUp(node.op(), ®istration));
tensorflow::NameRangeMap outputs_range_map;
TF_RETURN_IF_ERROR(tensorflow::NameRangesForNode(
node, registration->op_def, nullptr, &outputs_range_map));
function_body_outputs_.emplace(node.name(), std::move(outputs_range_map));
}
return absl::OkStatus();
}
Status MakeFunctionDefHelper::AsFunctionDefInput(const string& graph_def_input,
string* func_def_input) const {
if (IsControlInput(graph_def_input)) {
*func_def_input = graph_def_input;
return absl::OkStatus();
}
const SafeTensorId tensor = ParseTensorName(graph_def_input);
DCHECK_GE(tensor.index(), 0);
const auto is_input = input_nodes_.find(tensor.node());
if (is_input != input_nodes_.end()) {
DCHECK_EQ(tensor.index(), 0);
*func_def_input = tensor.node();
return absl::OkStatus();
}
const auto is_body_output = function_body_outputs_.find(tensor.node());
if (is_body_output != function_body_outputs_.end()) {
const tensorflow::NameRangeMap& outputs_range_map = is_body_output->second;
for (const auto& el : outputs_range_map) {
const auto& output_name = el.first;
const auto& output_range = el.second;
if (tensor.index() >= output_range.first &&
tensor.index() < output_range.second) {
*func_def_input = absl::StrCat(tensor.node(), ":", output_name, ":",
tensor.index() - output_range.first);
return absl::OkStatus();
}
}
}
return absl::InvalidArgumentError(
absl::StrCat("Unknown graph def input: ", graph_def_input));
}
Status MakeFunctionDefHelper::AsFunctionDefNode(
NodeDef* function_body_node) const {
string func_def_input;
for (int i = 0; i < function_body_node->input_size(); ++i) {
TF_RETURN_IF_ERROR(
AsFunctionDefInput(function_body_node->input(i), &func_def_input));
function_body_node->set_input(i, func_def_input);
}
return absl::OkStatus();
}
}
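// Converts a GrapplerFunctionItem back into a FunctionDef: rebuilds the
// signature from the instantiated args, maps each _Retval's input to a ret
// entry, and rewrites body node inputs into FunctionDef form.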
Status MakeFunctionDef(const GrapplerFunctionItem& item,
const FunctionLibraryDefinition& flib,
FunctionDef* func) {
func->mutable_signature()->set_name(item.id);
func->mutable_signature()->set_description(item.description());
func->mutable_signature()->set_is_stateful(item.is_stateful());
MakeFunctionDefHelper helper;
TF_RETURN_IF_ERROR(helper.Initialize(item, flib));
absl::flat_hash_map<absl::string_view, string> output_tensors;
for (const NodeDef& func_body_node : item.function_body().node()) {
if (!helper.IsOutputNode(func_body_node)) continue;
if (func_body_node.input_size() != 1) {
return absl::InternalError(
absl::StrCat("_Retval node must have single input: ",
SummarizeNodeDef(func_body_node)));
}
output_tensors.emplace(func_body_node.name(), func_body_node.input(0));
}
for (const InputArgInstantiation& input_arg : item.inputs()) {
OpDef::ArgDef arg_def;
arg_def.set_name(input_arg.node_name);
arg_def.set_type(input_arg.data_type);
arg_def.set_is_ref(IsRefType(input_arg.data_type));
*func->mutable_signature()->add_input_arg() = arg_def;
}
for (const OutputArgInstantiation& output_arg : item.outputs()) {
const string output_name =
absl::StrReplaceAll(output_arg.node_name, {{"_RetVal", ""}});
OpDef::ArgDef arg_def;
arg_def.set_name(output_name);
arg_def.set_type(output_arg.data_type);
arg_def.set_is_ref(IsRefType(output_arg.data_type));
*func->mutable_signature()->add_output_arg() = arg_def;
auto it = output_tensors.find(output_arg.node_name);
if (it == output_tensors.end()) {
return absl::InternalError(
absl::StrCat("Can't find an output tensor for the output node: ",
output_arg.node_name));
}
TF_RETURN_IF_ERROR(helper.AsFunctionDefInput(
it->second, &(*func->mutable_ret())[output_name]));
}
for (const ControlOutput& control_out : item.control_outputs()) {
func->mutable_control_ret()->insert(
{control_out.output_name, control_out.node_name});
*func->mutable_signature()->add_control_output() = control_out.output_name;
}
for (const auto& attr : item.func_attr()) {
const auto& attr_name = attr.first;
const auto& attr_value = attr.second;
(*func->mutable_attr())[attr_name] = attr_value;
}
for (int i = 0, end = item.arg_attr().size(); i < end; ++i) {
const auto* attr = item.arg_attr().at(i);
if (attr != nullptr) {
(*func->mutable_arg_attr())[i] = *attr;
}
}
for (const NodeDef& func_node : item.function_body().node()) {
if (IsArg(func_node) || IsRetval(func_node) ||
helper.IsInputNode(func_node) || helper.IsOutputNode(func_node))
continue;
NodeDef* func_def_node = func->add_node_def();
*func_def_node = func_node;
TF_RETURN_IF_ERROR(helper.AsFunctionDefNode(func_def_node));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/functions.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
class FunctionsTest : public ::testing::Test {};
TEST_F(FunctionsTest, IsParametrized) {
FunctionDef parametrized_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef non_parametrized_func = FunctionDefHelper::Create(
"MyMul", {"x:float", "y:float"}, {"z:float"}, {},
{{{"output"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "output:z:0"}});
EXPECT_TRUE(HasParametrizedType(parametrized_func));
EXPECT_TRUE(HasParametrizedBody(parametrized_func));
EXPECT_TRUE(IsParametrized(parametrized_func));
EXPECT_FALSE(HasParametrizedType(non_parametrized_func));
EXPECT_FALSE(HasParametrizedBody(non_parametrized_func));
EXPECT_FALSE(IsParametrized(non_parametrized_func));
}
TEST_F(FunctionsTest, InstantiationParameters) {
FunctionDef func = FunctionDefHelper::Create(
"ParametrizedFunc",
{"input1:A", "input2:B", "input3:float", "input4: C"},
{"output1: A", "output2:D"},
{
"A: {float, double}",
"B: {float, int32}",
"C: list(type)",
"D: {float, double}",
},
{{{"output"}, "FakeOp", {"input1", "input2"}, {{"key", "$key"}}}},
{{"x", "cx:output:0"}, {"y", "cy:output:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["key"].set_s("key-value");
func_instantiation_attr["A"].set_type(DT_FLOAT);
func_instantiation_attr["B"].set_type(DT_INT32);
func_instantiation_attr["C"].mutable_list()->add_type(DT_FLOAT);
func_instantiation_attr["C"].mutable_list()->add_type(DT_INT32);
func_instantiation_attr["D"].set_type(DT_DOUBLE);
absl::flat_hash_map<string, DataType> type_parameters;
TF_EXPECT_OK(InstantiationTypeParameters(
func, AttrSlice(&func_instantiation_attr), &type_parameters));
ASSERT_EQ(5, type_parameters.size());
EXPECT_EQ(DT_FLOAT, type_parameters["A"]);
EXPECT_EQ(DT_INT32, type_parameters["B"]);
EXPECT_EQ(DT_FLOAT, type_parameters["C:0"]);
EXPECT_EQ(DT_INT32, type_parameters["C:1"]);
EXPECT_EQ(DT_DOUBLE, type_parameters["D"]);
absl::flat_hash_map<string, AttrValue> body_parameters;
TF_EXPECT_OK(InstantiationBodyParameters(
func, AttrSlice(&func_instantiation_attr), &body_parameters));
ASSERT_EQ(1, body_parameters.size());
EXPECT_EQ("key-value", body_parameters["key"].s());
}
TEST_F(FunctionsTest, FromSimpleFunctionDef) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("XTimesTwo", item.id);
EXPECT_EQ(5, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ("x", item.input(0).node_name);
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("y_RetVal", item.output(0).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" && ++count) {
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(0, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "two" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "scale" && ++count) {
EXPECT_EQ("Cast", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("two", node.input(0));
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("scale", node.input(1));
} else if (node.name() == "y_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(0, node.attr().at("index").i());
}
}
EXPECT_EQ(5, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithMultiOutputNodes) {
std::vector<FunctionDefHelper::Node> nodes = {
{{"sx"}, "Shape", {"x"}},
{{"sy"}, "Shape", {"y"}},
{{"gx"}, "Identity", {"dz"}},
{{"gy"}, "Neg", {"dz"}},
{{"rx", "ry"}, "BroadcastGradientArgs", {"sx", "sy"}},
{{"sum_gx"}, "Sum", {"gx", "rx"}},
{{"dx"}, "Reshape", {"sum_gx", "sx"}},
{{"sum_gy"}, "Sum", {"gy", "ry"}},
{{"dy"}, "Reshape", {"sum_gy", "sy"}},
};
for (auto &n : nodes) {
if (n.attr.empty() && n.op != "BroadcastGradientArgs") {
n.attr = {{"T", "$T"}};
}
}
FunctionDef func = FunctionDefHelper::Define(
"SubGrad",
{"x: T", "y: T", "dz: T"},
{"dx: T", "dy: T"},
{{"T: {half, float, double}"}},
nodes);
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("SubGrad", item.id);
EXPECT_EQ(14, item.function_body().node_size());
ASSERT_EQ(3, item.input_size());
EXPECT_EQ("x", item.input(0).node_name);
EXPECT_EQ("y", item.input(1).node_name);
EXPECT_EQ("dz", item.input(2).node_name);
ASSERT_EQ(2, item.output_size());
EXPECT_EQ("dx_RetVal", item.output(0).node_name);
EXPECT_EQ("dy_RetVal", item.output(1).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" || node.name() == "y" || node.name() == "dz") {
count++;
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
int expected_index = node.name() == "x" ? 0 : node.name() == "y" ? 1 : 2;
EXPECT_EQ(expected_index, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "rx" && ++count) {
EXPECT_EQ("BroadcastGradientArgs", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("sx", node.input(0));
EXPECT_EQ("sy", node.input(1));
} else if (node.name() == "sum_gx" && ++count) {
EXPECT_EQ("Sum", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("gx", node.input(0));
EXPECT_EQ("rx", node.input(1));
} else if (node.name() == "sum_gy" && ++count) {
EXPECT_EQ("Sum", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("gy", node.input(0));
EXPECT_EQ("rx:1", node.input(1));
} else if (node.name() == "dx_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(0, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("dx", node.input(0));
} else if (node.name() == "dy_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(1, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("dy", node.input(0));
}
}
EXPECT_EQ(8, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithNestedFuncs) {
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
TF_ASSERT_OK(flib.AddFunctionDef(FunctionDefHelper::Define(
"Swap",
{"i0: T", "i1: T"},
{"o0: T", "o1: T"},
{"T: {float, double}"},
{{{"o0"}, "Identity", {"i1"}, {{"T", "$T"}}},
{{"o1"}, "Identity", {"i0"}, {{"T", "$T"}}}})));
FunctionDef func = FunctionDefHelper::Create(
"ManySwapsFirst",
{"x: float", "y: float"},
{"o: float"},
{},
{{{"a0"}, "Swap", {"x", "y"}, {{"T", DT_FLOAT}}, {"x2"}},
{{"a1"}, "Swap", {"a0:o0:0", "a0:o1:0"}, {{"T", DT_FLOAT}}},
{{"x2"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}},
{{"y2"}, "Mul", {"y", "y"}, {{"T", DT_FLOAT}}, {"a1"}},
{{"o"}, "Add", {"x2:z:0", "y2:z:0"}, {{"T", DT_FLOAT}}}},
{{"o", "o:z:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "x" || node.name() == "y") {
count++;
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
int expected_index = node.name() == "x" ? 0 : 1;
EXPECT_EQ(expected_index, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "a0" && ++count) {
EXPECT_EQ("Swap", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^x2", node.input(2));
} else if (node.name() == "a1" && ++count) {
EXPECT_EQ("Swap", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("a0", node.input(0));
EXPECT_EQ("a0:1", node.input(1));
} else if (node.name() == "x2" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("x", node.input(1));
} else if (node.name() == "y2" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^a1", node.input(2));
} else if (node.name() == "o" && ++count) {
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x2", node.input(0));
EXPECT_EQ("y2", node.input(1));
}
}
EXPECT_EQ(7, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithOutputMappings) {
FunctionDef func = FunctionDefHelper::Create(
"Exp_func",
{"in: float"},
{"out: float"},
{},
{{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
{{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
{{"out", "Exp:y:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("out_RetVal", item.output(0).node_name);
int count = 0;
for (const NodeDef &node : item.function_body().node()) {
if (node.name() == "in" && ++count) {
EXPECT_EQ("_Arg", node.op());
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
EXPECT_EQ(0, node.attr().at("index").i());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Linear_func" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("in", node.input(0));
} else if (node.name() == "Exp" && ++count) {
EXPECT_EQ("Exp", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("Linear_func", node.input(0));
} else if (node.name() == "out_RetVal" && ++count) {
EXPECT_EQ("_Retval", node.op());
EXPECT_EQ(0, node.attr().at("index").i());
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("Exp", node.input(0));
}
}
EXPECT_EQ(4, count);
}
TEST_F(FunctionsTest, FromFunctionDefWithoutInput) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"GenerateTwo",
{},
{"o: T"},
{"T: {float, double}"},
{{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(0, item.input_size());
EXPECT_EQ(1, item.output_size());
EXPECT_EQ("o_RetVal", item.output(0).node_name);
EXPECT_EQ(3, item.function_body().node_size());
const NodeDef &two = item.function_body().node(0);
EXPECT_EQ("two", two.name());
EXPECT_EQ(0, two.input_size());
const NodeDef &cast = item.function_body().node(1);
EXPECT_EQ("o", cast.name());
EXPECT_EQ(1, cast.input_size());
EXPECT_EQ("two", cast.input(0));
const NodeDef &retval = item.function_body().node(2);
EXPECT_EQ("o_RetVal", retval.name());
EXPECT_EQ(1, retval.input_size());
EXPECT_EQ("o", retval.input(0));
}
TEST_F(FunctionsTest, FromFunctionDefWithSideEffectfulOps) {
const Tensor kOne = test::AsScalar<float>(1.0);
FunctionDef func = FunctionDefHelper::Define(
"SideEffects",
{"x: Ref(float)"},
{},
{},
{{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"update"}, "AssignAdd", {"x", "one"}, {{"T", DT_FLOAT}}}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("SideEffects", item.id);
EXPECT_EQ(3, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ(0, item.output_size());
const auto &opts = item.optimization_options();
EXPECT_FALSE(opts.allow_pruning_stateful_and_dataset_ops);
}
TEST_F(FunctionsTest, FromFunctionDefWithControlOutputs) {
const Tensor kOne = test::AsScalar<float>(1.0);
FunctionDef func = FunctionDefHelper::Create(
"WithControlOutputs", {"x: Ref(float)"}, {}, {},
{
{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"update"}, "AssignAdd", {"x", "one:output:0"}, {{"T", DT_FLOAT}}},
},
{}, {{"side_effects", "update"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ("WithControlOutputs", item.id);
EXPECT_EQ(3, item.function_body().node_size());
EXPECT_EQ(1, item.input_size());
EXPECT_EQ(0, item.output_size());
ASSERT_EQ(1, item.keep_ops.size());
EXPECT_EQ("update", item.keep_ops[0]);
ASSERT_EQ(1, item.control_output_size());
const ControlOutput &ctrl = item.control_outputs()[0];
EXPECT_EQ("side_effects", ctrl.output_name);
EXPECT_EQ("update", ctrl.node_name);
}
TEST_F(FunctionsTest, MakeFunctionDef) {
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"XTimesTwo",
{"x: T"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"scale"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", "$T"}}},
});
const uint32 arg_index = 0;
const std::pair<string, string> arg_attr_key_and_value = {"_arg_attr", "abc"};
FunctionDef::ArgAttrs arg_attr;
(*arg_attr.mutable_attr())[arg_attr_key_and_value.first].set_s(
arg_attr_key_and_value.second);
(*func.mutable_arg_attr())[arg_index] = arg_attr;
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
EXPECT_EQ("x", specialized.signature().input_arg(0).name());
EXPECT_EQ(DT_FLOAT, specialized.signature().input_arg(0).type());
EXPECT_EQ("y", specialized.signature().output_arg(0).name());
EXPECT_EQ(DT_FLOAT, specialized.signature().output_arg(0).type());
EXPECT_EQ(specialized.arg_attr().size(), 1);
EXPECT_EQ(specialized.arg_attr().at(arg_index).attr().size(), 1);
EXPECT_EQ(specialized.arg_attr()
.at(arg_index)
.attr()
.at(arg_attr_key_and_value.first)
.s(),
arg_attr_key_and_value.second);
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "scale" && ++count) {
EXPECT_EQ(DT_FLOAT, node.attr().at("DstT").type());
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("scale:y:0", node.input(1));
EXPECT_EQ(DT_FLOAT, node.attr().at("T").type());
}
}
EXPECT_EQ(2, count);
}
TEST_F(FunctionsTest, ReplaceInputWithConst) {
FunctionDef func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
EXPECT_EQ(2, item.input_size());
EXPECT_EQ(1, item.output_size());
ASSERT_EQ(4, item.function_body().node_size());
const NodeDef &input_x = item.function_body().node(0);
const NodeDef &input_y = item.function_body().node(1);
EXPECT_EQ("_Arg", input_x.op());
EXPECT_EQ("_Arg", input_y.op());
NodeDef const_input_x;
const_input_x.set_op("Const");
AddNodeAttr("Tag", "const_input_x", &const_input_x);
NodeDef const_input_y;
const_input_y.set_op("Const");
AddNodeAttr("Tag", "const_input_y", &const_input_y);
TF_EXPECT_OK(ReplaceInputWithConst(const_input_x, 0, &item));
EXPECT_EQ(1, item.input_size());
EXPECT_EQ("Const", input_x.op());
EXPECT_EQ("const_input_x", input_x.attr().at("Tag").s());
TF_EXPECT_OK(ReplaceInputWithConst(const_input_y, 0, &item));
EXPECT_EQ(0, item.input_size());
EXPECT_EQ("Const", input_y.op());
EXPECT_EQ("const_input_y", input_y.attr().at("Tag").s());
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
EXPECT_EQ(0, specialized.signature().input_arg_size());
EXPECT_EQ(1, specialized.signature().output_arg_size());
EXPECT_EQ(3, specialized.node_def_size());
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "x" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ("const_input_x", node.attr().at("Tag").s());
} else if (node.name() == "y" && ++count) {
EXPECT_EQ("Const", node.op());
EXPECT_EQ("const_input_y", node.attr().at("Tag").s());
} else if (node.name() == "output" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ("x:output:0", node.input(0));
EXPECT_EQ("y:output:0", node.input(1));
}
}
EXPECT_EQ(3, count);
}
TEST_F(FunctionsTest, SwapFunctionBodyAndMakeFunctionDef) {
using ::tensorflow::test::function::NDef;
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
GraphDef id_func_body = test::function::GDef(
{
NDef("read_x", "Identity", {"x"}, {{"T", "float"}}),
NDef("z_RetVal", "_Retval", {"read_x"}, {{"T", "float"}})});
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_FLOAT);
FunctionDefLibrary lib_def;
*lib_def.add_function() = func;
*lib_def.add_function() = mul_func;
FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
GrapplerFunctionItem item;
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
item.SwapFunctionBody(std::move(id_func_body));
FunctionDef specialized;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &specialized));
int count = 0;
for (const NodeDef &node : specialized.node_def()) {
if (node.name() == "read_x" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ("x", node.input(0));
}
}
EXPECT_EQ(1, count);
EXPECT_EQ("read_x:output:0", (*specialized.mutable_ret())["z"]);
}
TEST_F(FunctionsTest, FunctionDefGrapplerFunctionItemRoundTrip) {
FunctionDef func = FunctionDefHelper::Create(
"DoNothing", {"i: int32"}, {"o: int32"},
{},
{
{{"id"}, "Identity", {"i"}, {{"T", DT_INT32}}},
},
{{"o", "id:output:0"}},
{{"must_execute", "id"}});
constexpr char description[] = "This is a helpful description.";
func.mutable_signature()->set_description(description);
FunctionLibraryDefinition flib(OpRegistry::Global(), FunctionDefLibrary());
GrapplerFunctionItem item;
protobuf::Map<string, AttrValue> func_instantiation_attr;
func_instantiation_attr["T"].set_type(DT_INT32);
TF_EXPECT_OK(MakeGrapplerFunctionItem(func,
AttrSlice(&func_instantiation_attr),
flib, TF_GRAPH_DEF_VERSION, &item));
FunctionDef func2;
TF_EXPECT_OK(MakeFunctionDef(item, flib, &func2));
EXPECT_TRUE(FunctionDefsEqual(func, func2));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/functions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/functions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c8c31e5-79a6-45c5-8432-abce7d1be83d | cpp | tensorflow/tensorflow | topological_sort | tensorflow/compiler/mlir/tensorflow/utils/topological_sort.cc | tensorflow/core/grappler/utils/topological_sort_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/topological_sort.h"
#include <algorithm>
#include <queue>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
namespace mlir {
namespace TF {
ExtraDependenciesFunction no_extra_dependencies = nullptr;
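// Kahn-style topological sort of the ops in `block`, honoring SSA data edges
// plus the extra control edges supplied by `extraDependencies`; ties among
// ready ops are broken by `priorityFunction` and by original program order.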
std::vector<Operation*> SortBlockTopologically(
Block& block, PriorityFunction priorityFunction,
ExtraDependenciesFunction extraDependencies) {
llvm::DenseMap<Operation*, int> remaining_incoming_data_edges;
llvm::DenseMap<Operation*, int> remaining_incoming_ctrl_edges;
llvm::DenseMap<Operation*, int> position;
llvm::DenseMap<Operation*, Operation*> ancestor;
SmallVector<Operation*> ready;
llvm::SmallVector<mlir::Operation*, 4> empty_op_set;
auto ctrlPredecessors =
[&](Operation* op) -> llvm::SmallVector<mlir::Operation*, 4> const& {
if (extraDependencies) {
return extraDependencies(op, true);
} else {
return empty_op_set;
}
};
auto ctrlSuccessors =
[&](Operation* op) -> llvm::SmallVector<mlir::Operation*, 4> const& {
if (extraDependencies) {
return extraDependencies(op, false);
} else {
return empty_op_set;
}
};
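  // First pass: record each top-level op's position, map nested ops to their
  // top-level ancestor, and count incoming data/control edges that originate
  // inside this block.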
int i = 0;
for (Operation& op : block.getOperations()) {
int incoming_ctrl_edges = 0;
int incoming_data_edges = 0;
op.walk([&](Operation* child) {
ancestor[child] = &op;
for (Operation* predecessor : ctrlPredecessors(child)) {
if (predecessor->getBlock() == &block) {
incoming_ctrl_edges++;
}
}
for (Value v : child->getOperands()) {
if (v.getParentBlock() == &block) {
incoming_data_edges++;
}
}
});
remaining_incoming_data_edges[&op] = incoming_data_edges;
remaining_incoming_ctrl_edges[&op] = incoming_ctrl_edges;
if (incoming_data_edges == 0 && incoming_ctrl_edges == 0) {
ready.push_back(&op);
}
position[&op] = i++;
}
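  // Newly available values are queued here; block arguments are available
  // from the start.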
std::queue<Value> todo;
for (Value value : block.getArguments()) {
todo.push(value);
}
std::vector<Operation*> result;
Operation* previous_op = nullptr;
while (!todo.empty() || !ready.empty()) {
while (!todo.empty()) {
Value value = todo.front();
todo.pop();
for (OpOperand& operand : value.getUses()) {
Operation* user = ancestor[operand.getOwner()];
remaining_incoming_data_edges[user]--;
if (remaining_incoming_data_edges[user] == 0 &&
remaining_incoming_ctrl_edges[user] == 0) {
ready.push_back(user);
}
}
}
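    // Tie-break among ready ops: non-terminators first, then higher priority,
    // then earlier original position.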
auto better = [&](Operation* a, Operation* b) {
if (a->hasTrait<OpTrait::IsTerminator>() !=
b->hasTrait<OpTrait::IsTerminator>()) {
return b->hasTrait<OpTrait::IsTerminator>();
}
int a_priority = priorityFunction(previous_op, a);
int b_priority = priorityFunction(previous_op, b);
if (a_priority != b_priority) {
return a_priority > b_priority;
} else {
return position[a] < position[b];
}
};
Operation* best = nullptr;
for (Operation* op : ready) {
if (best == nullptr || better(op, best)) {
best = op;
}
}
if (!best) {
assert(ready.empty());
return result;
}
ready.erase(std::find(ready.begin(), ready.end(), best));
previous_op = best;
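    // Releasing the chosen op's results and control successors lets their
    // consumers become ready.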
for (Value result : best->getResults()) {
todo.push(result);
}
for (Operation* successor : ctrlSuccessors(best)) {
if (ancestor.find(successor) != ancestor.end()) {
successor = ancestor[successor];
remaining_incoming_ctrl_edges[successor]--;
if (remaining_incoming_ctrl_edges[successor] == 0 &&
remaining_incoming_data_edges[successor] == 0) {
ready.push_back(successor);
}
}
}
result.push_back(best);
}
return result;
}
}
} | #include "tensorflow/core/grappler/utils/topological_sort.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
class TopologicalSortTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, std::vector<string> inputs)
: name(std::move(name)), inputs(std::move(inputs)) {}
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(TopologicalSortTest, NoLoop) {
GraphDef graph = CreateGraph({
{"2", {"5"}},
{"0", {"5", "4"}},
{"1", {"4", "3"}},
{"3", {"2"}},
{"5", {}},
{"4", {}}
});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
const std::vector<string> order = {"5", "4", "2", "0", "3", "1"};
ASSERT_EQ(topo_order.size(), order.size());
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
EXPECT_EQ(node->name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < topo_order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, WithLoop) {
GraphDef graph = CreateGraph({
{"2", "Merge", {"1", "5"}},
{"3", "Switch", {"2"}},
{"4", "Identity", {"3"}},
{"5", "NextIteration", {"4"}},
{"1", {}}
});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
const std::vector<string> order = {"1", "2", "3", "4", "5"};
ASSERT_EQ(topo_order.size(), order.size());
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
EXPECT_EQ(node->name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, WithIllegalLoop) {
GraphDef graph = CreateGraph({
{"2", {"1", "3"}},
{"3", {"2"}},
{"1", {}}
});
EXPECT_FALSE(TopologicalSort(&graph).ok());
std::vector<string> order = {"2", "3", "1"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, DuplicatedInputs) {
GraphDef graph = CreateGraph({
{"2", {"1", "1"}},
{"1", {}}
});
TF_EXPECT_OK(TopologicalSort(&graph));
std::vector<string> order = {"1", "2"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, Idempotent) {
GraphDef graph = CreateGraph({
{"1", {}},
{"2", {}},
{"3", {"1", "2"}},
{"4", {"1", "3"}},
{"5", {"2", "3"}}
});
TF_EXPECT_OK(TopologicalSort(&graph));
std::vector<string> order = {"1", "2", "3", "4", "5"};
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
TF_EXPECT_OK(TopologicalSort(&graph));
for (int i = 0; i < order.size(); i++) {
EXPECT_EQ(graph.node(i).name(), order[i]);
}
}
TEST_F(TopologicalSortTest, ExtraDependencies) {
GraphDef graph = CreateGraph({
{"2", {"5"}},
{"0", {"5", "4"}},
{"1", {"4", "3"}},
{"3", {"2"}},
{"5", {}},
{"4", {}}
});
std::vector<TopologicalDependency> extra_dependencies;
extra_dependencies.push_back({&graph.node(5), &graph.node(4)});
std::vector<const NodeDef*> topo_order;
TF_EXPECT_OK(ComputeTopologicalOrder(graph, extra_dependencies, &topo_order));
const std::vector<string> valid_order_1 = {"4", "5", "2", "0", "3", "1"};
const std::vector<string> valid_order_2 = {"4", "5", "0", "2", "3", "1"};
ASSERT_EQ(topo_order.size(), valid_order_1.size());
std::vector<string> computed_order(6, "");
for (int i = 0; i < topo_order.size(); ++i) {
const NodeDef* node = topo_order[i];
computed_order[i] = node->name();
}
EXPECT_TRUE(computed_order == valid_order_1 ||
computed_order == valid_order_2);
extra_dependencies.push_back({&graph.node(1), &graph.node(5)});
EXPECT_FALSE(
ComputeTopologicalOrder(graph, extra_dependencies, &topo_order).ok());
}
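// Benchmarks ComputeTopologicalOrder on random graphs of increasing size.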
static void BM_ComputeTopologicalOrder(::testing::benchmark::State& state) {
const int size = state.range(0);
GraphDef graph = test::CreateRandomGraph(size);
std::vector<const NodeDef*> topo_order;
for (auto s : state) {
topo_order.clear();
Status st = ComputeTopologicalOrder(graph, &topo_order);
CHECK(st.ok()) << "Failed to compute topological order";
}
}
BENCHMARK(BM_ComputeTopologicalOrder)
->Arg(10)
->Arg(100)
->Arg(1000)
->Arg(10000)
->Arg(25000)
->Arg(50000)
->Arg(100000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/topological_sort.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/topological_sort_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a906af01-7ab9-4be4-9a21-19751f5ce494 | cpp | tensorflow/tensorflow | generic_layout_optimizer_transposer_factory | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory_test.cc | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include "tensorflow/core/grappler/op_types.h"
namespace tensorflow {
namespace grappler {
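// Returns the transposer for this op, creating and caching one shared
// instance per transposer kind so that related ops reuse it; returns nullptr
// when no layout transposer applies.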
std::shared_ptr<Transposer> TransposerFactory::GetTransposer(
const NodeDef& node) {
if (IsDefaultLayoutSensitiveOp(node)) {
return GetOrCreateIfNotFound<DefaultLayoutSensitiveOpTransposer>(
"DefaultLayoutSensitiveOp");
}
if (IsAvgPoolGrad(node)) {
return GetOrCreateIfNotFound<AvgPoolGradTransposer>("AvgPoolGrad");
}
if (IsBiasAddV2(node)) {
return GetOrCreateIfNotFound<BiasAddTransposer>("BiasAdd");
}
if (IsBiasAddGrad(node)) {
return GetOrCreateIfNotFound<BiasAddGradTransposer>("BiasAddGrad");
}
if (IsConv2DBackpropFilter(node) ||
IsDepthwiseConv2dNativeBackpropFilter(node)) {
return GetOrCreateIfNotFound<Conv2DBackpropFilterTransposer>(
"Conv2DBackpropFilter");
}
if (IsConv2DBackpropInput(node) ||
IsDepthwiseConv2dNativeBackpropInput(node)) {
return GetOrCreateIfNotFound<Conv2DBackpropInputTransposer>(
"Conv2DBackpropInput");
}
if (IsConv3D(node)) {
return GetOrCreateIfNotFound<Conv3DTransposer>("Conv3D");
}
if (IsConv3DBackpropInputV2(node)) {
return GetOrCreateIfNotFound<Conv3DBackpropInputTransposer>(
"Conv3DBackpropInput");
}
if (IsConv3DBackpropFilterV2(node)) {
return GetOrCreateIfNotFound<Conv3DBackpropFilterTransposer>(
"Conv3DBackpropFilter");
}
if (IsFusedBatchNormEx(node)) {
return GetOrCreateIfNotFound<FusedBatchNormExTransposer>(
"FusedBatchNormEx");
}
if (IsFusedBatchNormGrad(node)) {
return GetOrCreateIfNotFound<FusedBatchNormGradTransposer>(
"FusedBatchNormGrad");
}
if (IsMaxPoolV2(node)) {
return GetOrCreateIfNotFound<MaxPoolV2Transposer>("MaxPoolV2");
}
if (IsMaxPoolGrad(node) || IsMaxPoolGradGradV1(node)) {
return GetOrCreateIfNotFound<MaxPoolGradTransposer>("MaxPoolGrad");
}
if (IsMaxPoolGradV2(node) || IsMaxPoolGradGradV2(node)) {
return GetOrCreateIfNotFound<MaxPoolGradV2Transposer>("MaxPoolGradV2");
}
if (IsMaxPool3D(node)) {
return GetOrCreateIfNotFound<MaxPool3DTransposer>("MaxPool3D");
}
if (IsDefaultLayoutAgnosticOp(node)) {
return GetOrCreateIfNotFound<DefaultLayoutAgnosticOpTransposer>(
"DefaultLayoutAgnosticOp");
}
if (IsAddN(node)) {
return GetOrCreateIfNotFound<AddNTransposer>("AddN");
}
if (IsBinaryOp(node)) {
return GetOrCreateIfNotFound<BinaryOpTransposer>("BinaryOp");
}
if (IsConcat(node)) {
return GetOrCreateIfNotFound<ConcatOpTransposer>("Concat");
}
if (IsFill(node)) {
return GetOrCreateIfNotFound<FillOpTransposer>("Fill");
}
if (IsIdentityN(node)) {
return GetOrCreateIfNotFound<IdentityNTransposer>("IdentityN");
}
if (IsMerge(node)) {
return GetOrCreateIfNotFound<MergeTransposer>("Merge");
}
if (IsMirrorPad(node) || IsMirrorPadGrad(node) || IsPad(node)) {
return GetOrCreateIfNotFound<PadTransposer>("Pad");
}
if (IsReduceOp(node)) {
return GetOrCreateIfNotFound<ReduceTransposer>("ReduceOp");
}
if (IsReverseV2(node)) {
return GetOrCreateIfNotFound<ReverseV2Transposer>("ReverseV2");
}
if (IsSelect(node)) {
return GetOrCreateIfNotFound<SelectTransposer>("Select");
}
if (IsShape(node)) {
return GetOrCreateIfNotFound<ShapeTransposer>("Shape");
}
if (IsShapeN(node)) {
return GetOrCreateIfNotFound<ShapeNTransposer>("ShapeN");
}
if (IsSlice(node)) {
return GetOrCreateIfNotFound<SliceTransposer>("Slice");
}
if (IsSplit(node)) {
return GetOrCreateIfNotFound<SplitTransposer>("Split");
}
if (IsSplitV(node)) {
return GetOrCreateIfNotFound<SplitVTransposer>("SplitV");
}
if (IsSqueeze(node)) {
return GetOrCreateIfNotFound<SqueezeTransposer>("Squeeze");
}
if (IsStridedSlice(node)) {
return GetOrCreateIfNotFound<StridedSliceTransposer>("StridedSlice");
}
if (IsSwitch(node)) {
return GetOrCreateIfNotFound<SwitchTransposer>("Switch");
}
if (IsTernaryOp(node)) {
return GetOrCreateIfNotFound<TernaryOpTransposer>("TernaryOp");
}
if (IsTile(node)) {
return GetOrCreateIfNotFound<TileTransposer>("Tile");
}
if (IsUnaryGrad(node)) {
return GetOrCreateIfNotFound<UnaryGradTransposer>("UnaryGrad");
}
return nullptr;
}
}
} | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
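// Checks that every op name in `ops` resolves to a single shared transposer
// instance and that repeated lookups return the same cached pointer.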
void CheckSameTransposerForOps(absl::Span<const string> ops,
TransposerFactory* factory,
absl::flat_hash_set<Transposer*>* transposers) {
absl::flat_hash_set<Transposer*> created_transposers;
for (int i = 0; i < ops.size(); i++) {
NodeDef node;
node.set_op(ops[i]);
std::shared_ptr<Transposer> transposer1 = factory->GetTransposer(node);
ASSERT_NE(transposer1, nullptr);
if (i == 0) {
EXPECT_TRUE(transposers->insert(transposer1.get()).second);
} else {
EXPECT_FALSE(transposers->insert(transposer1.get()).second);
}
std::shared_ptr<Transposer> transposer2 = factory->GetTransposer(node);
ASSERT_NE(transposer2, nullptr);
EXPECT_EQ(transposer1.get(), transposer2.get());
created_transposers.insert(transposer1.get());
}
if (!ops.empty()) {
EXPECT_EQ(created_transposers.size(), 1);
}
}
TEST(TransposerFactoryTest, SanityCheck) {
TransposerFactory factory;
absl::flat_hash_set<Transposer*> transposers;
CheckSameTransposerForOps(
{"Conv2D", "FusedBatchNorm", "DepthwiseConv2dNative"}, &factory,
&transposers);
CheckSameTransposerForOps({"AvgPoolGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"BiasAddGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"_FusedBatchNormEx"}, &factory, &transposers);
CheckSameTransposerForOps({"FusedBatchNormGrad", "FusedBatchNormGradV2"},
&factory, &transposers);
CheckSameTransposerForOps(
{"Conv2DBackpropFilter", "DepthwiseConv2dNativeBackpropFilter"}, &factory,
&transposers);
CheckSameTransposerForOps(
{"Conv2DBackpropInput", "DepthwiseConv2dNativeBackpropInput"}, &factory,
&transposers);
CheckSameTransposerForOps({"MaxPoolGrad", "MaxPoolGradGrad"}, &factory,
&transposers);
CheckSameTransposerForOps({"MaxPoolGradV2", "MaxPoolGradGradV2"}, &factory,
&transposers);
CheckSameTransposerForOps({"AddN"}, &factory, &transposers);
CheckSameTransposerForOps({"IdentityN"}, &factory, &transposers);
CheckSameTransposerForOps({"Merge", "RefMerge"}, &factory, &transposers);
CheckSameTransposerForOps({"Select"}, &factory, &transposers);
CheckSameTransposerForOps({"Switch", "RefSwitch"}, &factory, &transposers);
CheckSameTransposerForOps({"Betainc"}, &factory, &transposers);
CheckSameTransposerForOps({"TanhGrad"}, &factory, &transposers);
CheckSameTransposerForOps({"Squeeze"}, &factory, &transposers);
CheckSameTransposerForOps({"MaxPoolV2"}, &factory, &transposers);
CheckSameTransposerForOps({"RealDiv", "Atan2", "Complex"}, &factory,
&transposers);
CheckSameTransposerForOps({"Concat", "ConcatV2"}, &factory, &transposers);
CheckSameTransposerForOps({"Pad", "PadV2", "MirrorPad", "MirrorPadGrad"},
&factory, &transposers);
CheckSameTransposerForOps({"ReverseV2"}, &factory, &transposers);
CheckSameTransposerForOps({"Tile"}, &factory, &transposers);
CheckSameTransposerForOps({"Shape"}, &factory, &transposers);
CheckSameTransposerForOps({"ShapeN"}, &factory, &transposers);
CheckSameTransposerForOps({"Fill"}, &factory, &transposers);
CheckSameTransposerForOps({"Slice"}, &factory, &transposers);
CheckSameTransposerForOps({"Split"}, &factory, &transposers);
CheckSameTransposerForOps({"SplitV"}, &factory, &transposers);
CheckSameTransposerForOps({"StridedSlice"}, &factory, &transposers);
CheckSameTransposerForOps({"Sum", "Mean", "Prod", "Max", "Min", "All", "Any"},
&factory, &transposers);
NodeDef node_unknown;
node_unknown.set_op("UnknownOp");
std::shared_ptr<Transposer> transposer_unknown =
factory.GetTransposer(node_unknown);
EXPECT_TRUE(transposer_unknown == nullptr);
}
TEST(TransposerFactoryTest, ShouldUseAllOpTransposer) {
TransposerFactory factory;
std::vector<OpDef> op_defs;
OpRegistry::Global()->GetRegisteredOps(&op_defs);
NodeDef node;
AttrValue value;
value.set_type(DataType::DT_DOUBLE);
node.mutable_attr()->insert({"T", value});
for (const OpDef& op_def : op_defs) {
node.set_op(op_def.name());
std::shared_ptr<Transposer> transposer = factory.GetTransposer(node);
if (transposer != nullptr) {
EXPECT_TRUE(IsLayoutSensitiveOp(node) || IsLayoutAgnosticOp(node))
<< "Transposer for op \"" << node.op()
<< "\" is created but not used. Add it to IsLayourSensitiveOp or "
"IslayoutAgnosticOp.";
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7138f169-d580-4837-819f-9549d7cdbd74 | cpp | tensorflow/tensorflow | memory_optimizer | tensorflow/core/grappler/optimizers/memory_optimizer.cc | tensorflow/core/grappler/optimizers/memory_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
#include <algorithm>
#include <queue>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/graph_memory.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
const char* kRecomputedNodePrefix = "Recomputed";
const char* kRecomputeTriggerNodePrefix = "RecomputeTrigger";
const char* kRecomputeHint = "_recompute_hint";
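// Ops whose outputs are cheaper to recompute than to keep resident in memory.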
std::unordered_set<string> GetCheapToRecomputeOps() {
std::unordered_set<string> cheap_ops = {"Add",
"AddN",
"BiasAdd",
"Cast",
"Fill",
"FloorDiv",
"FloorMod",
"FusedBatchNorm",
"LeakyRelu",
"Mul",
"Neg",
"RealDiv",
"Reciprocal",
"Relu",
"Relu6",
"Reshape",
"Rsqrt",
"Sigmoid",
"Sqrt",
"Square",
"SquaredDifference",
"Sub",
"Tile",
"Transpose"};
return cheap_ops;
}
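// A recompute candidate must satisfy is_candidate, feed at least one target
// node, and not itself consume a target's output.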
std::unordered_set<const NodeDef*> FindCandidateRecomputeNodes(
const NodeMap& node_map, const GraphDef* graph,
const std::function<bool(const NodeDef&)>& is_candidate,
const std::function<bool(const NodeDef&)>& is_target) {
std::unordered_set<const NodeDef*> candidate_recompute_nodes;
for (const auto& node : graph->node()) {
if (!is_candidate(node)) {
continue;
}
bool has_target_output = false;
for (const NodeDef* output : node_map.GetOutputs(node.name())) {
if (is_target(*output)) {
has_target_output = true;
break;
}
}
if (!has_target_output) {
continue;
}
bool has_target_input = false;
for (const string& input_name : node.input()) {
const NodeDef* input_node = node_map.GetNode(input_name);
if (is_target(*input_node)) {
has_target_input = true;
break;
}
}
if (has_target_input) {
continue;
}
candidate_recompute_nodes.insert(&node);
}
return candidate_recompute_nodes;
}
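// Expands the seed nodes in *expanded_nodes to the connected subgraph of
// nodes satisfying is_candidate, following inputs and/or outputs as requested.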
void connected_subgraph(const NodeMap& node_map, bool collect_inputs,
bool collect_outputs,
const std::function<bool(const NodeDef&)>& is_candidate,
std::unordered_set<const NodeDef*>* expanded_nodes) {
std::queue<const NodeDef*> to_visit;
for (const NodeDef* starting_node : *expanded_nodes) {
to_visit.push(starting_node);
}
expanded_nodes->clear();
while (!to_visit.empty()) {
const NodeDef* current_node = to_visit.front();
to_visit.pop();
if (!expanded_nodes->insert(current_node).second) {
continue;
}
if (collect_inputs) {
for (const string& input_name_raw : current_node->input()) {
const NodeDef* input_node = node_map.GetNode(input_name_raw);
if (expanded_nodes->count(input_node) == 0 &&
is_candidate(*input_node)) {
to_visit.push(input_node);
}
}
}
if (collect_outputs) {
for (const NodeDef* output : node_map.GetOutputs(current_node->name())) {
if (expanded_nodes->count(output) == 0 && is_candidate(*output)) {
to_visit.push(output);
}
}
}
}
}
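// A connected set of nodes to duplicate, plus the target (gradient) nodes
// whose inputs will be rewired to the duplicates.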
struct RecomputedSubGraph {
std::unordered_set<const NodeDef*> recomputed_source_nodes;
std::unordered_set<NodeDef*> target_nodes;
};
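// Partitions candidate nodes into connected subgraphs, keeping the sources
// that feed at least one target together with those targets.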
std::vector<RecomputedSubGraph> GetOpGroupsToRecompute(
const GraphDef* graph, const NodeMap& node_map,
const std::function<bool(const NodeDef&)>& should_recompute,
const std::function<bool(const NodeDef&)>& is_target) {
std::unordered_set<const NodeDef*> visited_nodes;
std::vector<RecomputedSubGraph> subgraphs_to_recompute;
std::unordered_set<const NodeDef*> candidate_recompute_nodes =
FindCandidateRecomputeNodes(node_map, graph, should_recompute, is_target);
for (const NodeDef* recompute_node : candidate_recompute_nodes) {
if (visited_nodes.count(recompute_node) > 0) {
continue;
}
RecomputedSubGraph current_recomputation;
std::unordered_set<const NodeDef*> unpruned_recompute_nodes;
unpruned_recompute_nodes.insert(recompute_node);
connected_subgraph(node_map,
                       /*collect_inputs=*/true,
                       /*collect_outputs=*/true,
should_recompute, &unpruned_recompute_nodes);
visited_nodes.insert(unpruned_recompute_nodes.begin(),
unpruned_recompute_nodes.end());
for (const NodeDef* unpruned_recompute_node : unpruned_recompute_nodes) {
bool inserted_feed = false;
for (NodeDef* output :
node_map.GetOutputs(unpruned_recompute_node->name())) {
if (is_target(*output)) {
current_recomputation.target_nodes.insert(output);
if (!inserted_feed) {
current_recomputation.recomputed_source_nodes.insert(
unpruned_recompute_node);
inserted_feed = true;
}
}
}
}
connected_subgraph(
node_map,
        /*collect_inputs=*/true,
        /*collect_outputs=*/false,
[&unpruned_recompute_nodes](const NodeDef& node) {
return unpruned_recompute_nodes.count(&node) != 0;
},
¤t_recomputation.recomputed_source_nodes);
if (current_recomputation.target_nodes.empty()) {
continue;
}
subgraphs_to_recompute.push_back(current_recomputation);
}
return subgraphs_to_recompute;
}
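// For each recomputed node, computes the maximum topological component over
// the target nodes it feeds, directly or through other recomputed nodes.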
std::unordered_map<const NodeDef*, int> GetMaxDownstreamComponents(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components) {
std::unordered_map<const NodeDef*, int> recomputed_node_components;
for (const NodeDef* original_recompute_node : recomputed_source_nodes) {
int max_target_component = -1;
for (NodeDef* output :
node_map.GetOutputs(original_recompute_node->name())) {
if (target_nodes.count(output) != 0) {
int current_target_component = components.find(output)->second;
if (current_target_component > max_target_component) {
max_target_component = current_target_component;
}
}
}
if (max_target_component > -1) {
recomputed_node_components[original_recompute_node] =
max_target_component;
}
}
std::vector<const NodeDef*> recomputed_source_nodes_topological(
recomputed_source_nodes.begin(), recomputed_source_nodes.end());
std::sort(recomputed_source_nodes_topological.begin(),
recomputed_source_nodes_topological.end(),
[&components](const NodeDef* first, const NodeDef* second) {
return components.find(first)->second <
components.find(second)->second;
});
for (const NodeDef* original_recompute_node :
recomputed_source_nodes_topological) {
int max_component;
auto recomputed_component_iterator =
recomputed_node_components.find(original_recompute_node);
if (recomputed_component_iterator != recomputed_node_components.end()) {
max_component = recomputed_component_iterator->second;
} else {
max_component = -1;
}
for (NodeDef* output :
node_map.GetOutputs(original_recompute_node->name())) {
if (recomputed_source_nodes.count(output) == 0) {
continue;
}
auto child_component_iterator = recomputed_node_components.find(output);
CHECK(child_component_iterator != recomputed_node_components.end());
int child_component = child_component_iterator->second;
if (child_component > max_component) {
max_component = child_component;
}
}
CHECK_GE(max_component, 0);
recomputed_node_components[original_recompute_node] = max_component;
}
return recomputed_node_components;
}
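// Creates a chain of NoOp "trigger" nodes, one per recomputed node, gated on
// inputs of downstream targets so that each recomputation starts only when
// execution nears its consumers.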
std::unordered_map<const NodeDef*, const NodeDef*>
AddRecomputeControlDependencyNodes(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components,
const std::unordered_map<const NodeDef*, int>&
recomputed_node_max_feed_components,
GraphDef* graph) {
std::vector<const NodeDef*> recomputed_source_nodes_topological(
recomputed_source_nodes.begin(), recomputed_source_nodes.end());
std::sort(recomputed_source_nodes_topological.begin(),
recomputed_source_nodes_topological.end(),
[&recomputed_node_max_feed_components](const NodeDef* first,
const NodeDef* second) {
int first_component =
recomputed_node_max_feed_components.find(first)->second;
int second_component =
recomputed_node_max_feed_components.find(second)->second;
return first_component > second_component
|| (first_component == second_component &&
first->name() > second->name());
});
std::vector<const NodeDef*> target_inputs_topological;
for (const NodeDef* target_node : target_nodes) {
for (const string& target_input_name_raw : target_node->input()) {
const NodeDef* target_input = node_map.GetNode(target_input_name_raw);
if (target_input == nullptr ||
recomputed_source_nodes.count(target_input) != 0 ||
components.find(target_node)->second ==
components.find(target_input)->second) {
continue;
}
target_inputs_topological.push_back(target_input);
}
}
std::sort(target_inputs_topological.begin(), target_inputs_topological.end(),
[&components](const NodeDef* first, const NodeDef* second) {
return components.find(first)->second >
components.find(second)->second;
});
auto target_input_iterator = target_inputs_topological.begin();
NodeDef* current_trigger_node = nullptr;
std::unordered_map<const NodeDef*, const NodeDef*> triggers;
for (const NodeDef* original_recomputed_node :
recomputed_source_nodes_topological) {
NodeDef* new_trigger_node = graph->add_node();
new_trigger_node->set_name(AddPrefixToNodeName(
original_recomputed_node->name(), kRecomputeTriggerNodePrefix));
new_trigger_node->set_op("NoOp");
new_trigger_node->set_device(original_recomputed_node->device());
if (current_trigger_node != nullptr) {
*new_trigger_node->add_input() =
strings::StrCat("^", current_trigger_node->name());
}
current_trigger_node = new_trigger_node;
triggers[original_recomputed_node] = current_trigger_node;
for (;
target_input_iterator != target_inputs_topological.end() &&
components.find(*target_input_iterator)->second >
recomputed_node_max_feed_components.find(original_recomputed_node)
->second;
++target_input_iterator) {
*current_trigger_node->add_input() =
strings::StrCat("^", (*target_input_iterator)->name());
VLOG(2) << " Recomputation trigger " << current_trigger_node->name()
<< " depends on " << (*target_input_iterator)->name();
}
}
return triggers;
}
string RecomputedOrOriginalNodeName(
const std::unordered_set<string>& recomputed_node_names,
const string& original_node_name) {
if (recomputed_node_names.find(original_node_name) ==
recomputed_node_names.end()) {
return original_node_name;
} else {
return AddPrefixToNodeName(original_node_name, kRecomputedNodePrefix);
}
}
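// Duplicates the source nodes under a "Recomputed" prefix, attaches each copy
// to its trigger, and rewires target-node inputs to the copies.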
void RecomputeSubgraph(
const std::unordered_set<const NodeDef*>& recomputed_source_nodes,
const std::unordered_set<NodeDef*>& target_nodes, const NodeMap& node_map,
const std::unordered_map<const NodeDef*, int>& components,
GraphDef* graph) {
std::unordered_set<string> recomputed_node_names;
VLOG(1) << "Recomputing a " << recomputed_source_nodes.size()
<< " node subgraph";
std::unordered_map<const NodeDef*, int> recomputed_node_components =
GetMaxDownstreamComponents(recomputed_source_nodes, target_nodes,
node_map, components);
for (const NodeDef* original_node : recomputed_source_nodes) {
VLOG(2) << " " << original_node->name();
recomputed_node_names.insert(original_node->name());
}
std::unordered_map<const NodeDef*, const NodeDef*> triggers =
AddRecomputeControlDependencyNodes(recomputed_source_nodes, target_nodes,
node_map, components,
recomputed_node_components, graph);
for (const NodeDef* original_node : recomputed_source_nodes) {
NodeDef* copied_node = graph->add_node();
copied_node->set_name(
AddPrefixToNodeName(original_node->name(), kRecomputedNodePrefix));
copied_node->set_op(original_node->op());
*copied_node->mutable_attr() = original_node->attr();
copied_node->set_device(original_node->device());
for (const string& original_input_name : original_node->input()) {
*copied_node->add_input() = RecomputedOrOriginalNodeName(
recomputed_node_names, original_input_name);
}
*copied_node->add_input() =
strings::StrCat("^", triggers[original_node]->name());
}
for (NodeDef* target_node : target_nodes) {
for (string& target_input_name : *target_node->mutable_input()) {
target_input_name = RecomputedOrOriginalNodeName(recomputed_node_names,
target_input_name);
}
}
}
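// Applies the recomputation rewrite at the configured optimization level:
// selects cheap or explicitly hinted ops that feed nodes under
// recomputation_targets_name_scope and duplicates them behind triggers.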
void RecomputationRewritingPass(RewriterConfig::MemOptType optimization_level,
const string& recomputation_targets_name_scope,
GraphDef* graph, const GrapplerItem& item) {
TF_CHECK_OK(TopologicalSort(graph));
NodeMap node_map(graph);
std::vector<RecomputedSubGraph> recomputed_subgraphs;
std::unordered_set<string> feeds;
for (const auto& feed : item.feed) {
feeds.insert(NodeName(feed.first));
}
std::function<bool(const NodeDef&)> is_target =
[&recomputation_targets_name_scope](const NodeDef& node) {
return absl::StartsWith(node.name(),
recomputation_targets_name_scope) ||
static_cast<int>(node.name().find(
"/" + recomputation_targets_name_scope)) != -1;
};
if (optimization_level == RewriterConfig::RECOMPUTATION_HEURISTICS ||
optimization_level == RewriterConfig::HEURISTICS) {
std::unordered_set<string> cheap_to_recompute_ops =
GetCheapToRecomputeOps();
recomputed_subgraphs = GetOpGroupsToRecompute(
graph, node_map,
[&cheap_to_recompute_ops, &feeds, &is_target](const NodeDef& node) {
return !is_target(node) && feeds.count(node.name()) == 0 &&
(cheap_to_recompute_ops.count(node.op()) > 0 ||
node.attr().count(kRecomputeHint) > 0);
},
is_target);
} else if (optimization_level == RewriterConfig::MANUAL) {
recomputed_subgraphs = GetOpGroupsToRecompute(
graph, node_map,
[&feeds, &is_target](const NodeDef& node) {
return !is_target(node) && feeds.count(node.name()) == 0 &&
node.attr().count(kRecomputeHint) > 0;
},
is_target);
}
if (!recomputed_subgraphs.empty()) {
std::unordered_map<const NodeDef*, int> topological_numbering;
for (int node_number = 0; node_number < graph->node().size();
++node_number) {
topological_numbering[graph->mutable_node(node_number)] =
graph->node().size() - node_number - 1;
}
for (const RecomputedSubGraph& subgraph : recomputed_subgraphs) {
RecomputeSubgraph(subgraph.recomputed_source_nodes, subgraph.target_nodes,
node_map, topological_numbering, graph);
}
}
}
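// On devices whose peak usage exceeds 80% of capacity, rewrites wide
// AddN/AccumulateNV2 nodes into incremental in-place accumulation so that all
// inputs need not be live simultaneously. Returns true if the graph changed.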
bool SchedulingPass(Cluster* cluster, std::unique_ptr<GraphMemory>* memory_ptr,
GrapplerItem* item) {
MutableGraphView view(&item->graph);
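  // Map each same-device input tensor to the AddN/AccumulateNV2 nodes (with
  // more than two inputs) that consume it.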
std::unordered_map<string, std::unordered_set<NodeDef*>> addn_list;
for (NodeDef& node : *item->graph.mutable_node()) {
if (!IsAddN(node) && node.op() != "AccumulateNV2") {
continue;
}
    if (view.NumFanins(node, /*include_controlling_nodes=*/false) <= 2) {
continue;
}
for (const auto& input : view.GetFanins(node, false)) {
if (input.node->device() == node.device()) {
string tensor_name =
strings::StrCat(input.node->name(), ":", input.port_id);
addn_list[tensor_name].insert(&node);
}
}
}
if (addn_list.empty()) {
return false;
}
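  // Lazily run static memory inference once per optimizer invocation; give up
  // if inference fails.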
if ((*memory_ptr) == nullptr) {
memory_ptr->reset(new GraphMemory(*item));
Status s = (*memory_ptr)->InferStatically(cluster->GetDevices());
if (!s.ok()) {
memory_ptr->reset();
VLOG(1) << "Failed to infer memory usage: " << s.message();
return false;
}
}
const GraphMemory& memory = **memory_ptr;
std::unordered_set<NodeDef*> addn_to_rewrite;
for (const auto& device : cluster->GetDevices()) {
const string& name = device.first;
const DeviceProperties& prop = device.second;
if (prop.memory_size() <= 0) {
VLOG(1) << "Available memory unknown for device " << name;
continue;
}
const GraphMemory::MemoryUsage& mem_usage = memory.GetPeakMemoryUsage(name);
if (mem_usage.used_memory <= prop.memory_size() * 0.8) {
continue;
}
for (const auto& live : mem_usage.live_tensors) {
string tensor_name = strings::StrCat(live.node, ":", live.output_id);
auto it = addn_list.find(tensor_name);
if (it != addn_list.end()) {
addn_to_rewrite.insert(it->second.begin(), it->second.end());
}
}
}
if (addn_to_rewrite.empty()) {
return false;
}
GraphProperties properties(*item);
  Status s = properties.InferStatically(/*assume_valid_feeds=*/false,
                                        /*aggressive_shape_inference=*/false,
                                        /*include_tensor_values=*/false);
if (!s.ok()) {
VLOG(1) << "Failed to infer shapes: " << s.message();
return false;
}
GraphTopologyView graph_topology;
Status initialized_topology = graph_topology.InitializeFromGraph(item->graph);
if (!initialized_topology.ok()) {
VLOG(1) << "Failed to initialize graph topology view: "
<< initialized_topology.message();
return false;
}
bool updated_graph = false;
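  // Rewrite each selected accumulation into: TemporaryVariable ->
  // Assign(zeros) -> one AssignAdd per input -> DestroyTemporaryVariable
  // producing the sum.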
for (NodeDef* node : addn_to_rewrite) {
if (!properties.HasOutputProperties(node->name())) {
VLOG(1) << "Missing properties for " << node->name();
continue;
}
const TensorShapeProto& shape =
properties.GetOutputProperties(node->name())[0].shape();
PartialTensorShape shp(shape);
if (!shp.IsFullyDefined()) {
VLOG(1) << "Shape not fully known for " << node->name();
continue;
}
DataType dtype = node->attr().at("T").type();
if (dtype != DT_HALF && dtype != DT_FLOAT && dtype != DT_DOUBLE &&
dtype != DT_INT64) {
VLOG(1) << "Unsupported dtype for " << node->name();
continue;
}
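    // Post-order DFS over the node's ancestors yields a topological index per
    // input; the smallest-index input is produced first, and the temporary
    // variable is gated on it.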
std::unordered_map<const NodeDef*, int> topo_order;
DfsTraversal(graph_topology, {node}, TraversalDirection::kFollowInputs,
DfsCallbacks::PostOrder([&topo_order](const NodeDef* n) {
int topo_index = static_cast<int>(topo_order.size());
topo_order[n] = topo_index;
}));
std::vector<int> input_topo_index;
for (int i = 0; i < node->input_size(); ++i) {
const string& input = node->input(i);
      const string input_node_name = NodeName(input);
      const NodeDef* input_node = view.GetNode(input_node_name);
      input_topo_index.push_back(topo_order.at(input_node));
}
int min_input_topo_index = INT_MAX;
int min_input_id = -1;
for (int i = 0; i < node->input_size(); ++i) {
      // Control inputs are listed after all regular inputs, so stop at the
      // first one.
      if (IsControlInput(node->input(i))) {
        break;
      }
const int current = input_topo_index[i];
if (current < min_input_topo_index) {
min_input_topo_index = current;
min_input_id = i;
}
}
CHECK_LE(0, min_input_id);
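    // Split the trailing control inputs by topological order: those expected
    // to finish before the earliest data input can be attached to the
    // temporary variable; the rest stay on the rewritten node.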
std::vector<string> pre_ctrl_deps;
std::vector<string> post_ctrl_deps;
for (int i = node->input_size() - 1; i >= 0; --i) {
if (!IsControlInput(node->input(i))) {
break;
}
if (input_topo_index[i] < min_input_topo_index) {
pre_ctrl_deps.push_back(node->input(i));
} else {
post_ctrl_deps.push_back(node->input(i));
}
}
const string& device = node->device();
const string tmp_var_name = strings::StrCat(node->name(), "/tmp_var");
if (view.GetNode(tmp_var_name) != nullptr) {
VLOG(1) << "Temporary variable already exists " << tmp_var_name;
return false;
}
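    // Build the accumulation subgraph. A rough sketch of the rewrite (the
    // name suffixes mirror what the code below assigns):
    //   tmp_var     = TemporaryVariable(shape, dtype)
    //   zeros       = ZerosLike(input[min_input_id])
    //   initializer = Assign(tmp_var, zeros)
    //   accum_i     = AssignAdd(initializer, input[i])   // one per input
    //   node        = DestroyTemporaryVariable(initializer) + ^accum_i deps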
NodeDef* tmp_var = item->graph.add_node();
tmp_var->set_name(tmp_var_name);
tmp_var->set_op("TemporaryVariable");
tmp_var->set_device(device);
(*tmp_var->mutable_attr())["dtype"].set_type(dtype);
*(*tmp_var->mutable_attr())["shape"].mutable_shape() = shape;
(*tmp_var->mutable_attr())["var_name"].set_s(tmp_var->name());
for (const string& ctrl_dep : pre_ctrl_deps) {
*tmp_var->add_input() = ctrl_dep;
}
*tmp_var->add_input() =
AsControlDependency(NodeName(node->input(min_input_id)));
NodeDef* zeros = item->graph.add_node();
zeros->set_name(strings::StrCat(node->name(), "/tmp_var_zeros"));
zeros->set_op("ZerosLike");
zeros->set_device(device);
(*zeros->mutable_attr())["T"].set_type(dtype);
*zeros->add_input() = node->input(min_input_id);
NodeDef* initialize = item->graph.add_node();
initialize->set_name(strings::StrCat(node->name(), "/tmp_var_initializer"));
initialize->set_op("Assign");
initialize->set_device(device);
(*initialize->mutable_attr())["T"].set_type(dtype);
(*initialize->mutable_attr())["use_locking"].set_b(false);
(*initialize->mutable_attr())["validate_shape"].set_b(false);
*initialize->add_input() = tmp_var->name();
*initialize->add_input() = zeros->name();
std::vector<NodeDef*> accumulates;
for (int i = 0; i < node->input_size(); ++i) {
const string& input = node->input(i);
if (!IsControlInput(input)) {
NodeDef* accumulate = item->graph.add_node();
accumulate->set_name(
strings::StrCat(node->name(), "/tmp_var_accum_", i));
accumulate->set_op("AssignAdd");
accumulate->set_device(device);
(*accumulate->mutable_attr())["T"].set_type(dtype);
(*accumulate->mutable_attr())["use_locking"].set_b(true);
*accumulate->add_input() = initialize->name();
*accumulate->add_input() = input;
accumulates.push_back(accumulate);
}
}
node->set_op("DestroyTemporaryVariable");
node->clear_input();
EraseRegularNodeAttributes(node);
(*node->mutable_attr())["T"].set_type(dtype);
(*node->mutable_attr())["var_name"].set_s(tmp_var->name());
*node->add_input() = initialize->name();
for (const NodeDef* accum : accumulates) {
*node->add_input() = AsControlDependency(accum->name());
}
for (const string& ctrl_dep : post_ctrl_deps) {
*node->add_input() = ctrl_dep;
}
updated_graph = true;
}
return updated_graph;
}
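// Builds the swap-out/swap-in pair for input `input_to_swap` of `node`. A
// hedged sketch of the resulting wiring (names mirror the code below):
//   swap_out_<node>_<i> = _CopyFromGpuToHost(<original input>)
//   swap_in_<node>_<i>  = _CopyFromHostToGpu(swap_out_<node>_<i>)
// Both copies are colocated with the swapped tensor; the call fails when the
// node is not on a GPU, the input is a reference type, or the pair already
// exists.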
Status BuildSwapPair(NodeDef* node, int input_to_swap,
const std::unordered_map<string, const NodeDef*>& name_map,
GraphDef* graph,
std::pair<NodeDef*, NodeDef*>* swap_pair) {
string task, device;
if (!DeviceNameUtils::SplitDeviceName(node->device(), &task, &device) ||
!absl::StrContains(device, DEVICE_GPU)) {
return errors::InvalidArgument("Can't swap input ", input_to_swap,
" of node ", node->name(),
" since it is not on GPU");
}
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node->op(), &op_def));
DataType input_type;
TF_RETURN_IF_ERROR(
InputTypeForNode(*node, *op_def, input_to_swap, &input_type));
if (IsRefType(input_type)) {
return errors::InvalidArgument("Can't swap input ", input_to_swap,
" of node ", node->name(),
" since it expects a reference");
}
string tensor_to_swap = strings::StrCat(node->name(), "_", input_to_swap);
string swap_out_name = strings::StrCat("swap_out_", tensor_to_swap);
string swap_in_name = strings::StrCat("swap_in_", tensor_to_swap);
if (name_map.find(swap_out_name) != name_map.end() ||
name_map.find(swap_in_name) != name_map.end()) {
return errors::InvalidArgument("Input ", input_to_swap, " of node ",
node->name(), " is already swapped");
}
NodeDef* swap_out_node = graph->add_node();
swap_out_node->set_name(swap_out_name);
swap_out_node->set_op("_CopyFromGpuToHost");
NodeDef* swap_in_node = graph->add_node();
swap_in_node->set_name(swap_in_name);
swap_in_node->set_op("_CopyFromHostToGpu");
*swap_in_node->add_input() = swap_out_node->name();
swap_out_node->set_device(node->device());
swap_in_node->set_device(node->device());
string coloc_group = strings::StrCat("loc@", tensor_to_swap);
(*swap_out_node->mutable_attr())["_class"].mutable_list()->add_s(coloc_group);
(*swap_in_node->mutable_attr())["_class"].mutable_list()->add_s(coloc_group);
(*node->mutable_attr())["_class"].mutable_list()->add_s(coloc_group);
(*swap_in_node->mutable_attr())["T"].set_type(input_type);
(*swap_out_node->mutable_attr())["T"].set_type(input_type);
*swap_pair = std::make_pair(swap_out_node, swap_in_node);
return absl::OkStatus();
}
struct SwapInfo {
std::vector<int> inputs_to_swap;
Costs::NanoSeconds time_to_swap = 0;
};
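// Finds a node whose completion can trigger the swap-in: starting from the
// non-swapped inputs of `node`, it searches backwards (skipping Switch/Merge
// and frame-modifying ops) for the latest-finishing node that still completes
// at least `time_to_swap` before the last input of `node` is ready.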
static const NodeDef* FindSwapInTrigger(
const NodeDef* node, const SwapInfo& swap_info,
const std::unordered_map<string, const NodeDef*>& name_map,
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
execution_times) {
Costs::NanoSeconds max_trigger_time(0);
std::set<string> possible_inputs;
for (int i = 0; i < node->input_size(); ++i) {
const string input_node_name = NodeName(node->input(i));
auto it1 = name_map.find(input_node_name);
if (it1 == name_map.end()) {
return nullptr;
}
const NodeDef* input_node = it1->second;
auto it2 = execution_times.find(input_node);
if (it2 == execution_times.end()) {
return nullptr;
}
max_trigger_time = std::max(max_trigger_time, it2->second);
possible_inputs.insert(input_node_name);
}
for (const int i : swap_info.inputs_to_swap) {
const string input_node_name = NodeName(node->input(i));
possible_inputs.erase(input_node_name);
}
if (possible_inputs.empty()) {
return nullptr;
}
max_trigger_time -= swap_info.time_to_swap;
std::map<Costs::NanoSeconds, const NodeDef*> candidates;
std::set<string> already_processed;
while (!possible_inputs.empty()) {
const string input_node_name = *possible_inputs.begin();
possible_inputs.erase(possible_inputs.begin());
already_processed.insert(input_node_name);
auto it1 = name_map.find(input_node_name);
if (it1 == name_map.end()) {
return nullptr;
}
const NodeDef* input_node = it1->second;
if (ModifiesFrameInfo(*input_node) || IsSwitch(*input_node) ||
IsMerge(*input_node)) {
continue;
}
auto it2 = execution_times.find(input_node);
if (it2 == execution_times.end()) {
return nullptr;
}
if (it2->second < max_trigger_time) {
candidates[it2->second] = input_node;
} else {
for (const string& fanin : input_node->input()) {
string name = NodeName(fanin);
if (already_processed.find(name) == already_processed.end()) {
possible_inputs.insert(name);
}
}
}
}
if (!candidates.empty()) {
return candidates.rbegin()->second;
}
return nullptr;
}
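// A tensor can be swapped out if its producer isn't a persistent node and
// the output isn't a reference type. Identity and Reshape merely forward
// their input buffer, so the check recurses into their same-device fanin.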
static bool IsSwappable(const MutableGraphView& graph,
MutableGraphView::OutputPort output) {
const NodeDef& node = *output.node;
if (IsPersistent(node)) {
return false;
}
const OpDef* op_def;
if (!OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) {
return false;
}
DataType dtype;
if (!OutputTypeForNode(node, *op_def, output.port_id, &dtype).ok()) {
return false;
}
if (IsRefType(dtype)) {
return false;
}
if (output.node->op() == "Identity" || output.node->op() == "Reshape") {
MutableGraphView::InputPort input;
input.node = output.node;
input.port_id = 0;
MutableGraphView::OutputPort fanin = graph.GetRegularFanin(input);
if (fanin.node->device() == node.device()) {
return IsSwappable(graph, fanin);
}
}
return true;
}
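// Returns the earliest-executing consumer (other than `node`) of the tensor
// feeding input `input_id`; a control edge later forces the swap-out copy to
// complete before that consumer runs, so the copy happens while the tensor
// is still live.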
static NodeDef* FindSwapOutTrigger(
const NodeDef* node, int input_id, const MutableGraphView& view,
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
execution_times) {
MutableGraphView::InputPort swap;
swap.node = const_cast<NodeDef*>(node);
swap.port_id = input_id;
MutableGraphView::OutputPort generator = view.GetRegularFanin(swap);
if (!generator.node) {
return nullptr;
}
const absl::flat_hash_set<MutableGraphView::InputPort>& fanout =
view.GetFanout(generator);
NodeDef* trigger = nullptr;
Costs::NanoSeconds earliest_fanout(Costs::NanoSeconds::infinity());
for (const auto& port : fanout) {
if (port.node == node) {
continue;
}
auto it = execution_times.find(port.node);
if (it != execution_times.end() && it->second < earliest_fanout) {
earliest_fanout = it->second;
trigger = port.node;
}
}
return trigger;
}
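// The consumer side is swappable as long as it doesn't take the input by
// reference.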
static bool IsSwappable(MutableGraphView::InputPort input) {
const NodeDef& node = *input.node;
const OpDef* op_def;
if (!OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) {
return false;
}
DataType dtype;
if (!InputTypeForNode(node, *op_def, input.port_id, &dtype).ok()) {
return false;
}
return !IsRefType(dtype);
}
struct MemInfo {
MutableGraphView::OutputPort port;
int64_t memory_used;
std::vector<MutableGraphView::InputPort> uses_left;
double fitness;
bool operator<(const MemInfo& other) const { return fitness < other.fitness; }
};
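// For every GPU whose estimated peak usage exceeds its capacity, simulates
// the graph on a virtual cluster to obtain per-node completion times, then
// scores the tensors live at the peak and greedily selects fanouts to swap
// until the required savings are reached.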
static bool IdentifySwappingCandidates(
Cluster* cluster, GrapplerItem* item,
std::unique_ptr<GraphMemory>* memory_ptr,
std::unordered_set<string>* skip_list,
std::unordered_map<NodeDef*, SwapInfo>* nodes_to_swap) {
if ((*memory_ptr) == nullptr) {
memory_ptr->reset(new GraphMemory(*item));
Status s = (*memory_ptr)->InferStatically(cluster->GetDevices());
if (!s.ok()) {
memory_ptr->reset();
VLOG(1) << "Failed to infer memory usage: " << s.message();
return false;
}
}
const GraphMemory& memory = **memory_ptr;
bool updated_graph = false;
for (const auto& device : cluster->GetDevices()) {
const string& name = device.first;
const DeviceProperties& prop = device.second;
if (prop.type() != "GPU") {
continue;
}
if (prop.memory_size() <= 0) {
VLOG(1) << "Peak memory usage unknown for device " << name;
continue;
}
const GraphMemory::MemoryUsage& mem_usage = memory.GetPeakMemoryUsage(name);
if (mem_usage.used_memory <= prop.memory_size()) {
continue;
}
int64_t required_savings = mem_usage.used_memory - prop.memory_size();
std::unordered_map<string, Costs::NanoSeconds> op_completion_times;
{
VirtualCluster vcluster(cluster->GetDevices());
if (!vcluster.Provision().ok()) {
return false;
}
if (!vcluster.Initialize(*item).ok()) {
return false;
}
RunMetadata metadata;
Status s = vcluster.Run(item->graph, item->feed, item->fetch, &metadata);
if (!s.ok() && s.code() != error::RESOURCE_EXHAUSTED) {
return false;
}
for (const auto& dev_stats : metadata.step_stats().dev_stats()) {
for (const auto& node_stats : dev_stats.node_stats()) {
Costs::NanoSeconds exec_time =
Costs::NanoSeconds(1) +
Costs::MicroSeconds(node_stats.all_start_micros() +
node_stats.op_end_rel_micros());
op_completion_times.emplace(node_stats.node_name(), exec_time);
}
}
}
Costs::Duration peak_time = -1;
for (const auto& live_tensor : mem_usage.live_tensors) {
if (live_tensor.allocation_time > peak_time) {
peak_time = live_tensor.allocation_time;
}
}
std::vector<MemInfo> mem_state;
MutableGraphView graph(&item->graph);
for (const auto& live_tensor : mem_usage.live_tensors) {
      // Tensors of 1 KiB or less aren't worth the copy overhead.
      if (live_tensor.memory_used <= 1024) {
continue;
}
if (live_tensor.deallocation_time - live_tensor.allocation_time <=
Costs::Duration(1e6)) {
VLOG(1) << "Not enough time to swap: skipping " << live_tensor.node;
continue;
}
if (skip_list->find(live_tensor.node) != skip_list->end()) {
continue;
}
MutableGraphView::OutputPort port =
graph.GetOutputPort(live_tensor.node, live_tensor.output_id);
if (!IsSwappable(graph, port)) {
continue;
}
MemInfo mem_info;
mem_info.port = port;
mem_info.memory_used = live_tensor.memory_used;
Costs::Duration allocation_time = live_tensor.allocation_time;
Costs::Duration earliest_use(Costs::Duration::infinity());
bool valid = true;
for (MutableGraphView::InputPort input : graph.GetFanout(port)) {
auto it = op_completion_times.find(input.node->name());
if (it == op_completion_times.end()) {
valid = false;
break;
}
if (it->second <= peak_time) {
continue;
}
if (skip_list->find(input.node->name()) != skip_list->end()) {
valid = false;
break;
}
string input_name =
strings::StrCat(input.node->name(), ":", input.port_id);
if (skip_list->find(input_name) != skip_list->end()) {
valid = false;
break;
}
if (!IsSwappable(input)) {
valid = false;
break;
}
mem_info.uses_left.emplace_back(input);
earliest_use = std::min(earliest_use, it->second);
}
      if (valid && !mem_info.uses_left.empty()) {
        // Prefer tensors that are allocated well before the peak and whose
        // next use is far past it, penalizing wide fanouts; negate so the
        // best candidates sort first.
        mem_info.fitness =
            MathUtil::IPow<double>((earliest_use - peak_time).count(), 2) /
            MathUtil::IPow<double>(mem_info.uses_left.size(), 2) +
            MathUtil::IPow<double>((allocation_time - peak_time).count(), 2);
        mem_info.fitness = -mem_info.fitness;
mem_state.push_back(mem_info);
}
}
std::sort(mem_state.begin(), mem_state.end());
for (const MemInfo& mem_info : mem_state) {
for (const MutableGraphView::InputPort fanout_to_swap :
mem_info.uses_left) {
VLOG(1) << "Will swap fanout " << fanout_to_swap.node->name() << ":"
<< fanout_to_swap.port_id << " of tensor "
<< mem_info.port.node->name() << ":" << mem_info.port.port_id
<< " of size " << mem_info.memory_used;
(*nodes_to_swap)[fanout_to_swap.node].inputs_to_swap.push_back(
fanout_to_swap.port_id);
}
required_savings -= mem_info.memory_used;
updated_graph = true;
if (required_savings < 0) {
break;
}
}
}
return updated_graph;
}
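// Applies both the automatic swapping heuristics and any manual
// "_swap_to_host" annotations: each selected input gets a swap-out/swap-in
// pair plus trigger control edges chosen from estimated execution times.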
bool SwappingPass(RewriterConfig::MemOptType optimization_level,
Cluster* cluster, std::unique_ptr<GraphMemory>* memory,
GrapplerItem* item, std::unordered_set<string>* skip_list) {
std::unordered_map<NodeDef*, SwapInfo> nodes_to_swap;
if (optimization_level == RewriterConfig::DEFAULT_MEM_OPT ||
optimization_level == RewriterConfig::SWAPPING_HEURISTICS ||
optimization_level == RewriterConfig::HEURISTICS) {
IdentifySwappingCandidates(cluster, item, memory, skip_list,
&nodes_to_swap);
}
for (auto& node : *item->graph.mutable_node()) {
if (node.attr().count("_swap_to_host") != 0) {
SwapInfo& swap_info = nodes_to_swap[&node];
const AttrValue& val = node.attr().at("_swap_to_host");
if (val.has_list()) {
for (int64_t input_id : val.list().i()) {
swap_info.inputs_to_swap.push_back(input_id);
}
} else {
int64_t input_id = val.i();
swap_info.inputs_to_swap.push_back(input_id);
}
}
}
if (nodes_to_swap.empty()) {
return false;
}
GraphProperties properties(*item);
if (!properties
           .InferStatically(/*assume_valid_feeds=*/true,
                            /*aggressive_shape_inference=*/false,
                            /*include_tensor_values=*/false)
.ok()) {
return false;
}
for (auto& swap : nodes_to_swap) {
const NodeDef* node = swap.first;
const std::vector<OpInfo::TensorProperties>& props =
properties.GetInputProperties(node->name());
SwapInfo& swap_info = swap.second;
int64_t bytes_to_swap = 0;
for (int64_t input_id : swap_info.inputs_to_swap) {
const OpInfo::TensorProperties& t = props[input_id];
bytes_to_swap += CalculateTensorSize(t);
}
    // Assume the swap happens over PCIe at roughly 16 GB/s, i.e. 16 bytes
    // per nanosecond.
    swap_info.time_to_swap = bytes_to_swap / 16;
}
std::unordered_map<const NodeDef*, Costs::NanoSeconds> execution_times;
if (!EstimateEarliestExecutionTimes(*item, cluster, &execution_times).ok()) {
return false;
}
std::unordered_map<string, const NodeDef*> name_map;
for (const auto& node : item->graph.node()) {
name_map[node.name()] = &node;
}
MutableGraphView view(&item->graph);
bool updated_graph = false;
for (auto& swap : nodes_to_swap) {
NodeDef* node = swap.first;
const SwapInfo& swap_info = swap.second;
if (skip_list->find(node->name()) != skip_list->end()) {
continue;
}
const NodeDef* in_trigger =
FindSwapInTrigger(node, swap_info, name_map, execution_times);
if (!in_trigger) {
skip_list->insert(node->name());
continue;
}
for (int input_id : swap_info.inputs_to_swap) {
string input_name = strings::StrCat(node->name(), ":", input_id);
if (skip_list->find(input_name) != skip_list->end()) {
continue;
} else {
skip_list->insert(input_name);
}
NodeDef* out_trigger =
FindSwapOutTrigger(node, input_id, view, execution_times);
if (!out_trigger) {
continue;
}
std::pair<NodeDef*, NodeDef*> swap_nodes;
if (!BuildSwapPair(node, input_id, name_map, &item->graph, &swap_nodes)
.ok()) {
continue;
}
*swap_nodes.first->add_input() = node->input(input_id);
*node->mutable_input(input_id) = swap_nodes.second->name();
out_trigger->add_input(strings::StrCat("^", swap_nodes.first->name()));
swap_nodes.second->add_input(strings::StrCat("^", in_trigger->name()));
      skip_list->insert(swap_nodes.first->name());
      skip_list->insert(swap_nodes.second->name());
      // Record the rewrite; without this the flag is never set and the pass
      // always reports no change to its caller.
      updated_graph = true;
}
}
return updated_graph;
}
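// True when the two nodes live on different tasks or on opposite sides of a
// CPU/GPU boundary; such Assign outputs must keep their allocator
// constraints.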
bool CrossesTaskOrCpuGpuBoundary(const NodeDef& node1, const NodeDef& node2) {
string task1;
string device1;
DeviceNameUtils::SplitDeviceName(node1.device(), &task1, &device1);
string task2;
string device2;
DeviceNameUtils::SplitDeviceName(node2.device(), &task2, &device2);
return task1 != task2 ||
(absl::StrContains(device1, DEVICE_CPU) &&
absl::StrContains(device2, DEVICE_GPU)) ||
(absl::StrContains(device1, DEVICE_GPU) &&
absl::StrContains(device2, DEVICE_CPU));
}
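// Tags the selected Assign nodes so allocation can relax the usual placement
// constraints on their output buffers.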
void RelaxAssignNodes(const std::set<int>& nodes_to_relax,
GraphDef* optimized_graph) {
for (int idx : nodes_to_relax) {
NodeDef* assign_node = optimized_graph->mutable_node(idx);
(*assign_node->mutable_attr())["_grappler_relax_allocator_constraints"]
.set_b(true);
}
}
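// Collects Assign nodes whose output provably stays on one device: the fast
// path accepts everything when the graph has a single device and no _Send;
// otherwise the transitive fanout (followed only through ops that may
// forward their inputs) must contain no _Send and no task or CPU/GPU
// boundary crossing.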
Status FindAssignNodesToRelax(const GraphDef& graph,
std::set<int>* nodes_to_relax) {
std::unordered_set<string> devices;
std::vector<int> assign_nodes;
bool found_send = false;
for (int i = 0; i < graph.node_size(); ++i) {
const NodeDef& node = graph.node(i);
devices.insert(node.device());
if (IsAssign(node)) {
assign_nodes.push_back(i);
}
if (IsSend(node)) {
found_send = true;
break;
}
}
if (!found_send && devices.size() == 1) {
nodes_to_relax->insert(assign_nodes.begin(), assign_nodes.end());
return absl::OkStatus();
}
GraphTopologyView graph_view;
  TF_RETURN_IF_ERROR(
      graph_view.InitializeFromGraph(graph, /*ignore_control_edges=*/true));
std::unordered_set<const NodeDef*> optimized_nodes;
for (int i : assign_nodes) {
const NodeDef& assign_node = graph.node(i);
if (optimized_nodes.find(&assign_node) == optimized_nodes.end()) {
std::vector<const NodeDef*> assign_nodes_in_fanout;
optimized_nodes.insert(&assign_node);
assign_nodes_in_fanout.push_back(&assign_node);
std::vector<const NodeDef*> transitive_fanout;
DfsTraversal(graph_view, {graph_view.GetNode(i)},
TraversalDirection::kFollowOutputs,
DfsPredicates::Advance([&](const NodeDef* node) {
return !NeverForwardsInputs(*node);
}),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
transitive_fanout.push_back(node);
}));
bool relax_constraint = true;
for (const NodeDef* fanout_node : transitive_fanout) {
if (relax_constraint &&
(IsSend(*fanout_node) ||
CrossesTaskOrCpuGpuBoundary(*fanout_node, assign_node))) {
relax_constraint = false;
break;
}
if (optimized_nodes.find(fanout_node) == optimized_nodes.end() &&
IsAssign(*fanout_node)) {
assign_nodes_in_fanout.push_back(fanout_node);
}
}
if (relax_constraint) {
for (const NodeDef* assign_node_in_fanout : assign_nodes_in_fanout) {
optimized_nodes.insert(assign_node_in_fanout);
const absl::optional<int> assign_node_idx =
graph_view.GetNodeIndex(*assign_node_in_fanout);
nodes_to_relax->insert(assign_node_idx.value());
}
}
}
}
return absl::OkStatus();
}
}
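// Top-level driver: relax allocator constraints, optionally run the
// recomputation rewriting pass, then alternate the scheduling and swapping
// passes (at most 25 rounds) until the graph stops changing or the
// optimization deadline is exceeded.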
Status MemoryOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
std::set<int> nodes_to_relax;
TF_RETURN_IF_ERROR(FindAssignNodesToRelax(item.graph, &nodes_to_relax));
bool run_recomputation_pass =
(optimization_level_ == RewriterConfig::RECOMPUTATION_HEURISTICS ||
optimization_level_ == RewriterConfig::HEURISTICS ||
optimization_level_ == RewriterConfig::MANUAL);
if (!run_recomputation_pass && nodes_to_relax.empty() && item.fetch.empty()) {
return errors::Aborted("Nothing to do.");
}
GrapplerItem optimized_item(item);
RelaxAssignNodes(nodes_to_relax, &optimized_item.graph);
if (run_recomputation_pass) {
RecomputationRewritingPass(optimization_level_,
recomputation_targets_name_scope_,
&optimized_item.graph, item);
}
std::unordered_set<string> skip_list;
std::unique_ptr<GraphMemory> memory;
if (!item.fetch.empty() && cluster != nullptr) {
bool updated_graph = true;
for (int i = 0; i < 25 && updated_graph; ++i) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
updated_graph = false;
if ((optimization_level_ == RewriterConfig::DEFAULT_MEM_OPT ||
optimization_level_ == RewriterConfig::SCHEDULING_HEURISTICS ||
optimization_level_ == RewriterConfig::HEURISTICS) &&
cluster != nullptr) {
if (SchedulingPass(cluster, &memory, &optimized_item)) {
memory.reset();
updated_graph = true;
}
}
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
if ((optimization_level_ == RewriterConfig::DEFAULT_MEM_OPT ||
optimization_level_ == RewriterConfig::SWAPPING_HEURISTICS ||
optimization_level_ == RewriterConfig::HEURISTICS ||
optimization_level_ == RewriterConfig::MANUAL) &&
cluster != nullptr) {
if (SwappingPass(optimization_level_, cluster, &memory, &optimized_item,
&skip_list)) {
memory.reset();
updated_graph = true;
}
}
}
}
optimized_graph->Swap(&optimized_item.graph);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/memory_optimizer.h"
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
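// Recomputation tests: nodes tagged with "_recompute_hint" (or matching the
// cheap-op heuristics under a gradient name scope) should be cloned as
// Recomputed/* nodes gated by RecomputeTrigger/* control nodes.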
class RecomputeSubgraphTest : public GrapplerTest {};
TEST_F(RecomputeSubgraphTest, SimpleSubgraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::AddN(s.WithOpName("gradients/d"), {c});
Output e = ops::AddN(s.WithOpName("gradients/e"), {d, b});
Output f = ops::AddN(s.WithOpName("gradients/f"), {e, a});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(6, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
NodeMap post_transform_node_map(&output);
EXPECT_EQ(8, output.node_size());
NodeDef* transformed_e = post_transform_node_map.GetNode(e.name());
EXPECT_EQ(2, transformed_e->input_size());
EXPECT_EQ("gradients/d", transformed_e->input(0));
EXPECT_EQ("Recomputed/b", transformed_e->input(1));
NodeDef* recomputed_b = post_transform_node_map.GetNode("Recomputed/b");
EXPECT_EQ(2, recomputed_b->input_size());
EXPECT_EQ("a", recomputed_b->input(0));
EXPECT_EQ("^RecomputeTrigger/b", recomputed_b->input(1));
NodeDef* recompute_trigger =
post_transform_node_map.GetNode("RecomputeTrigger/b");
EXPECT_EQ(1, recompute_trigger->input_size());
EXPECT_EQ("^gradients/d", recompute_trigger->input(0));
}
TEST_F(RecomputeSubgraphTest, NoFeedsRecomputed) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::AddN(s.WithOpName("gradients/d"), {c});
Output e = ops::AddN(s.WithOpName("gradients/e"), {d, b});
Output f = ops::AddN(s.WithOpName("gradients/f"), {e, a});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.feed.emplace_back("b", Tensor());
EXPECT_EQ(6, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(6, output.node_size());
}
TEST_F(RecomputeSubgraphTest, TwoInputSubgraphs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("a"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Variable(s.WithOpName("b"), {2, 3, 4}, DT_FLOAT);
Output d = ops::AddN(
s.WithOpName("some_name_scope/gradients/two_subgraph_inputs"), {a, b});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(3, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
(*pre_transform_node_map.GetNode("a")->mutable_attr())["_recompute_hint"]
.set_i(0);
(*pre_transform_node_map.GetNode("b")->mutable_attr())["_recompute_hint"]
.set_i(0);
MemoryOptimizer optimizer(RewriterConfig::MANUAL,
"some_name_scope/gradients");
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
NodeMap post_transform_node_map(&output);
EXPECT_EQ(7, output.node_size());
EXPECT_NE(post_transform_node_map.GetNode("Recomputed/a"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("Recomputed/b"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("RecomputeTrigger/a"), nullptr);
EXPECT_NE(post_transform_node_map.GetNode("RecomputeTrigger/b"), nullptr);
}
TEST_F(RecomputeSubgraphTest, MultiNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Variable(s.WithOpName("Conv"), {2, 3, 4}, DT_FLOAT);
Output b = ops::Identity(s.WithOpName("BN"), a);
Output c = ops::Identity(s.WithOpName("ReLU"), b);
Output d = ops::Identity(s.WithOpName("Conv1"), c);
Output trigger = ops::AddN(s.WithOpName("gradients/BN1Grad"), {d});
Output e = ops::AddN(s.WithOpName("gradients/Conv1Grad"), {trigger, c});
Output f = ops::AddN(s.WithOpName("gradients/ReLUGrad"), {e, c});
Output g = ops::AddN(s.WithOpName("gradients/BNGrad"), {f, a});
Output h = ops::AddN(s.WithOpName("gradients/ConvGrad"), {g});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ(9, item.graph.node_size());
NodeMap pre_transform_node_map(&item.graph);
pre_transform_node_map.GetNode("BN")->set_op("FusedBatchNorm");
pre_transform_node_map.GetNode("ReLU")->set_op("Relu");
MemoryOptimizer optimizer(RewriterConfig::RECOMPUTATION_HEURISTICS);
GraphDef first_pass_output;
Status first_pass_status =
optimizer.Optimize(nullptr, item, &first_pass_output);
TF_EXPECT_OK(first_pass_status);
NodeMap post_transform_node_map(&first_pass_output);
EXPECT_EQ(13, first_pass_output.node_size());
NodeDef* transformed_e = post_transform_node_map.GetNode(e.name());
EXPECT_EQ(2, transformed_e->input_size());
EXPECT_EQ("gradients/BN1Grad", transformed_e->input(0));
EXPECT_EQ("Recomputed/ReLU", transformed_e->input(1));
NodeDef* transformed_f = post_transform_node_map.GetNode(f.name());
EXPECT_EQ(2, transformed_f->input_size());
EXPECT_EQ("gradients/Conv1Grad", transformed_f->input(0));
EXPECT_EQ("Recomputed/ReLU", transformed_f->input(1));
NodeDef* transformed_g = post_transform_node_map.GetNode(g.name());
EXPECT_EQ(2, transformed_g->input_size());
EXPECT_EQ("gradients/ReLUGrad", transformed_g->input(0));
EXPECT_EQ("Conv", transformed_g->input(1));
NodeDef* recomputed_b = post_transform_node_map.GetNode("Recomputed/BN");
EXPECT_EQ(2, recomputed_b->input_size());
EXPECT_EQ("Conv", recomputed_b->input(0));
EXPECT_EQ("^RecomputeTrigger/BN", recomputed_b->input(1));
NodeDef* recompute_trigger_b =
post_transform_node_map.GetNode("RecomputeTrigger/BN");
EXPECT_EQ(1, recompute_trigger_b->input_size());
EXPECT_EQ("^RecomputeTrigger/ReLU", recompute_trigger_b->input(0));
NodeDef* recomputed_c = post_transform_node_map.GetNode("Recomputed/ReLU");
EXPECT_EQ(2, recomputed_c->input_size());
EXPECT_EQ("Recomputed/BN", recomputed_c->input(0));
EXPECT_EQ("^RecomputeTrigger/ReLU", recomputed_c->input(1));
NodeDef* recompute_trigger_c =
post_transform_node_map.GetNode("RecomputeTrigger/ReLU");
EXPECT_EQ(1, recompute_trigger_c->input_size());
EXPECT_EQ("^gradients/BN1Grad", recompute_trigger_c->input(0));
}
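// Swapping and scheduling tests run on a virtual cluster whose devices have
// only 1 MiB of memory, so the memory-pressure heuristics reliably trigger.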
class MemoryOptimizerTest : public GrapplerTest {
public:
static std::unique_ptr<VirtualCluster> CreateVirtualCluster() {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_memory_size(1024 * 1024);
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.set_frequency(1000);
gpu_device.set_num_cores(24);
gpu_device.set_bandwidth(128);
gpu_device.set_memory_size(1024 * 1024);
gpu_device.mutable_environment()->insert({"architecture", "6"});
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
devices["/job:localhost/replica:0/task:0/gpu:0"] = gpu_device;
return std::unique_ptr<VirtualCluster>(new VirtualCluster(devices));
}
};
TEST_F(MemoryOptimizerTest, SimpleSwapping) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Variable(s.WithOpName("a").WithDevice("/gpu:0"), {10, 10}, DT_FLOAT);
Output b = ops::AddN(s.WithOpName("b").WithDevice("/gpu:0"), {a});
Output c = ops::AddN(s.WithOpName("c").WithDevice("/gpu:0"), {b});
Output d = ops::AddN(s.WithOpName("d").WithDevice("/gpu:0"), {c});
Output e = ops::AddN(s.WithOpName("e").WithDevice("/gpu:0"), {b, d});
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {10, 10});
Output init = ops::Assign(s.WithOpName("init"), a, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
EXPECT_EQ(7, item.graph.node_size());
EXPECT_EQ(NodeName(e.name()), item.graph.node(4).name());
AttrValue& val =
(*item.graph.mutable_node(4)->mutable_attr())["_swap_to_host"];
val.mutable_list()->add_i(0);
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(9, output.node_size());
const NodeDef& new_e = output.node(6);
EXPECT_EQ(NodeName(e.name()), new_e.name());
EXPECT_EQ(2, new_e.input_size());
EXPECT_EQ(NodeName(d.name()), new_e.input(1));
EXPECT_EQ("swap_in_e_0", new_e.input(0));
const NodeDef& swap_out = output.node(7);
EXPECT_EQ("swap_out_e_0", swap_out.name());
EXPECT_EQ("_CopyFromGpuToHost", swap_out.op());
const NodeDef& swap_in = output.node(8);
EXPECT_EQ("swap_in_e_0", swap_in.name());
EXPECT_EQ("_CopyFromHostToGpu", swap_in.op());
EXPECT_EQ(NodeName(b.name()), swap_out.input(0));
EXPECT_EQ(NodeName(swap_out.name()), swap_in.input(0));
EXPECT_EQ("^c", swap_in.input(1));
const NodeDef& new_c = output.node(4);
EXPECT_EQ(NodeName(c.name()), new_c.name());
EXPECT_EQ("^swap_out_e_0", new_c.input(1));
GrapplerItem item_copy = item.WithGraph(std::move(output));
status = optimizer.Optimize(cluster.get(), item_copy, &output);
TF_EXPECT_OK(status);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.fetch = {"e"};
item.init_ops = {init.name()};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(MemoryOptimizerTest, SwappingHeuristics) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output v = ops::Variable(s.WithOpName("v").WithDevice("/gpu:0"),
{128, 128, 8}, DT_FLOAT);
Output a = ops::Identity(s.WithOpName("a").WithDevice("/gpu:0"), v);
Output b = ops::Square(s.WithOpName("b").WithDevice("/gpu:0"), v);
Output c = ops::Sqrt(s.WithOpName("c").WithDevice("/gpu:0"), a);
Output d = ops::Identity(s.WithOpName("d").WithDevice("/gpu:0"), b);
Output axis = ops::Const(s.WithOpName("axis"), 0);
Output e =
ops::Concat(s.WithOpName("e").WithDevice("/gpu:0"), {a, b, c, d}, axis);
Output f = ops::Square(s.WithOpName("f").WithDevice("/gpu:0"), a);
Output g = ops::Sqrt(s.WithOpName("g").WithDevice("/gpu:0"), b);
Output h = ops::Exp(s.WithOpName("h").WithDevice("/gpu:0"), c);
Output i = ops::Log(s.WithOpName("i").WithDevice("/gpu:0"), d);
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {128, 128, 8});
Output init = ops::Assign(s.WithOpName("init"), v, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e", "f", "g", "h", "i"};
item.init_ops = {init.name()};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SWAPPING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
for (const auto& node : output.node()) {
if (node.name() == "e") {
EXPECT_EQ(5, node.input_size());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("swap_in_e_1", node.input(1));
EXPECT_EQ("swap_in_e_2", node.input(2));
EXPECT_EQ("swap_in_e_3", node.input(3));
EXPECT_EQ("axis", node.input(4));
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
#endif
}
TEST_F(MemoryOptimizerTest, UnswappableInputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output v = ops::Variable(s.WithOpName("v").WithDevice("/gpu:0"),
{128, 128, 8}, DT_FLOAT);
Output a = ops::Square(s.WithOpName("a").WithDevice("/gpu:0"), v);
Output b = ops::Identity(s.WithOpName("b").WithDevice("/gpu:0"), {a});
Output c = ops::Identity(s.WithOpName("c").WithDevice("/gpu:0"), {a});
Output index = ops::Const(s.WithOpName("index"), {0});
Output indices = ops::Tile(s.WithOpName("indices"), index, {128});
Output d =
ops::ScatterAdd(s.WithOpName("d").WithDevice("/gpu:0"), v, indices, c);
Output axis = ops::Const(s.WithOpName("axis"), 0);
Output e =
ops::Concat(s.WithOpName("e").WithDevice("/gpu:0"), {b, c, d}, axis);
Output constant = ops::Const(s.WithOpName("constant"), 0.0f, {128, 128, 8});
Output init = ops::Assign(s.WithOpName("init"), v, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
item.init_ops = {init.name()};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SWAPPING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
for (const auto& node : output.node()) {
if (node.name() == "e") {
EXPECT_EQ(5, node.input_size());
EXPECT_EQ("d", node.input(2));
EXPECT_EQ("^swap_out_d_2", node.input(4));
}
}
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(MemoryOptimizerTest, AccumulationRewrites) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomNormal(s.WithOpName("a").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output b = ops::RandomNormal(s.WithOpName("b").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output c = ops::RandomNormal(s.WithOpName("c").WithDevice("/cpu:0"),
{128, 128, 8}, DT_FLOAT);
Output d = ops::AddN(s.WithOpName("d").WithDevice("/cpu:0"), {a, b, c});
Output e = ops::Square(s.WithOpName("e").WithDevice("/cpu:0"), d);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"e"};
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
MemoryOptimizer optimizer(RewriterConfig::SCHEDULING_HEURISTICS);
GraphDef output;
Status status = optimizer.Optimize(cluster.get(), item, &output);
TF_EXPECT_OK(status);
int count = 0;
for (const auto& node : output.node()) {
if (node.name() == "d") {
EXPECT_EQ("DestroyTemporaryVariable", node.op());
count++;
} else if (node.name() == "d/tmp_var_initializer") {
EXPECT_EQ("Assign", node.op());
count++;
} else if (node.name() == "d/tmp_var") {
EXPECT_EQ("TemporaryVariable", node.op());
count++;
} else if (node.name() == "e") {
EXPECT_EQ("Square", node.op());
EXPECT_EQ("d", node.input(0));
count++;
}
}
EXPECT_EQ(4, count);
std::vector<string> fetch = {"a", "b", "c", "e"};
auto tensors = EvaluateNodes(output, fetch, {});
EXPECT_EQ(4, tensors.size());
for (int i = 0; i < tensors[0].NumElements(); ++i) {
float actual = tensors[3].flat<float>()(i);
float expected = 0.0f;
for (int j = 0; j < 3; ++j) {
expected += tensors[j].flat<float>()(i);
}
expected *= expected;
EXPECT_NEAR(actual, expected, 1e-4);
}
}
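// Allocator-constraint tests: "_grappler_relax_allocator_constraints" should
// be set exactly when an Assign's fanout never leaves compatible devices or
// reaches a _Send.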
class RelaxAllocatorConstraintsTest : public GrapplerTest {};
TEST_F(RelaxAllocatorConstraintsTest, SameDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/cpu:0"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_EQ(true, node.attr().at("_grappler_relax_allocator_constraints").b());
item.fetch = {"exp"};
item.init_ops = {"variable"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(RelaxAllocatorConstraintsTest, DifferentDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/gpu:0"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.fetch = {"exp"};
item.init_ops = {"variable"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
#endif
}
TEST_F(RelaxAllocatorConstraintsTest, SameDeviceType) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
Output exp = ops::Exp(s.WithOpName("exp").WithDevice("/cpu:1"), assign);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_TRUE(node.attr().at("_grappler_relax_allocator_constraints").b());
}
TEST_F(RelaxAllocatorConstraintsTest, SendNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant = ops::Const(s.WithOpName("constant").WithDevice("/cpu:0"),
-3.14f, {128, 128});
Output variable = ops::Variable(s.WithOpName("variable").WithDevice("/cpu:0"),
{128, 128}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign").WithDevice("/cpu:0"),
variable, constant);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
NodeDef* send = item.graph.add_node();
send->set_name("send");
send->set_op("_Send");
send->add_input("assign");
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(2);
EXPECT_EQ("assign", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
}
TEST_F(RelaxAllocatorConstraintsTest, AssignNodeInFanout) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output constant0 = ops::Const(s.WithOpName("constant0").WithDevice("/cpu:0"),
-42.0f, {128, 128});
Output variable0 = ops::Variable(
s.WithOpName("variable0").WithDevice("/cpu:0"), {128, 128}, DT_FLOAT);
Output assign0 = ops::Assign(s.WithOpName("assign0").WithDevice("/cpu:0"),
variable0, constant0);
Output assign2 = ops::Assign(s.WithOpName("assign2").WithDevice("/cpu:0"),
variable0, constant0);
Output assign3 = ops::Assign(s.WithOpName("assign3").WithDevice("/cpu:0"),
variable0, constant0);
Output assign4 = ops::Assign(s.WithOpName("assign4").WithDevice("/cpu:0"),
variable0, constant0);
Output rank_cpu =
ops::Rank(s.WithOpName("rank_cpu").WithDevice("/cpu:0"), assign3);
Output exp_cpu =
ops::Exp(s.WithOpName("exp_cpu").WithDevice("/cpu:0"), assign4);
Output rank_gpu = ops::Rank(s.WithOpName("rank_gpu")
.WithDevice("/gpu:0")
.WithControlDependencies(assign2),
assign0);
Output id_gpu = ops::Identity(s.WithOpName("id_gpu"), rank_cpu);
Output id_gpu2 = ops::Identity(s.WithOpName("id_gpu2"), exp_cpu);
Output variable_gpu = ops::Variable(
s.WithOpName("variable_gpu").WithDevice("/gpu:0"), {128, 128}, DT_FLOAT);
Output assign_gpu = ops::Assign(
s.WithOpName("assign_gpu").WithDevice("/gpu:0"), variable_gpu, exp_cpu);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"assign0", "assign_gpu", "rank_gpu", "id_gpu", "id_gpu2"};
MemoryOptimizer optimizer(RewriterConfig::MANUAL);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto node = output.node(3);
EXPECT_EQ("assign0", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
node = output.node(4);
EXPECT_EQ("assign2", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_EQ(true, node.attr().at("_grappler_relax_allocator_constraints").b());
node = output.node(5);
EXPECT_EQ("assign3", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_EQ(true, node.attr().at("_grappler_relax_allocator_constraints").b());
node = output.node(6);
EXPECT_EQ("assign4", node.name());
EXPECT_EQ(0, node.attr().count("_grappler_relax_allocator_constraints"));
node = output.node(12);
EXPECT_EQ("assign_gpu", node.name());
EXPECT_EQ(1, node.attr().count("_grappler_relax_allocator_constraints"));
EXPECT_EQ(true, node.attr().at("_grappler_relax_allocator_constraints").b());
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
item.init_ops = {"exp_cpu", "variable_gpu"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
for (int i = 0; i < tensors_expected.size(); ++i) {
if (i == 2 || i == 3) {
test::ExpectTensorEqual<int>(tensors_expected[i], tensors[i]);
} else {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/memory_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/memory_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecb37921-f9b0-4f3c-870b-c6cfd0c1c1f1 | cpp | tensorflow/tensorflow | remapper | tensorflow/core/grappler/optimizers/remapper.cc | tensorflow/core/grappler/optimizers/remapper_test.cc | #include "tensorflow/core/grappler/optimizers/remapper.h"
#include <algorithm>
#include <cstdlib>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/use_cudnn.h"
#include "tsl/platform/errors.h"
#ifdef INTEL_MKL
#include "tensorflow/core/util/mkl_heuristics.h"
#endif
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
namespace {
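// Names of the fused kernels this pass emits plus commonly used attribute
// keys; kMissingIndex marks a pattern slot that was not matched.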
constexpr char kFusedConv2D[] = "_FusedConv2D";
constexpr char kFusedConv3D[] = "_FusedConv3D";
constexpr char kFusedMatMul[] = "_FusedMatMul";
constexpr char kFusedDepthwiseConv2dNative[] = "_FusedDepthwiseConv2dNative";
constexpr char kFusedBatchNormEx[] = "_FusedBatchNormEx";
constexpr char kFusedBatchNormGradEx[] = "_FusedBatchNormGradEx";
constexpr char kTensorToHashBucket[] = "_TensorToHashBucketFast";
constexpr char kLeakyRelu[] = "LeakyRelu";
constexpr char kMklFusedMish[] = "_MklFusedMish";
constexpr char kRelu[] = "Relu";
constexpr char kRelu6[] = "Relu6";
constexpr char kElu[] = "Elu";
constexpr char kDataFormat[] = "data_format";
constexpr char kIsTraining[] = "is_training";
constexpr char kWidth[] = "width";
constexpr char kFill[] = "fill";
constexpr int kMissingIndex = -1;
struct RemapperContext {
explicit RemapperContext(GrapplerItem* item, Status* status,
RewriterConfig::CpuLayout cpu_layout_conversion,
bool xla_auto_clustering_on,
bool xla_cpu_jit_disable_fusion)
: nodes_to_preserve(item->NodesToPreserve()),
graph_view(&item->graph, status),
graph_properties(*item),
inferred_graph_properties(false),
cpu_layout_conversion(cpu_layout_conversion),
xla_auto_clustering_on(xla_auto_clustering_on),
xla_cpu_jit_disable_fusion(xla_cpu_jit_disable_fusion) {}
std::unordered_set<string> nodes_to_preserve;
utils::MutableGraphView graph_view;
GraphProperties graph_properties;
bool inferred_graph_properties;
RewriterConfig::CpuLayout cpu_layout_conversion;
bool xla_auto_clustering_on;
bool xla_cpu_jit_disable_fusion;
};
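// Each struct below records the indices (into the graph) of the nodes making
// up one matched fusion pattern.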
struct FusedBatchNorm {
FusedBatchNorm() = default;
explicit FusedBatchNorm(int fused_batch_norm)
: fused_batch_norm(fused_batch_norm) {}
int fused_batch_norm = kMissingIndex;
};
struct FusedBatchNormEx {
FusedBatchNormEx() = default;
int fused_batch_norm = kMissingIndex;
int side_input = kMissingIndex;
int activation = kMissingIndex;
int invalidated = kMissingIndex;
};
struct FusedBatchNormGradEx {
int fused_batch_norm_grad = kMissingIndex;
int activation_grad = kMissingIndex;
int side_input_grad = kMissingIndex;
int fwd_fused_batch_norm = kMissingIndex;
};
struct TensorToHashBucket {
TensorToHashBucket() = default;
explicit TensorToHashBucket(int op1, int op2, int op3)
: pre_as_string(op1), as_string(op2), string_to_hash_bucket(op3) {}
int pre_as_string = kMissingIndex;
int as_string = kMissingIndex;
int string_to_hash_bucket = kMissingIndex;
};
struct PadWithConv3D {
PadWithConv3D() = default;
PadWithConv3D(int contraction_idx, int pad_idx, int padding_const_idx)
: contraction_idx(contraction_idx),
pad_idx(pad_idx),
padding_const_idx(padding_const_idx) {}
int contraction_idx = kMissingIndex;
int pad_idx = kMissingIndex;
int padding_const_idx = kMissingIndex;
};
struct ContractionWithBiasAdd {
ContractionWithBiasAdd() = default;
ContractionWithBiasAdd(int contraction, int bias_add, int bias_port)
: contraction(contraction), bias_add(bias_add), bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithActivation {
ContractionWithActivation() = default;
ContractionWithActivation(int contraction, int activation)
: contraction(contraction), activation(activation) {}
int contraction = kMissingIndex;
int activation = kMissingIndex;
};
struct ContractionWithBiasAddAndActivation {
ContractionWithBiasAddAndActivation() = default;
ContractionWithBiasAddAndActivation(int contraction, int bias_add,
int activation, int bias_port)
: contraction(contraction),
bias_add(bias_add),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int activation = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithSqueezeAndBiasAdd {
ContractionWithSqueezeAndBiasAdd() = default;
ContractionWithSqueezeAndBiasAdd(int contraction, int squeeze, int bias_add)
: contraction(contraction), squeeze(squeeze), bias_add(bias_add) {}
int contraction = kMissingIndex;
int squeeze = kMissingIndex;
int bias_add = kMissingIndex;
};
struct ContractionWithBatchNorm {
ContractionWithBatchNorm() = default;
ContractionWithBatchNorm(int contraction, int fused_batch_norm,
float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBatchNormAndActivation {
ContractionWithBatchNormAndActivation() = default;
ContractionWithBatchNormAndActivation(int contraction, int fused_batch_norm,
int activation, float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
activation(activation),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
int activation = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBiasAddAndAdd {
ContractionWithBiasAddAndAdd() = default;
ContractionWithBiasAddAndAdd(int contraction, int bias_add, int add,
int port_id, int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int bias_port = 1;
};
struct ContractionWithBiasAndAddActivation {
ContractionWithBiasAndAddActivation() = default;
ContractionWithBiasAndAddActivation(int contraction, int bias_add, int add,
int port_id, int activation,
int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int activation = kMissingIndex;
int bias_port = 1;
};
bool IsInPreserveSet(const RemapperContext& ctx, const NodeDef* node) {
return ctx.nodes_to_preserve.count(node->name()) > 0;
}
bool HaveSameDataType(const NodeDef* lhs, const NodeDef* rhs,
const string& type_attr = "T") {
DataType lhs_attr = GetDataTypeFromAttr(*lhs, type_attr);
DataType rhs_attr = GetDataTypeFromAttr(*rhs, type_attr);
return lhs_attr != DT_INVALID && rhs_attr != DT_INVALID &&
lhs_attr == rhs_attr;
}
bool HasDataType(const NodeDef* node, const DataType& expected,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*node, type_attr);
return dtype == expected;
}
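// Device and dtype gating for contraction fusions. With oneDNN enabled the
// CPU path accepts a wider set of ops and dtypes; the default CPU path only
// fuses float (plus double for Conv2D).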
bool IsCpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
bool is_one_dnn_enabled = IsMKLEnabled();
if (is_one_dnn_enabled) {
bool is_supported_matmul = false;
if (IsMatMul(*contraction)) {
is_supported_matmul = (dtype == DT_BFLOAT16)
? contraction->attr().contains("transpose_a") &&
!contraction->attr().at("transpose_a").b()
: true;
}
return ((IsConv2D(*contraction) || IsDepthwiseConv2dNative(*contraction) ||
IsConv3D(*contraction) || IsAnyBatchMatMul(*contraction) ||
is_supported_matmul) &&
IsDataTypeSupportedByOneDNNOnThisCPU(dtype));
}
if (IsConv2D(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_DOUBLE;
} else if (IsMatMul(*contraction)) {
return dtype == DT_FLOAT;
} else {
return false;
}
}
bool IsGpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
if (IsConv2D(*contraction) || IsMatMul(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_HALF;
} else {
return false;
}
}
bool IsCpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv_node) {
const string& data_format = conv_node->attr().at(kDataFormat).s();
if (IsConv2D(*conv_node)) {
return data_format == "NHWC" || (IsMKLEnabled() && data_format == "NCHW") ||
(ctx.cpu_layout_conversion == RewriterConfig::NHWC_TO_NCHW &&
data_format == "NCHW");
} else if (IsConv3D(*conv_node)) {
return data_format == "NDHWC" || (IsMKLEnabled() && data_format == "NCDHW");
} else {
return false;
}
}
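// GPU MatMul epilogue fusion is gated behind the TF_USE_CUBLASLT environment
// variable, read once and cached.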
bool BlasLtMatmulEnabled() {
static bool is_enabled = [] {
bool is_enabled = false;
    TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
        "TF_USE_CUBLASLT", /*default_val=*/false, &is_enabled));
return is_enabled;
}();
return is_enabled;
}
bool IsGpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
const string& data_format = conv2d->attr().at(kDataFormat).s();
return data_format == "NHWC" || data_format == "NCHW";
}
bool IsCpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
return NodeIsOnCpu(conv2d) && IsCpuCompatibleDataType(conv2d) &&
IsCpuCompatibleDataFormat(ctx, conv2d);
}
bool IsCpuCompatibleConv3D(const RemapperContext& ctx, const NodeDef* conv3d) {
DCHECK(IsConv3D(*conv3d)) << "Expected Conv3D op";
return NodeIsOnCpu(conv3d) && IsCpuCompatibleDataType(conv3d) &&
IsCpuCompatibleDataFormat(ctx, conv3d);
}
bool IsGpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d,
const NodeDef* activation) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
if (IsRelu(*activation)) {
return NodeIsOnGpu(conv2d) && IsGpuCompatibleDataType(conv2d) &&
IsGpuCompatibleDataFormat(ctx, conv2d);
} else if (IsRelu6(*activation) || IsElu(*activation) ||
IsLeakyRelu(*activation)) {
DataType dtype = GetDataTypeFromAttr(*conv2d, "T");
const string& data_format = conv2d->attr().at(kDataFormat).s();
return NodeIsOnGpu(conv2d) && dtype == DT_HALF && data_format == "NHWC";
}
return false;
}
bool IsGpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul,
const NodeDef* activation) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
if (activation == nullptr || IsRelu(*activation)) {
return BlasLtMatmulEnabled() && NodeIsOnGpu(matmul) &&
IsGpuCompatibleDataType(matmul);
} else if (IsTanh(*activation) || IsSigmoid(*activation)) {
DataType dtype = GetDataTypeFromAttr(*matmul, "T");
return NodeIsOnGpu(matmul) && dtype == DT_HALF;
}
return false;
}
bool IsCpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
return NodeIsOnCpu(matmul) && IsCpuCompatibleDataType(matmul);
}
bool IsCpuCompatibleDepthwiseConv2dNative(const NodeDef* dw_conv2d) {
DCHECK(IsDepthwiseConv2dNative(*dw_conv2d))
<< "Expected DepthwiseConv2dNative op";
return NodeIsOnCpu(dw_conv2d) && IsCpuCompatibleDataType(dw_conv2d);
}
template <typename Pattern>
bool IsCpuCompatible(const RemapperContext& ctx, const Pattern& matched) {
if (ctx.xla_cpu_jit_disable_fusion) return false;
const NodeDef& node = ctx.graph_view.graph()->node(matched.contraction);
if (IsConv2D(node)) {
return IsCpuCompatibleConv2D(ctx, &node);
} else if (IsDepthwiseConv2dNative(node)) {
return (IsMKLEnabled() && IsCpuCompatibleDepthwiseConv2dNative(&node));
} else if (IsMatMul(node)) {
return IsCpuCompatibleMatMul(ctx, &node);
} else if (IsConv3D(node)) {
return (IsMKLEnabled() && IsCpuCompatibleConv3D(ctx, &node));
} else {
return false;
}
}
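// Returns true if cuDNN runtime kernel fusion is usable: requires cuDNN 8.4+,
// the cuDNN frontend, and every GPU in the cluster to be Ampere (compute
// capability 8.0) or newer. The result is computed once and cached.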
bool RuntimeFusionEnabled(const Cluster* cluster) {
static bool is_enabled = [&] {
#if CUDNN_VERSION >= 8400
if (!cluster) return false;
auto devices = cluster->GetDevices();
int num_gpus = 0;
int num_ampere = 0;
for (const auto& d : devices) {
if (d.second.type() == "GPU") {
num_gpus++;
auto cc_it = d.second.environment().find("architecture");
if (cc_it != d.second.environment().end()) {
double compute_capability = 0.0;
if (absl::SimpleAtod(cc_it->second, &compute_capability) &&
compute_capability >= 8.0) {
num_ampere++;
}
}
}
}
bool runtime_fusion_enabled = CudnnUseRuntimeFusion() &&
CudnnUseFrontend() && num_gpus > 0 &&
num_gpus == num_ampere;
if (CudnnUseRuntimeFusion() && !runtime_fusion_enabled) {
VLOG(1) << "Enabling Cudnn with runtime compilation requires the "
<< "Cudnn frontend and Ampere GPUs or later, but we got "
<< "Cudnn frontend is "
<< (CudnnUseFrontend() ? "enabled" : "disabled") << " and "
<< num_ampere << " Ampere GPU(s) out of total " << num_gpus
<< " GPU(s)";
}
return runtime_fusion_enabled;
#else
return false;
#endif
}();
return is_enabled;
}
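// Relu, Relu6, Elu and LeakyRelu are fusable by default; Tanh and Sigmoid
// additionally require oneDNN or cuDNN runtime fusion support.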
bool IsSupportedActivation(const NodeDef& node, const Cluster* cluster) {
bool is_default_supported =
IsRelu(node) || IsRelu6(node) || IsElu(node) || IsLeakyRelu(node);
bool is_device_specific = (IsMKLEnabled() || RuntimeFusionEnabled(cluster)) &&
(IsTanh(node) || IsSigmoid(node));
return (is_default_supported || is_device_specific);
}
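// Checks if contraction + BiasAdd + activation can be fused on GPU. Conv2D
// needs a filter with both spatial dimensions larger than one; activations
// that rely on cuDNN runtime fusion also need even channel (Conv2D) or even
// inner (MatMul) dimensions for the half-precision kernels.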
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAddAndActivation& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& activation_node = graph->node(matched.activation);
if (!IsSupportedActivation(activation_node, cluster)) return false;
const NodeDef& contraction_node = graph->node(matched.contraction);
if (IsConv2D(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
const TensorShapeProto& filter_shape =
input_props.size() >= 2 ? input_props[1].shape() : TensorShapeProto();
bool is_spatial_conv = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(0)) &&
IsKnown(filter_shape.dim(1)) &&
filter_shape.dim(0).size() != 1 &&
filter_shape.dim(1).size() != 1;
bool valid_channels = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(2)) &&
IsKnown(filter_shape.dim(3)) &&
filter_shape.dim(2).size() % 2 == 0 &&
filter_shape.dim(3).size() % 2 == 0;
return is_spatial_conv &&
(IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_channels)) &&
IsGpuCompatibleConv2D(ctx, &contraction_node, &activation_node);
} else if (IsMatMul(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
    const TensorShapeProto& a_shape =
        !input_props.empty() ? input_props[0].shape() : TensorShapeProto();
    const TensorShapeProto& b_shape =
        input_props.size() >= 2 ? input_props[1].shape() : TensorShapeProto();
bool valid_dims = Rank(a_shape) == 2 && Rank(b_shape) == 2 &&
IsKnown(a_shape.dim(1)) &&
IsKnown(b_shape.dim(1)) &&
a_shape.dim(1).size() % 2 == 0 &&
b_shape.dim(1).size() % 2 == 0;
return (IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_dims)) &&
IsGpuCompatibleMatMul(ctx, &contraction_node, &activation_node);
}
return false;
}
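// Checks if MatMul + BiasAdd (without activation) can be fused on GPU via
// cuBLASLt.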
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAdd& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM && !TF_HIPBLASLT
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& contraction_node = graph->node(matched.contraction);
if (!IsMatMul(contraction_node)) return false;
return IsGpuCompatibleMatMul(ctx, &contraction_node, nullptr);
}
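// Contraction + Squeeze + BiasAdd is not fused on GPU.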
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithSqueezeAndBiasAdd& matched,
const Cluster* cluster) {
return false;
}
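// Checks if the matched pattern is supported on the device its nodes are
// assigned to, on either CPU or GPU.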
template <typename Pattern>
bool IsDeviceCompatible(const RemapperContext& ctx, Pattern& matched,
Cluster* cluster = nullptr) {
return IsCpuCompatible(ctx, matched) ||
IsGpuCompatible(ctx, matched, cluster);
}
std::string GetActivationName(const std::string& s) {
if (s == kMklFusedMish) {
return "Mish";
} else {
return s;
}
}
inline bool HasControlFaninOrFanout(const utils::MutableNodeView& node_view) {
return node_view.NumControllingFanins() > 0 ||
node_view.NumControlledFanouts() > 0;
}
inline bool HasAtMostOneFanoutAtPort0(const utils::MutableNodeView& node_view) {
return node_view.GetRegularFanout(0).size() <= 1;
}
inline bool HasAtMostOneDataFanoutAtPort0(
const utils::MutableNodeView& node_view) {
const auto predicate = [](const auto& fanout) -> bool {
const NodeDef* node = fanout.node_view()->node();
return !IsShape(*node) && !IsRank(*node);
};
return absl::c_count_if(node_view.GetRegularFanout(0), predicate) <= 1;
}
bool IsConvOrMatMul(const NodeDef& node) {
return IsConv2D(node) || IsDepthwiseConv2dNative(node) || IsMatMul(node) ||
IsConv3D(node);
}
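// Detects an Add/AddV2 that is semantically a BiasAdd: one operand is the
// output of a contraction and the other broadcasts only along the channel
// (last) dimension. On success, bias_port is set to the operand playing the
// role of the bias.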
bool IsBiasSemanticAdd(const RemapperContext& ctx,
const utils::MutableNodeView& node_view,
int& bias_port) {
if (!IsMKLEnabled()) return false;
const auto* node_def = node_view.node();
if (!NodeIsOnCpu(node_def)) return false;
if (!IsAdd(*node_def) || node_view.NumRegularFanins() != 2) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node_def->name());
if (props.size() < 2) return false;
const auto& regular_fanin_0 = node_view.GetRegularFanin(0);
const auto* node_view_0 = regular_fanin_0.node_view();
const auto* node_def_0 = node_view_0->node();
const auto& regular_fanin_1 = node_view.GetRegularFanin(1);
const auto* node_view_1 = regular_fanin_1.node_view();
const auto* node_def_1 = node_view_1->node();
if (!IsConvOrMatMul(*node_def_0) && !IsConvOrMatMul(*node_def_1))
return false;
auto is_channel_last_format = [](const NodeDef& node) -> bool {
if (node.attr().contains("data_format")) {
const string data_format = node.attr().at("data_format").s();
return (data_format == "NHWC" || data_format == "NDHWC");
}
return true;
};
if (!is_channel_last_format(*node_def_0) ||
!is_channel_last_format(*node_def_1))
return false;
const TensorShapeProto& prot0_shape = props[0].shape();
const TensorShapeProto& prot1_shape = props[1].shape();
if (prot0_shape.unknown_rank() || prot1_shape.unknown_rank() ||
prot0_shape.dim_size() < 1 || prot1_shape.dim_size() < 1 ||
!IsKnown(prot0_shape.dim(prot0_shape.dim_size() - 1)) ||
!IsKnown(prot1_shape.dim(prot1_shape.dim_size() - 1)))
return false;
const auto is_supported_shape =
[&](const TensorShapeProto& shape,
const TensorShapeProto& bcast_shape) -> bool {
    const int conv_channel_dim = shape.dim(shape.dim_size() - 1).size();
if (shape.dim_size() == 4 && bcast_shape.dim_size() > 4) return false;
if (shape.dim_size() == 5 && bcast_shape.dim_size() > 5) return false;
if (shape.dim_size() < 2) return false;
if (conv_channel_dim != bcast_shape.dim(bcast_shape.dim_size() - 1).size())
return false;
for (int i = 0; i < bcast_shape.dim_size() - 1; i++) {
if (1 != bcast_shape.dim(i).size()) return false;
}
return true;
};
if (ShapesSymbolicallyEqual(prot0_shape, prot1_shape) ||
!ShapesBroadcastable(prot0_shape, prot1_shape))
return false;
if (IsConvOrMatMul(*node_def_0)) {
bias_port = 1;
return (is_supported_shape(prot0_shape, prot1_shape));
} else if (IsConvOrMatMul(*node_def_1)) {
bias_port = 0;
return (is_supported_shape(prot1_shape, prot0_shape));
}
return false;
}
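// Attaches the statically inferred input shapes to the node as an
// "_input_shapes" attribute; only takes effect when oneDNN is enabled.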
void AddInputShapesAttr(const RemapperContext& ctx, int node_index) {
auto mutable_node = ctx.graph_view.graph()->mutable_node(node_index);
AttrValue attr_input_shape;
auto tensor_properties =
ctx.graph_properties.GetInputProperties(mutable_node->name());
for (const auto& tensor_property : tensor_properties) {
TensorShapeProto* proto = attr_input_shape.mutable_list()->add_shape();
*proto = tensor_property.shape();
}
if (IsMKLEnabled() && !tensor_properties.empty()) {
(*mutable_node->mutable_attr())["_input_shapes"] =
std::move(attr_input_shape);
}
}
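// Matches a contraction (Conv2D/Conv3D/MatMul/DepthwiseConv2dNative)
// followed by a BiasAdd, or by an Add that acts like a BiasAdd.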
bool FindContractionWithBias(const RemapperContext& ctx, int node_index,
ContractionWithBiasAdd* matched,
bool check_device_compatible = true) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
int bias_port = 1;
if (!IsBiasAdd(*node_def) && !IsBiasSemanticAdd(ctx, *node_view, bias_port))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(1 - bias_port);
const auto* contraction_node_view = regular_fanin_0.node_view();
const auto* contraction_node_def = contraction_node_view->node();
bool is_contraction = IsConv2D(*contraction_node_def) ||
(IsConv3D(*contraction_node_def) && IsMKLEnabled()) ||
IsMatMul(*contraction_node_def) ||
IsDepthwiseConv2dNative(*contraction_node_def);
#ifdef DNNL_AARCH64_USE_ACL
if (IsDepthwiseConv2dNative(*contraction_node_def)) is_contraction = false;
#endif
if (!is_contraction || !HaveSameDataType(node_def, contraction_node_def) ||
HasControlFaninOrFanout(*contraction_node_view) ||
!HasAtMostOneFanoutAtPort0(*contraction_node_view) ||
IsInPreserveSet(ctx, contraction_node_def))
return false;
const ContractionWithBiasAdd pattern{contraction_node_view->node_index(),
node_index, bias_port};
if (check_device_compatible && !IsDeviceCompatible(ctx, pattern))
return false;
*matched = pattern;
return true;
}
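// Matches a _FusedConv2D/_FusedConv3D followed by LeakyRelu or oneDNN's
// fused Mish, provided no activation has been fused into it yet.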
bool FindFusedConvWithFusedActivation(const RemapperContext& ctx,
int node_index,
ContractionWithActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!NodeIsOnCpu(node_def) && !IsMKLEnabled()) return false;
if (!IsLeakyRelu(*node_def) && !IsMklFusedMish(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* contraction_node_view = regular_fanin_0.node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!(contraction_node_def->op() == kFusedConv2D ||
contraction_node_def->op() == kFusedConv3D))
return false;
auto contraction_fused_ops_list =
contraction_node_def->attr().at("fused_ops").list().s();
  for (const auto& fused_op : contraction_fused_ops_list) {
    if (fused_op == kLeakyRelu || fused_op == kMklFusedMish ||
        fused_op == kRelu || fused_op == kRelu6 || fused_op == kElu) {
      return false;
    }
  }
const ContractionWithActivation pattern{contraction_node_view->node_index(),
node_view->node_index()};
*matched = pattern;
return true;
}
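// Matches contraction + BiasAdd + activation. Tanh and Sigmoid are accepted
// only after MatMul; LeakyRelu only after Conv2D, MatMul or (with oneDNN)
// Conv3D.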
bool FindContractionWithBiasAndActivation(
const RemapperContext& ctx, Cluster* cluster, int node_index,
ContractionWithBiasAddAndActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsSupportedActivation(*node_def, cluster)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* bias_add_node_view = regular_fanin_0.node_view();
const auto* bias_add_node_def = bias_add_node_view->node();
ContractionWithBiasAdd base;
if (!FindContractionWithBias(ctx, bias_add_node_view->node_index(), &base,
false) ||
!HasAtMostOneFanoutAtPort0(*bias_add_node_view) ||
!HaveSameDataType(node_def, bias_add_node_def) ||
IsInPreserveSet(ctx, bias_add_node_def))
return false;
const auto* contraction_node_view =
bias_add_node_view->GetRegularFanin(1 - base.bias_port).node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!IsMatMul(*contraction_node_def) &&
(IsTanh(*node_def) || IsSigmoid(*node_def)))
return false;
if (!(IsConv2D(*contraction_node_def) || IsMatMul(*contraction_node_def) ||
(IsConv3D(*contraction_node_def) && IsMKLEnabled())) &&
IsLeakyRelu(*node_def))
return false;
const ContractionWithBiasAddAndActivation pattern{
base.contraction, base.bias_add, node_index, base.bias_port};
if (!IsDeviceCompatible(ctx, pattern, cluster)) return false;
*matched = pattern;
return true;
}
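// Matches Conv2D/Conv3D + Squeeze + BiasAdd, where the Squeeze must not
// remove the channel dimension.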
bool FindConvWithSqueezeAndBias(const RemapperContext& ctx, int node_index,
ContractionWithSqueezeAndBiasAdd* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsBiasAdd(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* squeeze_node_view = regular_fanin_0.node_view();
const auto* squeeze_node_def = squeeze_node_view->node();
if (!IsSqueeze(*squeeze_node_def) ||
!HaveSameDataType(node_def, squeeze_node_def, "T") ||
HasControlFaninOrFanout(*squeeze_node_view) ||
!HasAtMostOneFanoutAtPort0(*squeeze_node_view) ||
IsInPreserveSet(ctx, squeeze_node_def))
return false;
if (squeeze_node_view->NumRegularFanins() < 1) return false;
const auto& squeeze_regular_fanin_0 = squeeze_node_view->GetRegularFanin(0);
const auto* conv_node_view = squeeze_regular_fanin_0.node_view();
const auto* conv_node_def = conv_node_view->node();
if (!(IsConv2D(*conv_node_def) ||
(IsConv3D(*conv_node_def) && IsMKLEnabled())) ||
!HaveSameDataType(node_def, conv_node_def, "T") ||
HasControlFaninOrFanout(*conv_node_view) ||
!HasAtMostOneFanoutAtPort0(*conv_node_view) ||
IsInPreserveSet(ctx, conv_node_def))
return false;
std::vector<int32> dims;
if (!TryGetNodeAttr(*squeeze_node_def, "squeeze_dims", &dims)) return false;
for (auto dim : dims) {
if ((dim == 3 && IsConv2D(*conv_node_def)) ||
(dim == 4 && IsConv3D(*conv_node_def)))
return false;
}
const ContractionWithSqueezeAndBiasAdd pattern{
conv_node_view->node_index(), squeeze_node_view->node_index(),
node_index};
if (!IsDeviceCompatible(ctx, pattern)) return false;
*matched = pattern;
return true;
}
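// Matches Conv2D + FusedBatchNorm in inference mode on CPU, with no
// consumers of the batch norm's secondary (training) outputs.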
bool FindConv2DWithBatchNorm(const RemapperContext& ctx, int node_index,
ContractionWithBatchNorm* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsFusedBatchNorm(*node_def)) return false;
bool dtypeU_is_float = HasDataType(node_def, DT_FLOAT, "U");
bool dtypeT_is_bf16 = HasDataType(node_def, DT_BFLOAT16, "T");
bool dtypeT_is_mkl_fp16 =
IsMKLEnabled() && HasDataType(node_def, DT_HALF, "T");
if (node_view->GetOp() != "FusedBatchNorm" &&
(!dtypeU_is_float || dtypeT_is_bf16 || dtypeT_is_mkl_fp16)) {
return false;
}
const auto* training_attr = node_view->GetAttr(kIsTraining);
if (training_attr != nullptr && training_attr->b()) return false;
if (HasControlFaninOrFanout(*node_view) ||
!node_view->GetRegularFanout(1).empty() ||
!node_view->GetRegularFanout(2).empty() ||
!node_view->GetRegularFanout(3).empty() ||
!node_view->GetRegularFanout(4).empty())
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* conv2d_node_view = regular_fanin_0.node_view();
const auto* conv2d_node_def = conv2d_node_view->node();
if (NodeIsOnCpu(conv2d_node_def) && ctx.xla_cpu_jit_disable_fusion) {
return false;
}
if (!IsConv2D(*conv2d_node_def) || !NodeIsOnCpu(conv2d_node_def) ||
!HaveSameDataType(node_def, conv2d_node_def) ||
!IsCpuCompatibleDataType(conv2d_node_def) ||
!IsCpuCompatibleDataFormat(ctx, conv2d_node_def) ||
HasControlFaninOrFanout(*conv2d_node_view) ||
!HasAtMostOneFanoutAtPort0(*conv2d_node_view) ||
IsInPreserveSet(ctx, conv2d_node_def))
return false;
matched->contraction = conv2d_node_view->node_index();
matched->fused_batch_norm = node_index;
if (!TryGetNodeAttr(*node_def, "epsilon", &matched->epsilon)) return false;
return true;
}
bool FindConv2DWithBatchNormAndActivation(
const RemapperContext& ctx, int node_index,
ContractionWithBatchNormAndActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsSupportedActivation(*node_def, nullptr)) return false;
if (IsSigmoid(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* batch_norm_node_view = regular_fanin_0.node_view();
ContractionWithBatchNorm base;
if (!FindConv2DWithBatchNorm(ctx, batch_norm_node_view->node_index(), &base))
return false;
const auto* fused_batch_norm_node_view =
ctx.graph_view.GetNode(base.fused_batch_norm);
const auto* fused_batch_norm_node_def = fused_batch_norm_node_view->node();
if (!HasAtMostOneFanoutAtPort0(*fused_batch_norm_node_view) ||
!HaveSameDataType(node_def, fused_batch_norm_node_def) ||
IsInPreserveSet(ctx, fused_batch_norm_node_def))
return false;
matched->contraction = base.contraction;
matched->fused_batch_norm = base.fused_batch_norm;
matched->activation = node_index;
matched->epsilon = base.epsilon;
return true;
}
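// Helper: checks whether the given input port of an Add node is produced by
// a contraction + BiasAdd pattern with a bounded number of fanouts.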
bool FindContractionWithBiasInPort(const RemapperContext& ctx,
const utils::MutableNodeView& add_node_view,
const NodeDef& add_node_def, int port_id,
ContractionWithBiasAdd* base,
const int allowed_fanouts = 1) {
if (add_node_view.NumRegularFanins() < port_id + 1) return false;
  const auto* bias_add_node_view =
      add_node_view.GetRegularFanin(port_id).node_view();
  if (bias_add_node_view == nullptr) return false;
const auto* bias_add_node_def = bias_add_node_view->node();
if (!FindContractionWithBias(ctx, bias_add_node_view->node_index(), base,
false))
return false;
if (bias_add_node_view->GetRegularFanout(0).size() > allowed_fanouts ||
!HaveSameDataType(&add_node_def, bias_add_node_def) ||
IsInPreserveSet(ctx, bias_add_node_def))
return false;
return true;
}
bool IsAddWithNoBroadcast(const RemapperContext& ctx, const NodeDef& node) {
if (!IsAdd(node)) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node.name());
if (props.size() == 2 &&
ShapesSymbolicallyEqual(props[0].shape(), props[1].shape())) {
return true;
}
return false;
}
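// Matches Pad + Conv3D/_FusedConv3D on CPU with oneDNN enabled.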
bool FindPadWithConv3D(const RemapperContext& ctx, int node_index,
PadWithConv3D* matched) {
if (!IsMKLEnabled()) return false;
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!NodeIsOnCpu(node_def)) return false;
if (!(IsConv3D(*node_def) || node_def->op() == kFusedConv3D)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
  const auto* pad_node_view = regular_fanin_0.node_view();
  const auto* pad_node_def = pad_node_view->node();
  // Verify the producer is a Pad with a matching dtype before touching its
  // second (paddings) input.
  if (pad_node_def->op() != "Pad" ||
      !HaveSameDataType(node_def, pad_node_def))
    return false;
  if (pad_node_view->NumRegularFanins() < 2) return false;
  const auto& padding_const = pad_node_view->GetRegularFanin(1);
  const auto* padding_const_node_view = padding_const.node_view();
const PadWithConv3D pattern{node_view->node_index(),
pad_node_view->node_index(),
padding_const_node_view->node_index()};
*matched = pattern;
return true;
}
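// Matches contraction + BiasAdd + Add/AddN, trying the contraction on either
// input port of the Add.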
bool FindContractionWithBiasAddAndAdd(const RemapperContext& ctx,
const utils::MutableNodeView& node_view,
ContractionWithBiasAddAndAdd* matched) {
if (HasControlFaninOrFanout(node_view) || node_view.NumRegularFanins() != 2)
return false;
const auto* node_def = node_view.node();
if (!IsAddN(*node_def) && !IsAddWithNoBroadcast(ctx, *node_def)) return false;
if (!NodeIsOnCpu(node_def)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
ContractionWithBiasAdd base;
matched->port_id = 0;
if (!FindContractionWithBiasInPort(ctx, node_view, *node_def,
matched->port_id, &base)) {
matched->port_id = 1;
if (!FindContractionWithBiasInPort(ctx, node_view, *node_def,
matched->port_id, &base)) {
return false;
}
}
matched->contraction = base.contraction;
matched->bias_add = base.bias_add;
matched->add = node_view.node_index();
matched->bias_port = base.bias_port;
return true;
}
bool FindContractionWithBiasAddAndAdd(const RemapperContext& ctx,
int node_index,
ContractionWithBiasAddAndAdd* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
return FindContractionWithBiasAddAndAdd(ctx, *node_view, matched);
}
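// Matches contraction + BiasAdd + Add + activation on CPU.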
bool FindContractionWithBiasAndAddActivation(
const RemapperContext& ctx, int node_index,
ContractionWithBiasAndAddActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (node_def == nullptr) return false;
if (!IsSupportedActivation(*node_def, nullptr)) return false;
if (!NodeIsOnCpu(node_def)) return false;
if (IsTanh(*node_def)) return false;
if (IsSigmoid(*node_def)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* add_node_view = regular_fanin_0.node_view();
ContractionWithBiasAddAndAdd base;
if (!FindContractionWithBiasAddAndAdd(ctx, *add_node_view, &base)) {
return false;
}
const auto* bias_add_node_view =
add_node_view->GetRegularFanin(base.port_id).node_view();
const auto* contraction_node_view =
bias_add_node_view->GetRegularFanin(0).node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!(IsConv2D(*contraction_node_def) || IsConv3D(*contraction_node_def)) &&
IsLeakyRelu(*node_def))
return false;
if (IsConv3D(*contraction_node_def) && !IsMKLEnabled()) return false;
const ContractionWithBiasAndAddActivation pattern{
base.contraction, base.bias_add, base.add,
base.port_id, node_index, base.bias_port};
*matched = pattern;
return true;
}
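// Matches a Swish activation (x * sigmoid(x)) applied to Conv2D + BiasAdd or
// Conv2D + FusedBatchNorm[V2|V3] on CPU.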
bool FindConv2DSwish(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern conv2dbiasaddswish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "BiasAdd", "biasadd", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "bias", NodeStatus::kRemain}
}
}
}
},
{ "BiasAdd", "biasadd", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormswish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNorm", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNorm", "fusebatchnorm", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormv2swish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNormV2", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNormV2", "fusebatchnorm", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormv3swish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNormV3", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNormV3", "fusebatchnorm", NodeStatus::kRemove}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!(HasDataType(mul_node_def, DT_FLOAT) ||
HasDataType(mul_node_def, DT_HALF) ||
HasDataType(mul_node_def, DT_BFLOAT16)))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
bool is_biasadd_pattern = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbiasaddswish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
is_biasadd_pattern = found_op_type_match;
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormswish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormv2swish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormv3swish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (found_op_type_match) {
NodeDef* conv2d_node =
ctx->graph_view.GetNode(matched_nodes_map->at("conv"))->node();
if (!IsCpuCompatibleConv2D(*ctx, conv2d_node)) return false;
if (!is_biasadd_pattern) {
NodeDef* fusedbatchnorm_node =
ctx->graph_view.GetNode(matched_nodes_map->at("fusebatchnorm"))
->node();
bool is_training = true;
if (!TryGetNodeAttr(*fusedbatchnorm_node, kIsTraining, &is_training) ||
is_training)
return false;
if (fusedbatchnorm_node->op() != "FusedBatchNorm" &&
(!HasDataType(fusedbatchnorm_node, DT_FLOAT, "U") ||
(HasDataType(fusedbatchnorm_node, DT_FLOAT, "U") &&
!HasDataType(fusedbatchnorm_node, DT_FLOAT, "T")))) {
return false;
}
}
}
return found_op_type_match;
}
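// Verifies that each named node is a scalar constant (possibly behind a
// Cast) whose value matches the expected one within a tolerance of 1e-2.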
inline bool VerifyConstants(RemapperContext* ctx,
std::map<string, int>* nodes_map,
std::map<string, float>* values_map) {
using utils::MutableNodeView;
  for (const auto& entry : *values_map) {
    int node_idx = nodes_map->at(entry.first);
MutableNodeView* node_view = ctx->graph_view.GetNode(node_idx);
NodeDef* node_def = node_view->node();
Tensor const_tensor;
if (node_def != nullptr && node_def->op() == "Cast") {
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* regular_node_view = regular_fanin_0.node_view();
node_def = regular_node_view->node();
}
if (node_def == nullptr || node_def->op() != "Const" ||
!const_tensor.FromProto(node_def->attr().at("value").tensor()) ||
const_tensor.NumElements() != 1) {
return false;
}
DataType dtype = const_tensor.dtype();
float const_value;
if (dtype == DT_FLOAT) {
const_value = const_tensor.flat<float>()(0);
} else if (dtype == DT_BFLOAT16) {
const_value = static_cast<float>(const_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
const_value = static_cast<float>(const_tensor.flat<Eigen::half>()(0));
} else {
return false;
}
    if (std::abs(const_value - entry.second) > 1e-2) return false;
}
return true;
}
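// Matches the exact GeLU subgraph rooted at a Mul:
//   0.5 * x * (1 + Erf(x / sqrt(2))), where x = BiasAdd(MatMul),
// in either of the two orderings of the outer Mul operands.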
bool IsMatchedMatMulBiasAddAndGeluExact(
RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map = nullptr,
std::set<int>* remove_node_indices = nullptr) {
auto* node_view = ctx.graph_view.GetNode(node_index);
using utils::MatchingDirection;
using utils::NodeStatus;
static utils::OpTypePattern* gelu_exact_pattern = new utils::OpTypePattern
{"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "erf_plus_one_times_one_half", NodeStatus::kRemove,
{
{"Add|AddV2", "erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "erf", NodeStatus::kRemove,
{
{"Mul", "bias_add_x_sqrt_one_half",
NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove},
{"Cast|Const", "sqrt_one_half", NodeStatus::kRemain}
}
}
}
},
{"Cast|Const", "one", NodeStatus::kRemain}
}
},
{"Cast|Const", "one_half", NodeStatus::kRemain}
}
},
{"BiasAdd", "bias_add", NodeStatus::kRemove,
{
{"MatMul", "matmul", NodeStatus::kRemove},
{"*", "bias", NodeStatus::kRemain}
}
}
}
};
static utils::OpTypePattern* gelu_exact_pattern2 = new utils::OpTypePattern
{"Mul", "output", NodeStatus::kReplace,
{
{"Add|AddV2", "erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "erf", NodeStatus::kRemove,
{
{"Mul", "bias_add_x_sqrt_one_half", NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove},
{"Cast|Const", "sqrt_one_half", NodeStatus::kRemain}
}
}
}
},
{"Cast|Const", "one", NodeStatus::kRemain}
}
},
{"Mul", "erf_plus_one_times_one_half", NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove,
{
{"MatMul", "matmul", NodeStatus::kRemove},
{"*", "bias", NodeStatus::kRemain}
}
},
{"Cast|Const", "one_half", NodeStatus::kRemain}
}
}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx.graph_view));
std::map<string, int> dummy_matched_nodes_map;
std::set<int> dummy_remove_node_indices;
if (!matched_nodes_map) matched_nodes_map = &dummy_matched_nodes_map;
if (!remove_node_indices) remove_node_indices = &dummy_remove_node_indices;
if (graph_matcher.GetMatchedNodes(*gelu_exact_pattern, ctx.nodes_to_preserve,
node_view, matched_nodes_map,
remove_node_indices)) {
return true;
}
matched_nodes_map->clear();
remove_node_indices->clear();
return graph_matcher.GetMatchedNodes(*gelu_exact_pattern2,
ctx.nodes_to_preserve, node_view,
matched_nodes_map, remove_node_indices);
}
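// Matches MatMul + BiasAdd + GeLU, either the exact erf-based form or the
// tanh approximation (which expects an already fused _FusedMatMul carrying a
// single fused BiasAdd), and verifies the pattern's constants.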
bool FindMatMulBiasAddAndGelu(RemapperContext* ctx, int node_index,
const Cluster* cluster,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
bool* is_gelu_approximate) {
if (!IsMKLEnabled() && !BlasLtMatmulEnabled() &&
!RuntimeFusionEnabled(cluster))
return false;
using utils::MatchingDirection;
using utils::NodeStatus;
bool found_gelu_exact = false;
bool found_gelu_approximate = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_gelu_exact = IsMatchedMatMulBiasAddAndGeluExact(
*ctx, node_index, matched_nodes_map, remove_node_indices);
if (!found_gelu_exact) {
utils::OpTypePattern subgraph_gpu =
{"Mul", "mul", NodeStatus::kRemove,
{
{"Pow", "pow", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove},
{"Const", "three", NodeStatus::kRemain}
}
},
{"Const", "empirical_const", NodeStatus::kRemain}
}
};
utils::OpTypePattern subgraph_cpu =
{"Mul", "mul", NodeStatus::kRemove,
{
{"Mul", "empirical_const_times_matmul", NodeStatus::kRemove,
{
{"Const", "empirical_const", NodeStatus::kRemain},
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
},
{"Square", "square", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
}
}
};
utils::MutableNodeView* node_view = ctx->graph_view.GetNode(node_index);
const NodeDef* node_def = node_view->node();
bool root_on_gpu = NodeIsOnGpu(node_def);
utils::OpTypePattern* subgraph_pattern =
root_on_gpu ? &subgraph_gpu : &subgraph_cpu;
utils::OpTypePattern gelu_approximate_pattern =
{"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "tanh_plus_one_times_one_half", NodeStatus::kRemove,
{
{"AddV2", "tanh_plus_one", NodeStatus::kRemove,
{
{"Tanh", "tanh", NodeStatus::kRemove,
{
{"Mul", "matmul_plus_mul_times_square_root_two_over_pi", NodeStatus::kRemove,
{
{"AddV2", "matmul_plus_mul", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove},
*subgraph_pattern
}
},
{"Const", "square_root_two_over_pi", NodeStatus::kRemain}
}
}
}
},
{"Const", "one", NodeStatus::kRemain}
}
},
{"Const", "one_half", NodeStatus::kRemain}
}
},
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_gelu_approximate = graph_matcher.GetMatchedNodes(
gelu_approximate_pattern, ctx->nodes_to_preserve, node_view,
matched_nodes_map, remove_node_indices);
}
if (found_gelu_exact) {
NodeDef* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("matmul"))->node();
if (NodeIsOnCpu(matmul_node) && ctx->xla_cpu_jit_disable_fusion) {
return false;
}
DataType matmul_dtype = GetDataTypeFromAttr(*matmul_node, "T");
bool cpu_ok = IsMKLEnabled() && IsCpuCompatibleMatMul(*ctx, matmul_node);
cpu_ok = cpu_ok && matmul_node->attr().contains("transpose_a") &&
!matmul_node->attr().at("transpose_a").b();
bool gpu_ok = NodeIsOnGpu(matmul_node) && RuntimeFusionEnabled(cluster) &&
matmul_dtype == DT_HALF;
if (!cpu_ok && !gpu_ok) return false;
if (gpu_ok) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx->graph_properties.GetInputProperties(matmul_node->name());
const TensorShapeProto& a_shape =
!input_props.empty() ? input_props[0].shape() : TensorShapeProto();
      const TensorShapeProto& b_shape =
          input_props.size() >= 2 ? input_props[1].shape() : TensorShapeProto();
bool valid_dims = Rank(a_shape) == 2 && Rank(b_shape) == 2 &&
IsKnown(a_shape.dim(1)) &&
IsKnown(b_shape.dim(1)) &&
a_shape.dim(1).size() % 2 == 0 &&
b_shape.dim(1).size() % 2 == 0;
if (!valid_dims) return false;
}
std::map<string, float> values_map = {
{"sqrt_one_half", 0.707106}, {"one", 1.0}, {"one_half", 0.5}};
if (!VerifyConstants(ctx, matched_nodes_map, &values_map)) return false;
} else if (found_gelu_approximate) {
NodeDef* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("matmul"))->node();
if (NodeIsOnCpu(matmul_node) && ctx->xla_cpu_jit_disable_fusion) {
return false;
}
if (!IsMKLEnabled() && !NodeIsOnGpu(matmul_node)) return false;
if (NodeIsOnCpu(matmul_node) &&
matmul_node->attr().contains("transpose_a") &&
matmul_node->attr().at("transpose_a").b()) {
return false;
}
    auto fused_ops = matmul_node->attr().at("fused_ops").list().s();
    if (fused_ops.size() != 1 || fused_ops.at(0) != "BiasAdd") return false;
std::map<string, float> values_map = {{"square_root_two_over_pi", 0.797884},
{"one", 1.0},
{"one_half", 0.5},
{"empirical_const", 0.044715}};
if (NodeIsOnGpu(matmul_node)) {
values_map["three"] = 3.0;
}
if (!VerifyConstants(ctx, matched_nodes_map, &values_map)) return false;
} else {
return false;
}
  *is_gelu_approximate = found_gelu_approximate;
return (found_gelu_exact || found_gelu_approximate);
}
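// Matches Maximum(alpha * x, x), i.e. a LeakyRelu with slope alpha. The
// slope is read from the constant (possibly behind a Cast) and must be
// non-negative.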
bool FindMulAndMaximum(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices, float* alpha) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern mulmax_pattern{
"Maximum", "max_to_leakyrelu", NodeStatus::kReplace,
{
{ "Mul", "mul", NodeStatus::kRemove,
{
{ "*", "input", NodeStatus::kRemain},
{ "Const|Cast", "alpha", NodeStatus::kRemain}
}
},
{ "*", "input", NodeStatus::kRemain}
}
};
auto* max_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!HasDataType(max_node_def, DT_HALF) &&
!HasDataType(max_node_def, DT_BFLOAT16) &&
!HasDataType(max_node_def, DT_FLOAT) &&
!HasDataType(max_node_def, DT_DOUBLE))
return false;
if (!NodeIsOnCpu(max_node_def) && !IsMKLEnabled()) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
mulmax_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
const auto* alpha_node_view =
ctx->graph_view.GetNode(matched_nodes_map->at("alpha"));
const auto* alpha_node_def = alpha_node_view->node();
if (alpha_node_def != nullptr && alpha_node_def->op() == "Cast") {
const auto& regular_fanin_0 = alpha_node_view->GetRegularFanin(0);
const auto* regular_node_view = regular_fanin_0.node_view();
alpha_node_def = regular_node_view->node();
}
Tensor alpha_tensor;
if (alpha_node_def == nullptr || alpha_node_def->op() != "Const" ||
!alpha_tensor.FromProto(alpha_node_def->attr().at("value").tensor()) ||
alpha_tensor.NumElements() != 1) {
return false;
}
DataType dtype = alpha_tensor.dtype();
float alpha_val;
if (dtype == DT_FLOAT) {
alpha_val = alpha_tensor.flat<float>()(0);
} else if (dtype == DT_BFLOAT16) {
alpha_val = static_cast<float>(alpha_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
alpha_val = static_cast<float>(alpha_tensor.flat<Eigen::half>()(0));
} else {
return false;
}
if (alpha_val < 0) return false;
*alpha = alpha_val;
}
return found_op_type_match;
}
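// Matches Mul(x, Sigmoid(x)), i.e. the Swish/SiLU activation, on CPU with
// oneDNN enabled; both operands must refer to the same input tensor.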
bool FindSigmoidAndMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern sigmoidmul_pattern{
"Mul", "mul_to_swish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "*", "input", NodeStatus::kRemain}
}
},
{ "*", "input", NodeStatus::kRemain}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!(HasDataType(mul_node_def, DT_FLOAT) ||
HasDataType(mul_node_def, DT_HALF) ||
HasDataType(mul_node_def, DT_BFLOAT16)))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
sigmoidmul_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
NodeDef* matched_sigmoid_node =
ctx->graph_view.GetNode(matched_nodes_map->at("sigmoid"))->node();
auto in_tensor_sigmoid = matched_sigmoid_node->input(0);
if ((mul_node_def->input(0) != in_tensor_sigmoid) &&
(mul_node_def->input(1) != in_tensor_sigmoid)) {
found_op_type_match = false;
}
}
return found_op_type_match;
}
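// Matches a layer-normalization subgraph spelled out in primitive ops: mean
// and variance via Mean/SquaredDifference, normalization via Rsqrt, followed
// by the gamma/beta affine transform.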
bool IsCommonNormPattern(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern subgraph_pattern =
{"Rsqrt", "rsqrt", NodeStatus::kRemove,
{
{"AddV2|Add", "add", NodeStatus::kRemove,
{
{"Mean", "mean0", NodeStatus::kRemove,
{
{"SquaredDifference", "squareddiff", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mean", "mean1", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const", "r_indices1", NodeStatus::kRemain}
}
}
}
},
{"Const", "r_indices0", NodeStatus::kRemain}
}
},
{"Const", "epsilon", NodeStatus::kRemain}
}
}
}
};
utils::OpTypePattern common_norm_pattern =
{"AddV2|Add", "output", NodeStatus::kReplace,
{
{"Mul", "mul0", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mul", "mul1", NodeStatus::kRemove,
{
subgraph_pattern,
{"Const", "gamma", NodeStatus::kRemain}
}
}
}
},
{"Sub", "sub0", NodeStatus::kRemove,
{
{"Const", "beta", NodeStatus::kRemain},
{"Mul", "mul2", NodeStatus::kRemove,
{
{"Mul", "mul1", NodeStatus::kRemove},
{"Mean", "mean1", NodeStatus::kRemove}
}
},
}
}
}
};
utils::OpTypePattern common_norm_pattern_1 =
{"AddV2|Add", "output", NodeStatus::kReplace,
{
{"Mul", "mul0", NodeStatus::kRemove,
{
{"Mul", "mul1", NodeStatus::kRemove,
{
{"Sub", "sub0", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mean", "mean1", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const", "r_indices1", NodeStatus::kRemain}
}
}
}
},
subgraph_pattern
}
},
{"*", "gamma", NodeStatus::kRemain}
}
},
{"*", "beta", NodeStatus::kRemain},
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
bool found_op_type_match =
graph_matcher.GetMatchedNodes(common_norm_pattern, {},
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices) ||
graph_matcher.GetMatchedNodes(common_norm_pattern_1, {},
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
return found_op_type_match;
}
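// Matches layer normalization expressed either as Reshape +
// FusedBatchNormV3 + Reshape with unit scale and zero offset, or as the
// primitive-op pattern above; extracts epsilon and the input/gamma/beta
// node names.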
bool FindMklLayerNorm(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<string>* input_node_names, float* epsilon) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern layer_norm_pattern =
{"AddV2", "output", NodeStatus::kReplace,
{
{"*", "beta", NodeStatus::kRemain},
{"Mul", "scale", NodeStatus::kRemove,
{
{"Reshape", "post_reshape", NodeStatus::kRemove,
{
{"FusedBatchNormV3", "fused_batch_norm", NodeStatus::kRemove,
{
{"Reshape", "pre_reshape", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"*", "pre_shape", NodeStatus::kRemain}
}
},
{"Fill", "fill_scale", NodeStatus::kRemove,
{
{"*", "dims_fill_scale", NodeStatus::kRemain},
{"Const", "unit_gamma", NodeStatus::kRemain}
}
},
{"Fill", "fill_offset", NodeStatus::kRemove,
{
{"*", "dims_fill_offset", NodeStatus::kRemain},
{"Const", "zero_beta", NodeStatus::kRemain}
}
},
{"Const", "empty", NodeStatus::kRemain},
{"Const", "empty", NodeStatus::kRemain}
}
},
{"*", "post_shape", NodeStatus::kRemain}
}
},
{"*", "gamma", NodeStatus::kRemain}
}
}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
bool found_op_type_match = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(layer_norm_pattern, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (!found_op_type_match) {
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = IsCommonNormPattern(
ctx, node_index, matched_nodes_map, remove_node_indices);
}
if (found_op_type_match) {
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
true,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
*epsilon = 0.001;
if (matched_nodes_map->count("fused_batch_norm")) {
NodeDef* fused_batch_norm_node =
ctx->graph_view.GetNode(matched_nodes_map->at("fused_batch_norm"))
->node();
if (fused_batch_norm_node->attr().count("epsilon")) {
*epsilon = fused_batch_norm_node->attr().at("epsilon").f();
}
bool is_training = false;
if (!TryGetNodeAttr(*fused_batch_norm_node, kIsTraining, &is_training) ||
!is_training)
return false;
NodeDef* empty_const_node =
ctx->graph_view.GetNode(matched_nodes_map->at("empty"))->node();
Tensor const_tensor;
if (empty_const_node != nullptr && empty_const_node->op() == "Const" &&
const_tensor.FromProto(
empty_const_node->attr().at("value").tensor())) {
if (const_tensor.NumElements() != 0) return false;
} else {
return false;
}
auto* pre_reshape_node =
ctx->graph_view.GetNode(matched_nodes_map->at("pre_reshape"))->node();
auto* scale_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
input_node_names->clear();
input_node_names->resize(3);
input_node_names->at(0) = pre_reshape_node->input(0);
input_node_names->at(1) = scale_node->name();
input_node_names->at(2) = beta_node->name();
} else {
NodeDef* mean1_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mean1"))->node();
bool keep_dims = false;
if (!mean1_node ||
!TryGetNodeAttr(*mean1_node, "keep_dims", &keep_dims) || !keep_dims)
return false;
NodeDef* mean_axis_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axis_node) {
VLOG(1) << "Unable to find reduction axis node";
return false;
}
Tensor mean_axis_tensor;
if (!mean_axis_tensor.FromProto(
mean_axis_node->attr().at("value").tensor())) {
return false;
}
DataType dtype = mean_axis_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) return false;
int expected_axis_count = 1;
if (mean_axis_tensor.NumElements() != expected_axis_count) return false;
NodeDef* input_node =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
      auto input_node_props =
          ctx->graph_properties.GetOutputProperties(input_node->name());
      if (input_node_props.empty()) return false;
      int rank = Rank(input_node_props[0].shape());
if (dtype == DT_INT32) {
if (static_cast<int32>(rank - 1) != mean_axis_tensor.flat<int32>()(0))
return false;
} else {
if (static_cast<int64>(rank - 1) != mean_axis_tensor.flat<int64>()(0))
return false;
}
auto* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
input_node_names->clear();
input_node_names->resize(3);
input_node_names->at(0) = mean1_node->input(0);
input_node_names->at(1) = gamma_node->name();
input_node_names->at(2) = beta_node->name();
}
NodeDef* input_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
auto input_props =
ctx->graph_properties.GetOutputProperties(input_node_def->name());
NodeDef* output_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
    auto output_props =
        ctx->graph_properties.GetOutputProperties(output_node_def->name());
    if (input_props.empty() || output_props.empty()) return false;
    if (ShapesSymbolicallyEqual(input_props[0].shape(),
                                output_props[0].shape())) {
int rank = Rank(input_props[0].shape());
if (rank < 2 || rank > 3) return false;
} else {
return false;
}
}
return found_op_type_match;
}
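// Matches a float, inference-mode FusedBatchNorm with enough constant inputs
// to precompute its scaling factors.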
bool FindFusedBatchNorm(const RemapperContext& ctx, int node_index,
FusedBatchNorm* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (ctx.xla_cpu_jit_disable_fusion && NodeIsOnCpu(node_def)) return false;
if (!IsFusedBatchNorm(*node_def)) return false;
if (GetDataTypeFromAttr(*node_def, "T") != DT_FLOAT) return false;
bool is_training = true;
if (!TryGetNodeAttr(*node_def, kIsTraining, &is_training)) return false;
if (is_training) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node_def->name());
bool const_scaling_factor =
props.size() == 5 &&
props[1].has_value() &&
props[4].has_value();
auto const_inputs = std::count_if(
props.begin(), props.end(),
[](const OpInfo::TensorProperties& props) { return props.has_value(); });
bool can_remap = const_scaling_factor || const_inputs >= 4;
if (!can_remap) return false;
if (node_view->GetRegularFanouts().size() > 1) {
return false;
}
matched->fused_batch_norm = node_index;
return true;
}
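// Returns true if cuDNN's spatial-persistent batch norm mode is requested
// via TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT; requires cuDNN 7.4.2+.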
bool BatchnormSpatialPersistentEnabled() {
#if CUDNN_VERSION >= 7402
static bool is_enabled = [] {
bool is_enabled = false;
TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
"TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT",
false, &is_enabled));
return is_enabled;
}();
return is_enabled;
#else
return false;
#endif
}
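// Matches FusedBatchNorm + Relu, or FusedBatchNorm + Add(side input) + Relu,
// subject to device, dtype and data-format constraints.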
bool FindFusedBatchNormEx(const RemapperContext& ctx, int node_index,
FusedBatchNormEx* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsRelu(*node_def) || HasControlFaninOrFanout(*node_view)) return false;
const auto valid_batch_norm =
[&](const utils::MutableNodeView& fused_batch_norm) -> bool {
const auto* fused_batch_norm_node_def = fused_batch_norm.node();
if (!IsFusedBatchNorm(*fused_batch_norm_node_def)) return false;
if (!IsMKLEnabled() && !NodeIsOnGpu(fused_batch_norm_node_def))
return false;
DataType t_dtype = GetDataTypeFromAttr(*fused_batch_norm_node_def, "T");
if (NodeIsOnGpu(fused_batch_norm_node_def)) {
if (t_dtype != DT_FLOAT && t_dtype != DT_HALF) return false;
} else {
if (ctx.xla_cpu_jit_disable_fusion) return false;
if (IsMKLEnabled() && !IsDataTypeSupportedByOneDNNOnThisCPU(t_dtype))
return false;
}
bool is_training;
if (!GetNodeAttr(*fused_batch_norm_node_def, kIsTraining, &is_training)
.ok())
return false;
string data_format;
if (!GetNodeAttr(*fused_batch_norm_node_def, kDataFormat, &data_format)
.ok())
return false;
if (data_format != "NHWC" && data_format != "NCHW") return false;
if (is_training && NodeIsOnGpu(fused_batch_norm_node_def)) {
if (data_format != "NHWC") return false;
if (t_dtype != DT_HALF) return false;
const auto& props = ctx.graph_properties.GetInputProperties(
fused_batch_norm_node_def->name());
const bool valid_channel_dim = !props.empty() &&
props[0].shape().dim_size() == 4 &&
props[0].shape().dim(3).size() % 4 == 0;
if (!valid_channel_dim) return false;
if (!BatchnormSpatialPersistentEnabled()) return false;
}
if ((fused_batch_norm_node_def->op() != "FusedBatchNorm") &&
!HasDataType(fused_batch_norm_node_def, DT_FLOAT, "U"))
return false;
if (HasControlFaninOrFanout(fused_batch_norm) ||
!HasAtMostOneDataFanoutAtPort0(fused_batch_norm) ||
IsInPreserveSet(ctx, fused_batch_norm_node_def))
return false;
return true;
};
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = regular_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (valid_batch_norm(*relu_fanin_0_node_view)) {
matched->activation = node_index;
matched->fused_batch_norm = regular_fanin_0.node_index();
return true;
}
if (IsAdd(*relu_fanin_0_node_def)) {
if (IsMKLEnabled() && !NodeIsOnGpu(node_def)) return false;
if (HasControlFaninOrFanout(*relu_fanin_0_node_view) ||
!HasAtMostOneFanoutAtPort0(*relu_fanin_0_node_view) ||
IsInPreserveSet(ctx, relu_fanin_0_node_def))
return false;
const auto& props =
ctx.graph_properties.GetInputProperties(relu_fanin_0_node_def->name());
if (props.size() < 2 ||
!ShapesSymbolicallyEqual(props[0].shape(), props[1].shape()))
return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 2) return false;
const auto& add_regular_fanin_0 =
relu_fanin_0_node_view->GetRegularFanin(0);
const auto& add_regular_fanin_1 =
relu_fanin_0_node_view->GetRegularFanin(1);
if (valid_batch_norm(*add_regular_fanin_0.node_view())) {
matched->activation = node_index;
matched->side_input = add_regular_fanin_1.node_index();
matched->fused_batch_norm = add_regular_fanin_0.node_index();
matched->invalidated = regular_fanin_0.node_index();
return true;
}
if (valid_batch_norm(*add_regular_fanin_1.node_view())) {
matched->activation = node_index;
matched->side_input = add_regular_fanin_0.node_index();
matched->fused_batch_norm = add_regular_fanin_1.node_index();
matched->invalidated = regular_fanin_0.node_index();
return true;
}
}
return false;
}
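// Matches the backward counterpart: FusedBatchNormGrad fed by a ReluGrad
// (with an optional side-input gradient) that pairs with a forward fused
// batch norm found by FindFusedBatchNormEx.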
bool FindFusedBatchNormGradEx(const RemapperContext& ctx, int node_index,
FusedBatchNormGradEx* matched) {
const utils::MutableNodeView* node_view = ctx.graph_view.GetNode(node_index);
const auto valid_batch_norm_grad =
[&](const utils::MutableNodeView& fused_batch_norm_grad) -> bool {
const NodeDef* node_def = fused_batch_norm_grad.node();
if (!IsFusedBatchNormGrad(*node_def) ||
HasControlFaninOrFanout(fused_batch_norm_grad))
return false;
if (!NodeIsOnGpu(node_def)) return false;
bool is_training;
if (!GetNodeAttr(*node_def, kIsTraining, &is_training).ok() || !is_training)
return false;
DataType t_dtype = GetDataTypeFromAttr(*node_def, "T");
if (t_dtype != DT_HALF) return false;
string data_format;
if (!GetNodeAttr(*node_def, kDataFormat, &data_format).ok()) return false;
if (data_format != "NHWC") return false;
const auto& props =
ctx.graph_properties.GetInputProperties(node_def->name());
const bool valid_channel_dim = !props.empty() &&
props[0].shape().dim_size() == 4 &&
props[0].shape().dim(3).size() % 4 == 0;
if (!valid_channel_dim) return false;
if (!BatchnormSpatialPersistentEnabled()) return false;
if (node_def->op() != "FusedBatchNorm" &&
!HasDataType(node_def, DT_FLOAT, "U"))
return false;
return true;
};
if (ctx.xla_auto_clustering_on) return false;
if (!valid_batch_norm_grad(*node_view)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const utils::MutableFanoutView& regular_fanin_0 =
node_view->GetRegularFanin(0);
const utils::MutableNodeView* relugrad_node_view =
regular_fanin_0.node_view();
const NodeDef* relugrad_node_def = relugrad_node_view->node();
bool is_relugrad = IsReluGrad(*relugrad_node_def);
if (!is_relugrad || HasControlFaninOrFanout(*relugrad_node_view) ||
IsInPreserveSet(ctx, relugrad_node_def))
return false;
  // The second fanin (the forward features) is accessed below, so require at
  // least two regular fanins.
  if (relugrad_node_view->NumRegularFanins() < 2) return false;
  const utils::MutableFanoutView& fanin_1 =
      relugrad_node_view->GetRegularFanin(1);
const utils::MutableNodeView* fwd_node_view = fanin_1.node_view();
FusedBatchNormEx fwd_matched;
FindFusedBatchNormEx(ctx, fwd_node_view->node_index(), &fwd_matched);
bool fwd_bn_act_used = fwd_matched.activation != kMissingIndex &&
fwd_matched.side_input == kMissingIndex;
bool fwd_bn_add_act_used = fwd_matched.activation != kMissingIndex &&
fwd_matched.side_input != kMissingIndex;
if (fwd_bn_act_used && relugrad_node_view->GetRegularFanout(0).size() == 1) {
matched->activation_grad = regular_fanin_0.node_index();
matched->fused_batch_norm_grad = node_index;
matched->fwd_fused_batch_norm = fwd_matched.fused_batch_norm;
return true;
}
if (fwd_bn_add_act_used &&
relugrad_node_view->GetRegularFanout(0).size() == 2) {
const utils::MutableFanoutView& fwd_batch_norm_node =
node_view->GetRegularFanin(5);
if (fwd_matched.fused_batch_norm != fwd_batch_norm_node.node_index()) {
return false;
}
const std::vector<utils::MutableFaninView>& fanouts_at_port_0 =
relugrad_node_view->GetRegularFanouts()[0];
const utils::MutableNodeView* fanout_0_node_view =
ctx.graph_view.GetNode(fanouts_at_port_0[0].node_view()->GetName());
const utils::MutableNodeView* fanout_1_node_view =
ctx.graph_view.GetNode(fanouts_at_port_0[1].node_view()->GetName());
const NodeDef* fanout_0_node_def = fanout_0_node_view->node();
const NodeDef* fanout_1_node_def = fanout_1_node_view->node();
const NodeDef* node_def = node_view->node();
matched->activation_grad = regular_fanin_0.node_index();
matched->fused_batch_norm_grad = node_index;
matched->fwd_fused_batch_norm = fwd_matched.fused_batch_norm;
if (fanout_0_node_def == node_def) {
matched->side_input_grad = fanout_1_node_view->node_index();
return true;
}
if (fanout_1_node_def == node_def) {
matched->side_input_grad = fanout_0_node_view->node_index();
return true;
}
}
return false;
}
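// Matches AsString + StringToHashBucketFast on an integer input, with the
// default width (-1) and empty fill attributes.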
bool FindTensorToHashBucket(const RemapperContext& ctx, int node_index,
TensorToHashBucket* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsStringToHashBucketFast(*node_def) ||
HasControlFaninOrFanout(*node_view)) {
return false;
}
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* as_string_node_view = regular_fanin_0.node_view();
const auto* as_string_node_def = as_string_node_view->node();
bool is_as_string = IsAsString(*as_string_node_def);
if (!is_as_string || HasControlFaninOrFanout(*as_string_node_view) ||
!HasAtMostOneFanoutAtPort0(*as_string_node_view) ||
IsInPreserveSet(ctx, as_string_node_def))
return false;
if (!HasDataType(as_string_node_def, DT_INT8) &&
!HasDataType(as_string_node_def, DT_INT16) &&
!HasDataType(as_string_node_def, DT_INT32) &&
!HasDataType(as_string_node_def, DT_INT64)) {
return false;
}
int width;
if (!GetNodeAttr(*as_string_node_def, kWidth, &width).ok() || width != -1) {
return false;
}
string fill;
if (!GetNodeAttr(*as_string_node_def, kFill, &fill).ok() || !fill.empty()) {
return false;
}
if (as_string_node_view->NumRegularFanins() < 1) return false;
const auto& fanin_0 = as_string_node_view->GetRegularFanin(0);
const auto* pre_node_view = fanin_0.node_view();
const TensorToHashBucket pattern{pre_node_view->node_index(),
as_string_node_view->node_index(),
node_index};
*matched = pattern;
return true;
}
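// Matches the hard-swish activation x * Relu6(x + 3) * 1/6, verifying the
// 3.0 and 1/6 constants.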
bool FindHardSwish(RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern pattern {"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "mul_one_sixth", NodeStatus::kRemove,
{
{"Const|Cast", "one_sixth", NodeStatus::kRemain},
{"*", "input", NodeStatus::kRemain}
}
},
{"Relu6", "relu6", NodeStatus::kRemove,
{
{"Add|AddV2", "add", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const|Cast", "three", NodeStatus::kRemain}
}
}
}
},
}
};
bool found_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx.graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_match = graph_matcher.GetMatchedNodes(
pattern, ctx.nodes_to_preserve, ctx.graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_match) {
std::map<string, float> values_map = {{"three", 3.0},
{"one_sixth", 0.16666}};
if (!VerifyConstants(&ctx, matched_nodes_map, &values_map)) return false;
}
return found_match;
}
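// Matches (Conv2D|DepthwiseConv2dNative) + BiasAdd feeding the hard-swish
// pattern above.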
bool FindContractionWithBiasAddAndHardSwish(
RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
if (!FindHardSwish(ctx, node_index, matched_nodes_map, remove_node_indices))
return false;
const auto* add_node_view =
ctx.graph_view.GetNode(matched_nodes_map->at("add"));
const auto* add_node_def = add_node_view->node();
ContractionWithBiasAdd base;
int port_id = 0;
if (!FindContractionWithBiasInPort(ctx, *add_node_view, *add_node_def,
port_id, &base, 2)) {
port_id = 1;
if (!FindContractionWithBiasInPort(ctx, *add_node_view, *add_node_def,
port_id, &base, 2)) {
VLOG(2) << "Contraction + BiasAdd pattern was not found although"
<< " HardSwish pattern was found, so fusion failed.";
return false;
}
}
const auto* bias_node_def = ctx.graph_view.GetNode(base.bias_add)->node();
if (!HaveSameDataType(add_node_def, bias_node_def)) return false;
const auto* contraction_node_view = ctx.graph_view.GetNode(base.contraction);
const auto* contraction_node_def = contraction_node_view->node();
if (!IsConv2D(*contraction_node_def) &&
!IsDepthwiseConv2dNative(*contraction_node_def))
return false;
if (!IsCpuCompatibleConv2D(ctx, contraction_node_def) &&
!IsCpuCompatibleDepthwiseConv2dNative(contraction_node_def))
return false;
matched_nodes_map->insert({"contraction", base.contraction});
matched_nodes_map->insert({"bias_add", base.bias_add});
remove_node_indices->insert(base.contraction);
remove_node_indices->insert(base.bias_add);
return true;
}
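// Finds a BatchMatMulV2 combined with a single-element Mul and an Add that
// can be fused into _MklFusedBatchMatMulV2 (oneDNN builds only). Two shapes
// of the pattern are matched:
//   pattern 1: Add(Mul(BatchMatMulV2(x, y), scale), addend)
//   pattern 2: Add(BatchMatMulV2(Mul(x, scale), y), addend)
// The BatchMatMulV2 output must be 4D and the addend 4D with dim(1) == 1.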
bool FindFusedBatchMatMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<string>* input_node_names) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
int pattern = 0;
utils::OpTypePattern fusion_pattern1 =
{"Add|AddV2", "output", NodeStatus::kReplace,
{
{"Mul", "mul", NodeStatus::kRemove,
{
{"BatchMatMulV2", "batch_matmul", NodeStatus::kRemove},
{"*", "multiplicand", NodeStatus::kRemain}
}
},
{"*", "addend", NodeStatus::kRemain}
}
};
utils::OpTypePattern fusion_pattern2 =
{"Add|AddV2", "output", NodeStatus::kReplace,
{
{"BatchMatMulV2", "batch_matmul", NodeStatus::kRemove,
{
{"Mul", "mul", NodeStatus::kRemove,
{
{"*", "mul_input0", NodeStatus::kRemain},
{"Const|Cast", "multiplicand", NodeStatus::kRemain}
}
},
{"*", "bmm_input1", NodeStatus::kRemain}
}
},
{"*", "addend", NodeStatus::kRemain}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
bool found_op_type_match = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(fusion_pattern1, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) pattern = 1;
if (!found_op_type_match) {
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(fusion_pattern2, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) pattern = 2;
}
if (!found_op_type_match) return false;
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
false,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
NodeDef* multiplicand_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("multiplicand"))->node();
  auto multiplicand_props =
      ctx->graph_properties.GetOutputProperties(multiplicand_node_def->name());
  if (multiplicand_props.empty() ||
      NumCoefficients(multiplicand_props[0].shape()) != 1)
    return false;
NodeDef* batch_matmul_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("batch_matmul"))->node();
if (!IsCpuCompatibleMatMul(*ctx, batch_matmul_node_def)) return false;
  auto batch_matmul_props =
      ctx->graph_properties.GetOutputProperties(batch_matmul_node_def->name());
  if (batch_matmul_props.empty() || Rank(batch_matmul_props[0].shape()) != 4)
    return false;
NodeDef* addend_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("addend"))->node();
  auto addend_props =
      ctx->graph_properties.GetOutputProperties(addend_node_def->name());
  if (addend_props.empty()) return false;
  auto addend_shape = addend_props[0].shape();
if (!(Rank(addend_shape) == 4 && addend_shape.dim(1).size() == 1)) {
return false;
}
input_node_names->clear();
input_node_names->resize(4);
if (pattern == 1) {
input_node_names->at(0) = batch_matmul_node_def->input(0);
input_node_names->at(1) = batch_matmul_node_def->input(1);
input_node_names->at(2) = multiplicand_node_def->name();
input_node_names->at(3) = addend_node_def->name();
} else if (pattern == 2) {
auto* mul_input0_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("mul_input0"))->node();
input_node_names->at(0) = mul_input0_node_def->name();
input_node_names->at(1) = batch_matmul_node_def->input(1);
input_node_names->at(2) = multiplicand_node_def->name();
input_node_names->at(3) = addend_node_def->name();
}
return found_op_type_match;
}
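// Returns true if the reduction axes correspond to an instance normalization
// over the spatial dimensions of a 4D or 5D input: {1, 2} or {2, 3} for 4D
// (NHWC or NCHW) and {1, 2, 3} or {2, 3, 4} for 5D (NDHWC or NCDHW).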
template <typename T>
bool IsInstanceNormReduction(const TensorShapeProto& input_shape,
const Tensor& reduction_axes_data) {
int input_dims = input_shape.dim_size();
int reduction_axes = reduction_axes_data.NumElements();
if ((input_dims != 4 && input_dims != 5) ||
(reduction_axes + 2) != input_dims) {
return false;
}
if (input_dims == 4) {
return ((reduction_axes_data.flat<T>()(0) == static_cast<T>(1) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(2)) ||
(reduction_axes_data.flat<T>()(0) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(3)));
} else {
return ((reduction_axes_data.flat<T>()(0) == static_cast<T>(1) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(2) == static_cast<T>(3)) ||
(reduction_axes_data.flat<T>()(0) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(3) &&
reduction_axes_data.flat<T>()(2) == static_cast<T>(4)));
}
}
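// Finds the common mean/variance normalization pattern and checks the extra
// conditions required for instance normalization: `keep_dims` on the first
// Mean, a float or half input of known rank, constant gamma and beta of the
// same shape, and constant reduction axes over the spatial dimensions.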
bool FindInstanceNorm(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsCommonNormPattern(ctx, node_index, matched_nodes_map,
remove_node_indices)) {
return false;
}
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
false,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
NodeDef* mean1_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mean1"))->node();
bool keep_dims = false;
if (!mean1_node || !TryGetNodeAttr(*mean1_node, "keep_dims", &keep_dims) ||
!keep_dims) {
return false;
}
const auto& input_props =
ctx->graph_properties.GetInputProperties(mean1_node->name());
const TensorShapeProto& input_shape = input_props[0].shape();
if (input_shape.unknown_rank()) return false;
DataType dtype = GetDataTypeFromAttr(*mean1_node, "T");
if (dtype != DT_FLOAT && dtype != DT_HALF) return false;
NodeDef* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
NodeDef* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
if (!gamma_node || !beta_node) {
VLOG(2) << "Unexpected error to retrieve gamma or beta node";
return false;
}
Tensor gamma_tensor, beta_tensor;
if (gamma_node->op() != "Const" ||
!gamma_tensor.FromProto(gamma_node->attr().at("value").tensor()) ||
beta_node->op() != "Const" ||
!beta_tensor.FromProto(beta_node->attr().at("value").tensor())) {
return false;
}
if (!gamma_tensor.IsSameSize(beta_tensor)) return false;
NodeDef* mean_axes_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axes_node) {
VLOG(2) << "Unexpected error to retrieve reduction axes node";
return false;
}
Tensor mean_axes_tensor;
if (!mean_axes_tensor.FromProto(
mean_axes_node->attr().at("value").tensor())) {
return false;
}
dtype = mean_axes_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) return false;
return (dtype == DT_INT32)
? IsInstanceNormReduction<int32>(input_shape, mean_axes_tensor)
: IsInstanceNormReduction<int64>(input_shape, mean_axes_tensor);
}
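// Finds an instance normalization followed by Relu or LeakyRelu so that the
// activation can be fused into the normalization as well.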
bool FindInstanceNormWithActivation(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
const auto* node_view = ctx->graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsLeakyRelu(*node_def) && !IsRelu(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* base_node_view = regular_fanin_0.node_view();
int base_node_idx = base_node_view->node_index();
if (!FindInstanceNorm(ctx, base_node_idx, matched_nodes_map,
remove_node_indices))
return false;
remove_node_indices->insert(matched_nodes_map->at("output"));
matched_nodes_map->insert(std::pair<string, int>("activation", node_index));
return true;
}
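// The Copy*Attributes helpers below transfer the attributes of the original
// op to its fused replacement. When the fused activation is LeakyRelu, its
// `alpha` attribute is forwarded as `leakyrelu_alpha`.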
void CopyConv2DAttributes(const NodeDef& conv2d, NodeDef* fused_conv2d,
const NodeDef* activation = nullptr) {
DCHECK(IsConv2D(conv2d)) << "Input node must be a Conv2D";
auto* attr = fused_conv2d->mutable_attr();
auto& src_attr = conv2d.attr();
(*attr)["T"] = src_attr.at("T");
int num_args = fused_conv2d->input_size() - 2;
for (int i = 0; i < num_args; ++i) {
(*attr)["TArgs"].mutable_list()->add_type(src_attr.at("T").type());
}
(*attr)["num_args"].set_i(num_args);
(*attr)["num_host_args"].set_i(0);
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["explicit_paddings"] = src_attr.at("explicit_paddings");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["use_cudnn_on_gpu"] = src_attr.at("use_cudnn_on_gpu");
if (IsMKLEnabled() && src_attr.find("_input_shapes") != src_attr.end()) {
(*attr)["_input_shapes"] = src_attr.at("_input_shapes");
}
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyConv3DAttributes(const NodeDef& conv3d, NodeDef* fused_conv3d,
const NodeDef* activation = nullptr) {
DCHECK(IsConv3D(conv3d)) << "Input node must be a Conv3D";
auto* attr = fused_conv3d->mutable_attr();
auto& src_attr = conv3d.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyDepthwiseConv2dNativeAttributes(const NodeDef& dw_conv2d,
NodeDef* fused_dw_conv2d,
const NodeDef* activation = nullptr) {
DCHECK(IsDepthwiseConv2dNative(dw_conv2d))
<< "Input node must be a DepthwiseConv2dNative";
auto* attr = fused_dw_conv2d->mutable_attr();
auto& src_attr = dw_conv2d.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyFusedBatchNormAttributes(const NodeDef& fused_batch_norm,
NodeDef* fused_batch_norm_ex) {
DCHECK(IsFusedBatchNorm(fused_batch_norm))
<< "Input node must be a FusedBatchNorm";
auto* attr = fused_batch_norm_ex->mutable_attr();
  auto& src_attr = fused_batch_norm.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["is_training"] = src_attr.at("is_training");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["epsilon"] = src_attr.at("epsilon");
(*attr)["exponential_avg_factor"] = src_attr.at("exponential_avg_factor");
if (fused_batch_norm.op() != "FusedBatchNorm") {
SetAttrValue(src_attr.at("U"), &(*attr)["U"]);
} else {
if (!IsMKLEnabled())
SetAttrValue(src_attr.at("T"), &(*attr)["U"]);
else
SetAttrValue(DT_FLOAT, &(*attr)["U"]);
}
}
void CopyFusedBatchNormGradAttributes(const NodeDef& fused_batch_norm_grad,
NodeDef* fused_batch_norm_grad_ex) {
DCHECK(IsFusedBatchNormGrad(fused_batch_norm_grad))
<< "Input node must be a FusedBatchNormGrad";
auto* attr = fused_batch_norm_grad_ex->mutable_attr();
  auto& src_attr = fused_batch_norm_grad.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["is_training"] = src_attr.at("is_training");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["epsilon"] = src_attr.at("epsilon");
if (fused_batch_norm_grad.op() != "FusedBatchNormGrad") {
SetAttrValue(src_attr.at("U"), &(*attr)["U"]);
} else {
SetAttrValue(DT_FLOAT, &(*attr)["U"]);
}
}
void CopyMatMulAttributes(const NodeDef& matmul, NodeDef* fused_matmul,
const NodeDef* activation = nullptr) {
DCHECK(IsMatMul(matmul)) << "Input node must be a MatMul";
auto* attr = fused_matmul->mutable_attr();
auto& src_attr = matmul.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["transpose_a"] = src_attr.at("transpose_a");
(*attr)["transpose_b"] = src_attr.at("transpose_b");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
if (IsMKLEnabled()) {
auto input_shapes = src_attr.find("_input_shapes");
if (input_shapes != src_attr.end()) {
(*attr)["_input_shapes"] = input_shapes->second;
}
}
}
void CopyBatchMatMulAttributes(const NodeDef& batchmatmul,
NodeDef* fused_batch_matmul) {
DCHECK(IsAnyBatchMatMul(batchmatmul)) << "Input node must be a BatchMatMul";
auto* attr = fused_batch_matmul->mutable_attr();
auto& src_attr = batchmatmul.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["adj_x"] = src_attr.at("adj_x");
(*attr)["adj_y"] = src_attr.at("adj_y");
if (IsMKLEnabled()) {
auto input_shapes = src_attr.find("_input_shapes");
if (input_shapes != src_attr.end()) {
(*attr)["_input_shapes"] = input_shapes->second;
}
}
}
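// Sets the attributes shared by all fused contraction ops: the list of fused
// ops, the number of extra inputs, and the epsilon used by FusedBatchNorm
// fusions.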
void SetFusedOpAttributes(NodeDef* fused,
const absl::Span<const absl::string_view> fused_ops,
int num_args = 1, float epsilon = 0.0) {
auto* attr = fused->mutable_attr();
SetAttrValue(fused_ops, &(*attr)["fused_ops"]);
SetAttrValue(num_args, &(*attr)["num_args"]);
SetAttrValue(epsilon, &(*attr)["epsilon"]);
}
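// Replaces a {Conv2D, DepthwiseConv2dNative, MatMul, Conv3D} + BiasAdd pair
// with a single fused op that takes the bias as an extra input and keeps the
// BiasAdd's name.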
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithBiasAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
VLOG(2) << "Fuse " << contraction.op()
<< " with BiasAdd: " << " bias_add=" << bias_add.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op.set_name(bias_add.name());
fused_op.set_device(contraction.device());
fused_op.add_input(contraction.input(0));
fused_op.add_input(contraction.input(1));
fused_op.add_input(bias_add.input(matched.bias_port));
if (IsConv2D(contraction)) {
fused_op.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_op);
} else if (IsDepthwiseConv2dNative(contraction)) {
fused_op.set_op(kFusedDepthwiseConv2dNative);
CopyDepthwiseConv2dNativeAttributes(contraction, &fused_op);
} else if (IsMatMul(contraction)) {
fused_op.set_op(kFusedMatMul);
AddInputShapesAttr(*ctx, matched.contraction);
CopyMatMulAttributes(contraction, &fused_op);
} else if (IsConv3D(contraction)) {
fused_op.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_op);
}
SetFusedOpAttributes(&fused_op, {"BiasAdd"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.bias_add] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithActivation& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << contraction.op() << " and " << activation.op() << ":"
<< " activation=" << activation.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op = contraction;
auto* attr = fused_op.mutable_attr();
  const auto& contraction_fused_ops_list =
      contraction.attr().at("fused_ops").list().s();
  std::vector<std::string> fused_items(contraction_fused_ops_list.begin(),
                                       contraction_fused_ops_list.end());
fused_items.push_back(GetActivationName(activation.op()));
SetAttrValue(fused_items, &(*attr)["fused_ops"]);
if (IsLeakyRelu(activation)) {
auto& activation_attr = activation.attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
fused_op.set_name(activation.name());
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*nodes_to_delete)[matched.contraction] = true;
(*invalidated_nodes)[matched.activation] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(
RemapperContext* ctx, const ContractionWithBiasAddAndActivation& matched,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << contraction.op() << " with BiasAdd and "
<< activation.op() << ":" << " activation=" << activation.name()
<< " bias_add=" << bias_add.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op.set_name(activation.name());
fused_op.set_device(contraction.device());
fused_op.add_input(contraction.input(0));
fused_op.add_input(contraction.input(1));
fused_op.add_input(bias_add.input(matched.bias_port));
if (IsConv2D(contraction)) {
fused_op.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_op, &activation);
} else if (IsDepthwiseConv2dNative(contraction)) {
fused_op.set_op(kFusedDepthwiseConv2dNative);
CopyDepthwiseConv2dNativeAttributes(contraction, &fused_op);
} else if (IsMatMul(contraction)) {
fused_op.set_op(kFusedMatMul);
AddInputShapesAttr(*ctx, matched.contraction);
CopyMatMulAttributes(contraction, &fused_op, &activation);
} else if (IsConv3D(contraction)) {
fused_op.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_op, &activation);
}
SetFusedOpAttributes(&fused_op, {"BiasAdd", activation.op()});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.bias_add] = true;
(*invalidated_nodes)[matched.activation] = true;
return absl::OkStatus();
}
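// Replaces Conv2D/Conv3D + Squeeze + BiasAdd with a fused convolution
// followed by the original Squeeze. The fused convolution keeps the
// contraction's name, and the Squeeze is renamed to the BiasAdd's name so
// that downstream consumers remain valid.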
Status AddFusedConvNode(RemapperContext* ctx,
const ContractionWithSqueezeAndBiasAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
const NodeDef& squeeze = graph->node(matched.squeeze);
VLOG(2) << "Fuse Conv2D/3D with Squeeze and BiasAdd: " << " bias_add="
<< bias_add.name() << " squeeze=" << squeeze.name()
<< " conv=" << contraction.name();
NodeDef fused_conv;
fused_conv.set_name(contraction.name());
fused_conv.set_device(contraction.device());
fused_conv.add_input(contraction.input(0));
fused_conv.add_input(contraction.input(1));
fused_conv.add_input(bias_add.input(1));
if (IsConv2D(contraction)) {
fused_conv.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv);
} else if (IsConv3D(contraction)) {
fused_conv.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_conv);
}
SetFusedOpAttributes(&fused_conv, {"BiasAdd"});
NodeDef remapped_squeeze = squeeze;
remapped_squeeze.set_name(bias_add.name());
remapped_squeeze.set_input(0, contraction.name());
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv), &status);
TF_RETURN_IF_ERROR(status);
mutation->AddNode(std::move(remapped_squeeze), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.contraction] = true;
(*invalidated_nodes)[matched.bias_add] = true;
(*nodes_to_delete)[matched.squeeze] = true;
return absl::OkStatus();
}
Status AddFusedConv2DNode(RemapperContext* ctx,
const ContractionWithBatchNorm& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction)) << "Only Conv2D supported for now";
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
VLOG(2) << "Fuse Conv2D with BatchNorm: batch_norm="
<< fused_batch_norm.name() << " conv2d=" << contraction.name();
NodeDef fused_conv2d;
fused_conv2d.set_name(fused_batch_norm.name());
fused_conv2d.set_op(kFusedConv2D);
fused_conv2d.set_device(contraction.device());
fused_conv2d.add_input(contraction.input(0));
fused_conv2d.add_input(contraction.input(1));
fused_conv2d.add_input(fused_batch_norm.input(1));
fused_conv2d.add_input(fused_batch_norm.input(2));
fused_conv2d.add_input(fused_batch_norm.input(3));
fused_conv2d.add_input(fused_batch_norm.input(4));
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv2d);
SetFusedOpAttributes(&fused_conv2d, {"FusedBatchNorm"},
4, matched.epsilon);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv2d), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
Status AddFusedConv2DNode(RemapperContext* ctx,
const ContractionWithBatchNormAndActivation& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction)) << "Only Conv2D supported for now";
const NodeDef& activation = graph->node(matched.activation);
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
VLOG(2) << "Fuse Conv2D with BatchNorm and " << activation.op()
<< ": activation=" << activation.name()
<< " batch_norm=" << fused_batch_norm.name()
<< " conv2d=" << contraction.name();
NodeDef fused_conv2d;
fused_conv2d.set_name(activation.name());
fused_conv2d.set_op(kFusedConv2D);
fused_conv2d.set_device(contraction.device());
fused_conv2d.add_input(contraction.input(0));
fused_conv2d.add_input(contraction.input(1));
fused_conv2d.add_input(fused_batch_norm.input(1));
fused_conv2d.add_input(fused_batch_norm.input(2));
fused_conv2d.add_input(fused_batch_norm.input(3));
fused_conv2d.add_input(fused_batch_norm.input(4));
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv2d, &activation);
SetFusedOpAttributes(&fused_conv2d, {"FusedBatchNorm", activation.op()},
4, matched.epsilon);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv2d), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.activation] = true;
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.fused_batch_norm] = true;
return absl::OkStatus();
}
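// Replaces a contraction + BiasAdd + Add with a single fused op that takes
// the Add's name; the other input of the Add becomes the fourth input of the
// fused op (num_args == 2).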
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithBiasAddAndAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
DCHECK(IsConv2D(contraction) || IsMatMul(contraction) ||
IsConv3D(contraction));
NodeDef contraction_node;
const NodeDef& add = graph->node(matched.add);
contraction_node.set_name(add.name());
contraction_node.set_device(contraction.device());
contraction_node.add_input(
contraction.input(0));
contraction_node.add_input(
contraction.input(1));
contraction_node.add_input(bias_add.input(matched.bias_port));
contraction_node.add_input(add.input(1 - matched.port_id));
if (IsConv2D(contraction)) {
contraction_node.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &contraction_node);
} else if (IsMatMul(contraction)) {
AddInputShapesAttr(*ctx, matched.contraction);
contraction_node.set_op(kFusedMatMul);
CopyMatMulAttributes(contraction, &contraction_node);
} else if (IsConv3D(contraction)) {
contraction_node.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &contraction_node);
}
SetFusedOpAttributes(&contraction_node, {"BiasAdd", "Add"}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(contraction_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.add] = true;
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.bias_add] = true;
return absl::OkStatus();
}
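// Folds a Pad op into the following Conv3D by attaching the constant padding
// values as a `padding_list` attribute. If the padding is not constant, the
// fusion is skipped without reporting an error.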
Status AddFusedConv3DNode(RemapperContext* ctx, const PadWithConv3D& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction_idx);
const NodeDef& pad_node_def = graph->node(matched.pad_idx);
const NodeDef& padding_const_node_def =
graph->node(matched.padding_const_idx);
VLOG(2) << "Fuse " << pad_node_def.op()
<< " with contraction: " << " contraction=" << contraction.name();
NodeDef fused_node;
fused_node = contraction;
fused_node.set_input(0, pad_node_def.input(0));
fused_node.set_op(kFusedConv3D);
auto* attr = fused_node.mutable_attr();
if (!attr->contains("num_args")) {
SetAttrValue(0, &(*attr)["num_args"]);
}
Tensor const_tensor;
if (padding_const_node_def.op() == "Const" &&
const_tensor.FromProto(
padding_const_node_def.attr().at("value").tensor())) {
auto const_value = const_tensor.flat<int32>();
std::vector<int32> paddings;
for (int i = 0; i < const_value.size(); ++i) {
paddings.push_back(const_value(i));
SetAttrValue(paddings, &(*attr)["padding_list"]);
}
} else {
VLOG(2) << "Pad fusion with " << contraction.op() << " is invalidated, "
<< "it requires padding dim sizes to be constant.";
return absl::OkStatus();
}
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.contraction_idx] = true;
(*nodes_to_delete)[matched.pad_idx] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(
RemapperContext* ctx, const ContractionWithBiasAndAddActivation& matched,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction) || IsConv3D(contraction));
const NodeDef& activation = graph->node(matched.activation);
NodeDef fused_conv;
fused_conv.set_name(activation.name());
fused_conv.set_device(contraction.device());
fused_conv.add_input(contraction.input(0));
fused_conv.add_input(contraction.input(1));
const NodeDef& bias_add = graph->node(matched.bias_add);
fused_conv.add_input(bias_add.input(matched.bias_port));
const NodeDef& add = graph->node(matched.add);
fused_conv.add_input(add.input(1 - matched.port_id));
if (IsConv2D(contraction)) {
fused_conv.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv);
} else if (IsConv3D(contraction)) {
fused_conv.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_conv);
}
SetFusedOpAttributes(&fused_conv, {"BiasAdd", "Add", activation.op()}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.activation] = true;
(*nodes_to_delete)[matched.add] = true;
(*nodes_to_delete)[matched.bias_add] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
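// Rewrites the matched contraction + BiasAdd + HardSwish subgraph into a
// single fused op with fused_ops = {"BiasAdd", "_FusedHardSwish"}.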
Status FuseContractionWithBiasAddAndHardSwish(
RemapperContext* ctx, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices, std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
auto* contraction_node =
ctx->graph_view.GetNode(matched_nodes_map->at("contraction"))->node();
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map->at("bias_add"))->node();
bool is_conv2d = IsConv2D(*contraction_node);
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op(is_conv2d ? kFusedConv2D : kFusedDepthwiseConv2dNative);
fused_node.set_device(contraction_node->device());
fused_node.add_input(contraction_node->input(0));
fused_node.add_input(contraction_node->input(1));
fused_node.add_input(bias_add_node->input(1));
if (is_conv2d) {
CopyConv2DAttributes(*contraction_node, &fused_node);
} else {
CopyDepthwiseConv2dNativeAttributes(*contraction_node, &fused_node);
}
SetFusedOpAttributes(&fused_node, {"BiasAdd", "_FusedHardSwish"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map->at("output")] = true;
for (const auto& node_idx : *remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
Status FuseConv2DSwish(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const NodeDef* mul =
ctx->graph_view.GetNode(matched_nodes_map.at("mulToswish"))->node();
const NodeDef* conv2d =
ctx->graph_view.GetNode(matched_nodes_map.at("conv"))->node();
NodeDef fused_op;
fused_op.set_name(mul->name());
fused_op.set_op(kFusedConv2D);
fused_op.set_device(mul->device());
fused_op.add_input(conv2d->input(0));
fused_op.add_input(conv2d->input(1));
if (matched_nodes_map.find("biasadd") != matched_nodes_map.end()) {
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map.at("biasadd"))->node();
fused_op.add_input(bias_add_node->input(1));
SetFusedOpAttributes(&fused_op, {"BiasAdd", "_MklSwish"});
} else {
auto* fusebatchnorm_node =
ctx->graph_view.GetNode(matched_nodes_map.at("fusebatchnorm"))->node();
fused_op.add_input(fusebatchnorm_node->input(1));
fused_op.add_input(fusebatchnorm_node->input(2));
fused_op.add_input(fusebatchnorm_node->input(3));
fused_op.add_input(fusebatchnorm_node->input(4));
float epsilon;
TF_CHECK_OK(GetNodeAttr(*fusebatchnorm_node, "epsilon", &epsilon));
SetFusedOpAttributes(&fused_op, {"FusedBatchNorm", "_MklSwish"},
4, epsilon);
}
AddInputShapesAttr(*ctx, matched_nodes_map.at("conv"));
CopyConv2DAttributes(*conv2d, &fused_op);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("mulToswish")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
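// Rewrites the matched MatMul + BiasAdd + Gelu subgraph into a _FusedMatMul
// with fused_ops {"BiasAdd", "GeluApproximate"} or {"BiasAdd", "GeluExact"}.
// In the approximate case the matched matmul node already carries the bias
// as its third input; in the exact case the bias comes from the BiasAdd.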
Status AddFusedMatMulBiasAddAndGelu(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete,
bool is_gelu_approximate) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
auto* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map.at("matmul"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_FusedMatMul");
fused_node.set_device(matmul_node->device());
fused_node.add_input(matmul_node->input(0));
fused_node.add_input(matmul_node->input(1));
if (is_gelu_approximate) {
fused_node.add_input(matmul_node->input(2));
} else {
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map.at("bias_add"))->node();
fused_node.add_input(bias_add_node->input(1));
}
CopyMatMulAttributes(*matmul_node, &fused_node);
if (is_gelu_approximate)
SetFusedOpAttributes(&fused_node, {"BiasAdd", "GeluApproximate"});
else
SetFusedOpAttributes(&fused_node, {"BiasAdd", "GeluExact"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
Status AddMklLayerNorm(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
const std::vector<string>& input_node_names,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete,
const float epsilon) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_MklLayerNorm");
fused_node.set_device(output_node->device());
for (const auto& name : input_node_names) fused_node.add_input(name);
auto* attr = fused_node.mutable_attr();
auto& src_attr = output_node->attr();
(*attr)["T"] = src_attr.at("T");
SetAttrValue(epsilon, &(*attr)["epsilon"]);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
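// Rewrites Maximum(x, alpha * x) as LeakyRelu(x); the two are equivalent for
// alpha in [0, 1]. Control dependencies on the alpha constant are preserved
// on the new node.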
Status ReplaceMulMaximumWithLeakyRelu(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete,
float alpha) {
const NodeDef* maximum =
ctx->graph_view.GetNode(matched_nodes_map.at("max_to_leakyrelu"))->node();
const NodeDef* input =
ctx->graph_view.GetNode(matched_nodes_map.at("input"))->node();
const auto* alpha_node_view =
ctx->graph_view.GetNode(matched_nodes_map.at("alpha"));
NodeDef fused_op;
fused_op.set_name(maximum->name());
fused_op.set_op("LeakyRelu");
fused_op.set_device(maximum->device());
fused_op.add_input(input->name());
if (alpha_node_view->NumControllingFanins() > 0) {
const auto& control_fanins = alpha_node_view->GetControllingFanins();
for (int i = 0; i < alpha_node_view->NumControllingFanins(); i++) {
const auto* control_node_view = control_fanins[i].node_view();
*fused_op.add_input() =
AsControlDependency(control_node_view->node()->name());
}
}
auto* attr = fused_op.mutable_attr();
(*attr)["T"] = maximum->attr().at("T");
SetAttrValue(alpha, &(*attr)["alpha"]);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("max_to_leakyrelu")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
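// Rewrites Mul(x, Sigmoid(x)), i.e. Swish(x) = x * sigmoid(x), as a single
// _MklSwish op.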
Status ReplaceSigmoidMulWithSwish(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
const NodeDef* mul =
ctx->graph_view.GetNode(matched_nodes_map.at("mul_to_swish"))->node();
const NodeDef* sigmoid =
ctx->graph_view.GetNode(matched_nodes_map.at("sigmoid"))->node();
NodeDef fused_op;
fused_op.set_name(mul->name());
fused_op.set_op("_MklSwish");
fused_op.set_device(mul->device());
fused_op.add_input(sigmoid->input(0));
auto* attr = fused_op.mutable_attr();
(*attr)["T"] = mul->attr().at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("mul_to_swish")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
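// Replaces FusedBatchNorm (+ optional side input) + activation with
// _FusedBatchNormEx, and inserts an Identity under the activation's name so
// that consumers of the activation keep a valid input.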
Status AddFusedBatchNormExNode(RemapperContext* ctx,
const FusedBatchNormEx& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << activation.op()
<< " with FusedBatchNorm:" << " activation=" << activation.name()
<< " side_input="
<< (matched.side_input != kMissingIndex
? graph->node(matched.side_input).name()
: "<none>")
<< " invalidated="
<< (matched.invalidated != kMissingIndex
? graph->node(matched.invalidated).name()
: "<none>")
<< " fused_batch_norm=" << fused_batch_norm.name();
NodeDef fused_op;
fused_op.set_op(kFusedBatchNormEx);
fused_op.set_name(fused_batch_norm.name());
fused_op.set_device(fused_batch_norm.device());
fused_op.add_input(fused_batch_norm.input(0));
fused_op.add_input(fused_batch_norm.input(1));
fused_op.add_input(fused_batch_norm.input(2));
fused_op.add_input(fused_batch_norm.input(3));
fused_op.add_input(fused_batch_norm.input(4));
CopyFusedBatchNormAttributes(fused_batch_norm, &fused_op);
auto* attrs = fused_op.mutable_attr();
SetAttrValue(activation.op(), &(*attrs)["activation_mode"]);
if (matched.side_input != kMissingIndex) {
SetAttrValue(1, &(*attrs)["num_side_inputs"]);
const NodeDef& side_input = graph->node(matched.side_input);
fused_op.add_input(side_input.name());
} else {
SetAttrValue(0, &(*attrs)["num_side_inputs"]);
}
NodeDef identity_op;
identity_op.set_op("Identity");
identity_op.set_name(activation.name());
identity_op.set_device(fused_batch_norm.device());
identity_op.add_input(fused_batch_norm.name());
(*identity_op.mutable_attr())["T"] = attrs->at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
mutation->AddNode(std::move(identity_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm] = true;
(*invalidated_nodes)[matched.activation] = true;
if (matched.side_input != kMissingIndex) {
(*nodes_to_delete)[matched.invalidated] = true;
}
return absl::OkStatus();
}
Status AddFusedBatchNormGradExNode(RemapperContext* ctx,
const FusedBatchNormGradEx& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_batch_norm_grad =
graph->node(matched.fused_batch_norm_grad);
const NodeDef& activation_grad = graph->node(matched.activation_grad);
const NodeDef& fwd_fused_batch_norm =
graph->node(matched.fwd_fused_batch_norm);
VLOG(2) << "Fuse FusedBatchNormGrad with " << activation_grad.op() << ": "
<< " fused_batch_norm_grad=" << fused_batch_norm_grad.name()
<< " side_input="
<< (matched.side_input_grad != kMissingIndex
? graph->node(matched.side_input_grad).name()
: "<none>")
<< " activation=" << activation_grad.name()
<< " corresponding FusedBatchNorm=" << fwd_fused_batch_norm.name();
NodeDef fused_op;
fused_op.set_op(kFusedBatchNormGradEx);
fused_op.set_name(fused_batch_norm_grad.name());
fused_op.set_device(fused_batch_norm_grad.device());
fused_op.add_input(activation_grad.input(0));
fused_op.add_input(fused_batch_norm_grad.input(1));
fused_op.add_input(fused_batch_norm_grad.input(2));
fused_op.add_input(fused_batch_norm_grad.input(3));
fused_op.add_input(fused_batch_norm_grad.input(4));
fused_op.add_input(fused_batch_norm_grad.input(5));
fused_op.add_input(fwd_fused_batch_norm.input(2));
fused_op.add_input(activation_grad.input(1));
CopyFusedBatchNormGradAttributes(fused_batch_norm_grad, &fused_op);
auto* attrs = fused_op.mutable_attr();
SetAttrValue("Relu", &(*attrs)["activation_mode"]);
if (matched.side_input_grad != kMissingIndex) {
SetAttrValue(1, &(*attrs)["num_side_inputs"]);
} else {
SetAttrValue(0, &(*attrs)["num_side_inputs"]);
}
NodeDef identity_op;
identity_op.set_op("Identity");
identity_op.set_name(activation_grad.name());
identity_op.set_device(fused_batch_norm_grad.device());
identity_op.add_input(absl::StrCat(fused_batch_norm_grad.name(), ":5"));
(*identity_op.mutable_attr())["T"] = attrs->at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
if (matched.side_input_grad != kMissingIndex) {
mutation->AddNode(std::move(identity_op), &status);
TF_RETURN_IF_ERROR(status);
}
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm_grad] = true;
if (matched.side_input_grad != kMissingIndex) {
(*invalidated_nodes)[matched.activation_grad] = true;
} else {
(*nodes_to_delete)[matched.activation_grad] = true;
}
return absl::OkStatus();
}
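// Splits an inference-mode FusedBatchNorm into primitive math ops:
//   scaled = scale * rsqrt(variance + epsilon)
//   output = x * scaled + (offset - mean * scaled)
// For NCHW/NCDHW inputs, scale/offset/mean/variance are first reshaped to
// [1, -1, 1, 1(, 1)] so that the element-wise ops broadcast over the channel
// dimension.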
Status AddBatchNormNodes(RemapperContext* ctx, const FusedBatchNorm& matched) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_node = graph->node(matched.fused_batch_norm);
VLOG(2) << "Optimizing fused batch norm node "
<< SummarizeNodeDef(fused_node);
const string& x = fused_node.input(0);
string scale = fused_node.input(1);
string offset = fused_node.input(2);
string mean = fused_node.input(3);
string variance = fused_node.input(4);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
string x_format = fused_node.attr().at(kDataFormat).s();
if (x_format == "NCHW" || x_format == "NCDHW") {
NodeDef new_shape;
const string new_shape_name =
AddPrefixToNodeName(x_format + "Shape", fused_node.name());
new_shape.set_name(new_shape_name);
new_shape.set_op("Const");
new_shape.set_device(fused_node.device());
*new_shape.add_input() = AsControlDependency(scale);
(*new_shape.mutable_attr())["dtype"].set_type(DT_INT32);
if (x_format == "NCHW") {
Tensor t(DT_INT32, {4});
t.flat<int32>()(0) = 1;
t.flat<int32>()(1) = -1;
t.flat<int32>()(2) = 1;
t.flat<int32>()(3) = 1;
t.AsProtoTensorContent(
(*new_shape.mutable_attr())["value"].mutable_tensor());
} else {
Tensor t(DT_INT32, {5});
t.flat<int32>()(0) = 1;
t.flat<int32>()(1) = -1;
t.flat<int32>()(2) = 1;
t.flat<int32>()(3) = 1;
t.flat<int32>()(4) = 1;
t.AsProtoTensorContent(
(*new_shape.mutable_attr())["value"].mutable_tensor());
}
mutation->AddNode(std::move(new_shape), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_scale;
reshaped_scale.set_name(
AddPrefixToNodeName(x_format + "ShapedScale", fused_node.name()));
reshaped_scale.set_op("Reshape");
reshaped_scale.set_device(fused_node.device());
*reshaped_scale.add_input() = scale;
*reshaped_scale.add_input() = new_shape_name;
(*reshaped_scale.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_scale.mutable_attr())["Tshape"].set_type(DT_INT32);
scale = reshaped_scale.name();
mutation->AddNode(std::move(reshaped_scale), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_offset;
reshaped_offset.set_name(
AddPrefixToNodeName(x_format + "ShapedOffset", fused_node.name()));
reshaped_offset.set_op("Reshape");
reshaped_offset.set_device(fused_node.device());
*reshaped_offset.add_input() = offset;
*reshaped_offset.add_input() = new_shape_name;
(*reshaped_offset.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_offset.mutable_attr())["Tshape"].set_type(DT_INT32);
offset = reshaped_offset.name();
mutation->AddNode(std::move(reshaped_offset), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_mean;
reshaped_mean.set_name(
AddPrefixToNodeName(x_format + "ShapedMean", fused_node.name()));
reshaped_mean.set_op("Reshape");
reshaped_mean.set_device(fused_node.device());
*reshaped_mean.add_input() = mean;
*reshaped_mean.add_input() = new_shape_name;
(*reshaped_mean.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_mean.mutable_attr())["Tshape"].set_type(DT_INT32);
mean = reshaped_mean.name();
mutation->AddNode(std::move(reshaped_mean), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_variance;
reshaped_variance.set_name(
AddPrefixToNodeName(x_format + "ShapedVariance", fused_node.name()));
reshaped_variance.set_op("Reshape");
reshaped_variance.set_device(fused_node.device());
*reshaped_variance.add_input() = variance;
*reshaped_variance.add_input() = new_shape_name;
(*reshaped_variance.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_variance.mutable_attr())["Tshape"].set_type(DT_INT32);
variance = reshaped_variance.name();
mutation->AddNode(std::move(reshaped_variance), &status);
TF_RETURN_IF_ERROR(status);
}
float epsilon = 0.0f;
if (fused_node.attr().count("epsilon")) {
epsilon = fused_node.attr().at("epsilon").f();
}
DataType dtype = fused_node.attr().at("T").type();
Tensor value(dtype, TensorShape());
value.scalar<float>()() = epsilon;
NodeDef variance_epsilon;
const string variance_epsilon_name =
AddPrefixToNodeName("Const", fused_node.name());
TF_RETURN_IF_ERROR(ConstantFolding::CreateNodeDef(
variance_epsilon_name, TensorValue(&value), &variance_epsilon));
variance_epsilon.set_device(fused_node.device());
mutation->AddNode(std::move(variance_epsilon), &status);
TF_RETURN_IF_ERROR(status);
NodeDef variance_plus_epsilon;
const string variance_plus_epsilon_name =
AddPrefixToNodeName("VarPlusEpsilon", fused_node.name());
variance_plus_epsilon.set_name(variance_plus_epsilon_name);
variance_plus_epsilon.set_op("Add");
(*variance_plus_epsilon.mutable_attr())["T"].set_type(dtype);
variance_plus_epsilon.set_device(fused_node.device());
*variance_plus_epsilon.add_input() = variance;
*variance_plus_epsilon.add_input() = variance_epsilon_name;
mutation->AddNode(std::move(variance_plus_epsilon), &status);
TF_RETURN_IF_ERROR(status);
NodeDef inv;
const string inv_name = AddPrefixToNodeName("Inv", fused_node.name());
inv.set_name(inv_name);
inv.set_op("Rsqrt");
inv.set_device(fused_node.device());
(*inv.mutable_attr())["T"].set_type(dtype);
*inv.add_input() = variance_plus_epsilon_name;
mutation->AddNode(std::move(inv), &status);
TF_RETURN_IF_ERROR(status);
NodeDef scaled;
const string scaled_name = AddPrefixToNodeName("Scaled", fused_node.name());
scaled.set_name(scaled_name);
scaled.set_op("Mul");
scaled.set_device(fused_node.device());
(*scaled.mutable_attr())["T"].set_type(dtype);
*scaled.add_input() = inv_name;
*scaled.add_input() = scale;
mutation->AddNode(std::move(scaled), &status);
TF_RETURN_IF_ERROR(status);
NodeDef a;
const string a_name = AddPrefixToNodeName("Mul", fused_node.name());
a.set_name(a_name);
a.set_op("Mul");
a.set_device(fused_node.device());
(*a.mutable_attr())["T"].set_type(dtype);
*a.add_input() = x;
*a.add_input() = scaled_name;
mutation->AddNode(std::move(a), &status);
TF_RETURN_IF_ERROR(status);
NodeDef b;
const string b_name = AddPrefixToNodeName("Mul2", fused_node.name());
b.set_name(b_name);
b.set_op("Mul");
b.set_device(fused_node.device());
(*b.mutable_attr())["T"].set_type(dtype);
*b.add_input() = mean;
*b.add_input() = scaled_name;
mutation->AddNode(std::move(b), &status);
TF_RETURN_IF_ERROR(status);
NodeDef c;
const string c_name = AddPrefixToNodeName("Offset", fused_node.name());
c.set_name(c_name);
c.set_op("Sub");
c.set_device(fused_node.device());
(*c.mutable_attr())["T"].set_type(dtype);
*c.add_input() = offset;
*c.add_input() = b_name;
mutation->AddNode(std::move(c), &status);
TF_RETURN_IF_ERROR(status);
NodeDef r;
r.set_name(fused_node.name());
r.set_op("Add");
r.set_device(fused_node.device());
(*r.mutable_attr())["T"].set_type(dtype);
*r.add_input() = a_name;
*r.add_input() = c_name;
mutation->AddNode(std::move(r), &status);
TF_RETURN_IF_ERROR(status);
return mutation->Apply();
}
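// Replaces the matched AsString + StringToHashBucketFast pair with a single
// kTensorToHashBucket op placed on the same device as the AsString input.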
Status AddTensorToHashBucketNode(RemapperContext* ctx,
const TensorToHashBucket& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& pre_as_string = graph->node(matched.pre_as_string);
const NodeDef& as_string = graph->node(matched.as_string);
const NodeDef& string_to_hash_bucket =
graph->node(matched.string_to_hash_bucket);
VLOG(2) << "Fuse AsString with StringToHashBucketFast:" << " as_string="
<< as_string.name()
<< " string_to_hash_bucket=" << string_to_hash_bucket.name()
<< " on device=" << pre_as_string.device();
NodeDef fused_op;
fused_op.set_name(string_to_hash_bucket.name());
fused_op.set_device(pre_as_string.device());
fused_op.add_input(as_string.input(0));
fused_op.set_op(kTensorToHashBucket);
auto* attr = fused_op.mutable_attr();
auto& src_attr0 = as_string.attr();
auto& src_attr1 = string_to_hash_bucket.attr();
(*attr)["T"] = src_attr0.at("T");
(*attr)["num_buckets"] = src_attr1.at("num_buckets");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.string_to_hash_bucket] = true;
(*nodes_to_delete)[matched.as_string] = true;
return absl::OkStatus();
}
Status AddFusedBatchMatMul(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
const std::vector<string>& input_node_names,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
auto* batch_matmul_node =
ctx->graph_view.GetNode(matched_nodes_map.at("batch_matmul"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_MklFusedBatchMatMulV2");
fused_node.set_device(batch_matmul_node->device());
for (const auto& name : input_node_names) fused_node.add_input(name);
CopyBatchMatMulAttributes(*batch_matmul_node, &fused_node);
SetFusedOpAttributes(&fused_node, {"Mul", "Add"}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
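// Copies the elements of a tensor of type T into a std::vector<U>, converting
// each element with static_cast.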
template <typename T, typename U>
std::vector<U> GetTensorValues(const Tensor& tensor) {
std::vector<U> result_vector;
int item_count = tensor.flat<T>().size();
result_vector.reserve(item_count);
for (int i = 0; i < item_count; i++) {
    result_vector.push_back(static_cast<U>(tensor.flat<T>()(i)));
}
return result_vector;
}
Status AddMklFusedInstanceNorm(RemapperContext* ctx,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete,
bool fuse_activation) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
auto* input_node =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
auto* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
auto* epsilon_node =
ctx->graph_view.GetNode(matched_nodes_map->at("epsilon"))->node();
auto* mean_axes_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axes_node || mean_axes_node->op() != "Const") {
VLOG(2) << "Mean reduction axes node is not valid, abort fusion";
return absl::OkStatus();
}
DataType dtype;
Tensor mean_axes_tensor;
if (!mean_axes_tensor.FromProto(
mean_axes_node->attr().at("value").tensor())) {
VLOG(2) << "Unable to get mean reduction axes, abort fusion";
return absl::OkStatus();
}
dtype = mean_axes_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) {
VLOG(2) << "Unexpected mean reduction axes data type, abort fusion";
return absl::OkStatus();
}
std::vector<int> reduction_axes =
(dtype == DT_INT32) ? GetTensorValues<int32, int>(mean_axes_tensor)
: GetTensorValues<int64, int>(mean_axes_tensor);
NodeDef* activation_node = nullptr;
if (fuse_activation) {
activation_node =
ctx->graph_view.GetNode(matched_nodes_map->at("activation"))->node();
if (!activation_node) {
VLOG(2) << "Error to retrieve activation node, abort fusion";
return absl::OkStatus();
}
if (!IsLeakyRelu(*activation_node) && !IsRelu(*activation_node)) {
VLOG(2) << "Unsupported activation node, abort fusion";
return absl::OkStatus();
}
}
NodeDef fused_node;
fused_node.set_op("_MklFusedInstanceNorm");
fused_node.set_device(output_node->device());
fused_node.add_input(input_node->name());
fused_node.add_input(gamma_node->name());
fused_node.add_input(beta_node->name());
auto* attr = fused_node.mutable_attr();
auto& src_attr = output_node->attr();
(*attr)["T"] = src_attr.at("T");
Tensor epsilon_tensor;
  float epsilon_value = 0.0001f;
if (epsilon_node != nullptr && epsilon_node->op() == "Const" &&
epsilon_tensor.FromProto(epsilon_node->attr().at("value").tensor())) {
dtype = epsilon_tensor.dtype();
if (dtype == DT_BFLOAT16) {
epsilon_value = static_cast<float>(epsilon_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
epsilon_value = static_cast<float>(epsilon_tensor.flat<Eigen::half>()(0));
} else if (dtype == DT_FLOAT) {
epsilon_value = epsilon_tensor.flat<float>()(0);
}
SetAttrValue(epsilon_value, &(*attr)["epsilon"]);
}
SetAttrValue(reduction_axes, &(*attr)["reduction_axes"]);
if (fuse_activation) {
fused_node.set_name(activation_node->name());
string activation_op = activation_node->op();
absl::string_view fused_items[] = {activation_op};
SetAttrValue(absl::Span<absl::string_view>(fused_items),
&(*attr)["fused_ops"]);
if (activation_op == "LeakyRelu") {
auto& activation_attr = activation_node->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
} else {
fused_node.set_name(output_node->name());
}
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
if (fuse_activation) {
(*invalidated_nodes)[matched_nodes_map->at("activation")] = true;
} else {
(*invalidated_nodes)[matched_nodes_map->at("output")] = true;
}
for (const auto& node_idx : *remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
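// Returns true if the node is an Add (or a supported activation fed by an
// Add) whose inputs trace back to a convolution or MatMul, possibly through
// a BiasAdd or another Add.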
bool IsContractionWithAdd(const RemapperContext& ctx, int node_index) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
auto is_supported_add_input = [](const auto* node_view) -> bool {
if (IsConvOrMatMul(*node_view->node())) return true;
if (IsBiasAdd(*node_view->node()) || IsAdd(*node_view->node())) {
if (node_view->NumRegularFanins() < 2) return false;
const auto& bias_add_fanin_0 = node_view->GetRegularFanin(0);
const auto& bias_add_fanin_1 = node_view->GetRegularFanin(1);
return IsConvOrMatMul(*bias_add_fanin_0.node_view()->node()) ||
IsConvOrMatMul(*bias_add_fanin_1.node_view()->node());
}
return false;
};
auto is_supported_add = [&](const auto* node_view) -> bool {
const auto* node_def = node_view->node();
if (IsAdd(*node_def)) {
if (node_view->NumRegularFanins() < 2) return false;
const auto& add_fanin_0 = node_view->GetRegularFanin(0);
const auto& add_fanin_1 = node_view->GetRegularFanin(1);
return is_supported_add_input(add_fanin_0.node_view()) ||
is_supported_add_input(add_fanin_1.node_view());
}
return false;
};
if (is_supported_add(node_view)) {
return true;
}
if (IsSupportedActivation(*node_view->node(), nullptr)) {
for (int i = 0; i < node_view->NumRegularFanins(); i++) {
const auto& fanin_i = node_view->GetRegularFanin(i);
if (is_supported_add(fanin_i.node_view())) return true;
}
}
return false;
}
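// Matches the Mish activation pattern Mul(x, Tanh(Softplus(x))). Only
// attempted when oneDNN is enabled, the Mul runs on CPU, and the data type
// is float, half, or bfloat16; also verifies that the Mul's other operand
// is the same tensor that feeds the Softplus.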
bool FindSoftplusAndTanhAndMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern softplustanhmul_pattern {
"Mul", "mul_to_mish", NodeStatus::kReplace,
{
{
"Tanh", "tanh", NodeStatus::kRemove,
{
{
"Softplus", "softplus", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain}
}
}
}
},
{"*", "input", NodeStatus::kRemain}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!HasDataType(mul_node_def, DT_FLOAT) &&
!HasDataType(mul_node_def, DT_HALF) &&
!HasDataType(mul_node_def, DT_BFLOAT16))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
softplustanhmul_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
NodeDef* matched_softplus_node =
ctx->graph_view.GetNode(matched_nodes_map->at("softplus"))->node();
auto in_tensor_softplus = matched_softplus_node->input(0);
if ((mul_node_def->input(0) != in_tensor_softplus) &&
(mul_node_def->input(1) != in_tensor_softplus)) {
found_op_type_match = false;
}
}
return found_op_type_match;
}
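// Rewrites a matched Softplus-Tanh-Mul subgraph into a single _MklFusedMish
// node that takes the original Softplus input.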
Status ReplaceSoftplusTanhAndMulWithMish(
RemapperContext* ctx, const std::map<string, int>* matched_nodes_map,
const std::set<int>* remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
auto* old_mul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mul_to_mish"))->node();
auto* softplus_node =
ctx->graph_view.GetNode(matched_nodes_map->at("softplus"))->node();
NodeDef fused_node;
fused_node.set_name(old_mul_node->name());
fused_node.set_op("_MklFusedMish");
fused_node.set_device(old_mul_node->device());
fused_node.add_input(softplus_node->input(0));
auto* fused_node_attr = fused_node.mutable_attr();
(*fused_node_attr)["T"] = old_mul_node->attr().at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map->at("mul_to_mish")] = true;
for (const auto& node_index : *remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
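// Returns true if a fusion pattern rooted at this node needs statically
// inferred shapes. Shape inference is expensive, so it is run lazily and
// only when one of the candidate patterns below can actually use it.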
bool RequiresInferredShapes(const RemapperContext& ctx, int node_index,
const Cluster* cluster) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
const auto is_batch_norm_candidate = [&]() -> bool {
if (!IsFusedBatchNorm(*node_def)) return false;
if (GetDataTypeFromAttr(*node_def, "T") != DT_FLOAT) return false;
bool is_training = true;
if (!TryGetNodeAttr(*node_def, kIsTraining, &is_training)) return false;
if (is_training) return false;
return true;
};
const auto is_act_biasadd_conv_candidate = [&]() -> bool {
if (!IsSupportedActivation(*node_def, cluster)) return false;
if (!RuntimeFusionEnabled(cluster) && !IsRelu(*node_def)) return false;
const auto is_compatible_dtype = [&](const NodeDef& node) -> bool {
bool fp16_only =
IsRelu6(*node_def) || IsElu(*node_def) || IsLeakyRelu(*node_def);
DataType dtype = GetDataTypeFromAttr(node, "T");
return dtype == DT_HALF || (!fp16_only && dtype == DT_FLOAT);
};
if (!is_compatible_dtype(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (!IsBiasAdd(*relu_fanin_0_node_def) && !IsAdd(*relu_fanin_0_node_def))
return false;
if (!is_compatible_dtype(*relu_fanin_0_node_def)) return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 1) return false;
const auto& biasadd_fanin_0 = relu_fanin_0_node_view->GetRegularFanin(0);
const auto* biasadd_fanin_0_node_def = biasadd_fanin_0.node_view()->node();
if (!IsConv2D(*biasadd_fanin_0_node_def) &&
!IsConv3D(*biasadd_fanin_0_node_def))
return false;
if (!is_compatible_dtype(*biasadd_fanin_0_node_def)) return false;
return true;
};
const auto is_batch_norm_fusion_candidate = [&]() -> bool {
if (!IsRelu(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (IsFusedBatchNorm(*relu_fanin_0_node_def)) {
return true;
} else if (IsAdd(*relu_fanin_0_node_def)) {
if (relu_fanin_0_node_view->NumRegularFanins() < 2) return false;
const auto& add_regular_fanin_0 =
relu_fanin_0_node_view->GetRegularFanin(0);
if (IsFusedBatchNorm(*add_regular_fanin_0.node_view()->node()))
return true;
const auto& add_regular_fanin_1 =
relu_fanin_0_node_view->GetRegularFanin(1);
if (IsFusedBatchNorm(*add_regular_fanin_1.node_view()->node()))
return true;
}
return false;
};
const auto is_batch_norm_grad_fusion_candidate = [&]() -> bool {
if (!IsFusedBatchNormGrad(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& bn_fanin_0 = node_view->GetRegularFanin(0);
const auto* bn_fanin_0_node_view = bn_fanin_0.node_view();
const auto* bn_fanin_0_node_def = bn_fanin_0_node_view->node();
if (IsReluGrad(*bn_fanin_0_node_def)) {
return true;
}
return false;
};
const auto is_matmul_gelu_exact_fusion_candidate = [&]() -> bool {
if (!RuntimeFusionEnabled(cluster)) return false;
DataType node_dtype = GetDataTypeFromAttr(*node_def, "T");
if (node_dtype != DT_HALF) return false;
return IsMatchedMatMulBiasAddAndGeluExact(const_cast<RemapperContext&>(ctx),
node_index);
};
const auto is_act_biasadd_matmul_candidate = [&]() -> bool {
if (!IsTanh(*node_def) && !IsSigmoid(*node_def)) return false;
if (!RuntimeFusionEnabled(cluster)) return false;
DataType act_dtype = GetDataTypeFromAttr(*node_def, "T");
if (act_dtype != DT_HALF) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (!IsBiasAdd(*relu_fanin_0_node_def) && !IsAdd(*relu_fanin_0_node_def)) {
return false;
}
DataType biasadd_dtype = GetDataTypeFromAttr(*relu_fanin_0_node_def, "T");
if (biasadd_dtype != DT_HALF) return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 1) return false;
const auto& biasadd_fanin_0 = relu_fanin_0_node_view->GetRegularFanin(0);
const auto* biasadd_fanin_0_node_def = biasadd_fanin_0.node_view()->node();
if (!IsMatMul(*biasadd_fanin_0_node_def)) return false;
DataType matmul_dtype = GetDataTypeFromAttr(*biasadd_fanin_0_node_def, "T");
if (matmul_dtype != DT_HALF) return false;
return true;
};
if (IsMKLEnabled())
return is_batch_norm_candidate() || is_batch_norm_fusion_candidate() ||
IsContractionWithAdd(ctx, node_index) ||
is_act_biasadd_conv_candidate() || IsBiasAdd(*node_def) ||
IsTranspose(*node_def);
return is_act_biasadd_conv_candidate() || is_batch_norm_candidate() ||
is_batch_norm_fusion_candidate() ||
is_batch_norm_grad_fusion_candidate() ||
is_matmul_gelu_exact_fusion_candidate() ||
is_act_biasadd_matmul_candidate();
}
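// Returns true if TF_XLA_FLAGS contains --tf_xla_cpu_global_jit.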
inline bool IsXlaCpuGlobalJitOn() {
std::vector<string> tf_xla_flags;
const std::string tf_xla_cpu_global_jit = "--tf_xla_cpu_global_jit";
TF_CHECK_OK(ReadStringsFromEnvVar("TF_XLA_FLAGS", "", &tf_xla_flags));
return std::find(tf_xla_flags.begin(), tf_xla_flags.end(),
tf_xla_cpu_global_jit) != tf_xla_flags.end();
}
}  // namespace
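// Walks the graph in reverse topological order and greedily rewrites each
// matched pattern into its fused equivalent; nodes marked for deletion are
// removed in a single mutation at the end.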
Status Remapper::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
GrapplerItem mutable_item = item;
Status status;
bool xla_cpu_jit_disable_fusion =
xla_auto_clustering_on_ && IsXlaCpuGlobalJitOn();
#ifdef DNNL_AARCH64_USE_ACL
xla_cpu_jit_disable_fusion = false;
#endif
RemapperContext ctx(&mutable_item, &status, cpu_layout_conversion_,
xla_auto_clustering_on_, xla_cpu_jit_disable_fusion);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(
    ctx.graph_view.SortTopologically(/*ignore_cycles=*/false, {}));
const int num_nodes = item.graph.node_size();
std::vector<bool> invalidated_nodes(num_nodes);
std::vector<bool> nodes_to_delete(num_nodes);
bool allow_non_differentiable_rewrites =
item.optimization_options().allow_non_differentiable_rewrites;
for (int i = num_nodes - 1; i >= 0; --i) {
if (invalidated_nodes[i] || nodes_to_delete[i]) {
continue;
}
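// Infer shapes lazily, the first time a pattern that needs them is found.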
if (!ctx.inferred_graph_properties &&
RequiresInferredShapes(ctx, i, cluster)) {
const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
TF_RETURN_IF_ERROR(ctx.graph_properties.InferStatically(
    assume_valid_feeds,
    /*aggressive_shape_inference=*/false,
    /*include_input_tensor_values=*/true,
    /*include_output_tensor_values=*/false));
ctx.inferred_graph_properties = true;
}
ContractionWithBiasAddAndAdd contract_with_bias_and_add;
ContractionWithActivation contract_with_activation;
ContractionWithBiasAndAddActivation contract_with_bias_and_add_activation;
if (IsConv2D(ctx.graph_view.graph()->node(i)) ||
IsFusedBatchNorm(ctx.graph_view.graph()->node(i)) ||
IsDepthwiseConv2dNative(ctx.graph_view.graph()->node(i)) ||
IsBiasAdd(ctx.graph_view.graph()->node(i)) ||
IsTranspose(ctx.graph_view.graph()->node(i)) ||
IsSigmoid(ctx.graph_view.graph()->node(i)) ||
IsMatMul(ctx.graph_view.graph()->node(i))) {
AddInputShapesAttr(ctx, i);
}
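// oneDNN-specific fusions; skipped when XLA CPU global JIT is on, since
// XLA will compile these nodes itself.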
if (IsMKLEnabled() && !ctx.xla_cpu_jit_disable_fusion) {
const auto* node_view = ctx.graph_view.GetNode(i);
const auto* node_def = node_view->node();
const string type_attr = "T";
DataType dtype = GetDataTypeFromAttr(*node_def, type_attr);
if ((dtype == DT_BFLOAT16 || dtype == DT_HALF) &&
!IsDataTypeSupportedByOneDNNOnThisCPU(dtype))
continue;
if (FindContractionWithBiasAndAddActivation(
ctx, i, &contract_with_bias_and_add_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_add_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
if (FindFusedConvWithFusedActivation(ctx, i, &contract_with_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
if (FindContractionWithBiasAddAndAdd(ctx, i,
&contract_with_bias_and_add)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_add,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
PadWithConv3D pad_with_conv3d;
if (FindPadWithConv3D(ctx, i, &pad_with_conv3d)) {
TF_RETURN_IF_ERROR(AddFusedConv3DNode(
&ctx, pad_with_conv3d, &invalidated_nodes, &nodes_to_delete));
continue;
}
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
std::vector<string> input_node_names;
if (FindContractionWithBiasAddAndHardSwish(ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(FuseContractionWithBiasAddAndHardSwish(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindSoftplusAndTanhAndMul(&ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(ReplaceSoftplusTanhAndMulWithMish(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
input_node_names.clear();
if (FindFusedBatchMatMul(&ctx, i, &matched_nodes_map,
&remove_node_indices, &input_node_names)) {
TF_RETURN_IF_ERROR(AddFusedBatchMatMul(
&ctx, matched_nodes_map, remove_node_indices, input_node_names,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
std::map<string, int> fusedconv2dSwish_matched_nodes_map;
std::set<int> fusedconv2dSwish_remove_node_indices;
if (FindConv2DSwish(&ctx, i, &fusedconv2dSwish_matched_nodes_map,
&fusedconv2dSwish_remove_node_indices)) {
TF_RETURN_IF_ERROR(
FuseConv2DSwish(&ctx, fusedconv2dSwish_matched_nodes_map,
fusedconv2dSwish_remove_node_indices,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
std::map<string, int> mulmax_matched_nodes_map;
std::set<int> mulmax_remove_node_indices;
float alpha;
if (FindMulAndMaximum(&ctx, i, &mulmax_matched_nodes_map,
&mulmax_remove_node_indices, &alpha)) {
TF_RETURN_IF_ERROR(ReplaceMulMaximumWithLeakyRelu(
&ctx, mulmax_matched_nodes_map, mulmax_remove_node_indices,
&invalidated_nodes, &nodes_to_delete, alpha));
continue;
}
std::map<string, int> sigmoidmul_matched_nodes_map;
std::set<int> sigmoidmul_remove_node_indices;
if (FindSigmoidAndMul(&ctx, i, &sigmoidmul_matched_nodes_map,
&sigmoidmul_remove_node_indices)) {
bool replace = true;
#ifdef DNNL_AARCH64_USE_ACL
const int sigmoid_idx = sigmoidmul_matched_nodes_map.at("sigmoid");
AddInputShapesAttr(ctx, sigmoid_idx);
const NodeDef* sigmoid = ctx.graph_view.GetNode(sigmoid_idx)->node();
const int intra_op_parallelism_threads =
item.optimization_options().intra_op_parallelism_threads;
double total_mflops =
CalculateNodeMFlops(AttrSlice(*sigmoid), "Sigmoid");
double thr =
FindRewriteThreshold("Sigmoid", intra_op_parallelism_threads);
if (total_mflops != -1 && total_mflops < thr) {
replace = false;
}
#endif
if (replace) {
TF_RETURN_IF_ERROR(
ReplaceSigmoidMulWithSwish(&ctx, sigmoidmul_matched_nodes_map,
sigmoidmul_remove_node_indices,
&invalidated_nodes, &nodes_to_delete));
continue;
}
}
matched_nodes_map.clear();
remove_node_indices.clear();
input_node_names.clear();
float epsilon = 0.001;
if (FindMklLayerNorm(&ctx, i, &matched_nodes_map, &remove_node_indices,
&input_node_names, &epsilon)) {
TF_RETURN_IF_ERROR(AddMklLayerNorm(
&ctx, matched_nodes_map, remove_node_indices, input_node_names,
&invalidated_nodes, &nodes_to_delete, epsilon));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindInstanceNormWithActivation(&ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(AddMklFusedInstanceNorm(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete, true));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindInstanceNorm(&ctx, i, &matched_nodes_map, &remove_node_indices)) {
TF_RETURN_IF_ERROR(AddMklFusedInstanceNorm(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete, false));
continue;
}
}
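// The remaining fusions apply independently of oneDNN.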
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool is_gelu_approximate = false;
if (FindMatMulBiasAddAndGelu(&ctx, i, cluster, &matched_nodes_map,
&remove_node_indices, &is_gelu_approximate)) {
TF_RETURN_IF_ERROR(AddFusedMatMulBiasAddAndGelu(
&ctx, matched_nodes_map, remove_node_indices, &invalidated_nodes,
&nodes_to_delete, is_gelu_approximate));
continue;
}
ContractionWithBiasAdd contract_with_bias;
if (allow_non_differentiable_rewrites &&
FindContractionWithBias(ctx, i, &contract_with_bias)) {
TF_RETURN_IF_ERROR(AddFusedContractionNode(
&ctx, contract_with_bias, &invalidated_nodes, &nodes_to_delete));
continue;
}
ContractionWithBiasAddAndActivation contract_with_bias_and_activation;
if (allow_non_differentiable_rewrites &&
FindContractionWithBiasAndActivation(
ctx, cluster, i, &contract_with_bias_and_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
ContractionWithSqueezeAndBiasAdd contract_with_squeeze_and_bias;
if (allow_non_differentiable_rewrites &&
FindConvWithSqueezeAndBias(ctx, i, &contract_with_squeeze_and_bias)) {
TF_RETURN_IF_ERROR(AddFusedConvNode(&ctx, contract_with_squeeze_and_bias,
&invalidated_nodes,
&nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
ContractionWithBatchNorm contract_with_batch_norm;
if (allow_non_differentiable_rewrites &&
FindConv2DWithBatchNorm(ctx, i, &contract_with_batch_norm)) {
TF_RETURN_IF_ERROR(AddFusedConv2DNode(&ctx, contract_with_batch_norm,
&invalidated_nodes,
&nodes_to_delete));
continue;
}
ContractionWithBatchNormAndActivation
contract_with_batch_norm_and_activation;
if (allow_non_differentiable_rewrites &&
FindConv2DWithBatchNormAndActivation(
ctx, i, &contract_with_batch_norm_and_activation)) {
TF_RETURN_IF_ERROR(
AddFusedConv2DNode(&ctx, contract_with_batch_norm_and_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
FusedBatchNormEx fused_batch_norm_ex;
if (allow_non_differentiable_rewrites &&
FindFusedBatchNormEx(ctx, i, &fused_batch_norm_ex)) {
TF_RETURN_IF_ERROR(AddFusedBatchNormExNode(
&ctx, fused_batch_norm_ex, &invalidated_nodes, &nodes_to_delete));
continue;
}
FusedBatchNormGradEx fused_batch_norm_grad_ex;
if (allow_non_differentiable_rewrites &&
FindFusedBatchNormGradEx(ctx, i, &fused_batch_norm_grad_ex)) {
TF_RETURN_IF_ERROR(
AddFusedBatchNormGradExNode(&ctx, fused_batch_norm_grad_ex,
&invalidated_nodes, &nodes_to_delete));
continue;
}
TensorToHashBucket tensor_to_hash_bucket;
if (allow_non_differentiable_rewrites &&
FindTensorToHashBucket(ctx, i, &tensor_to_hash_bucket)) {
TF_RETURN_IF_ERROR(AddTensorToHashBucketNode(
&ctx, tensor_to_hash_bucket, &invalidated_nodes, &nodes_to_delete));
continue;
}
FusedBatchNorm fused_batch_norm;
if (FindFusedBatchNorm(ctx, i, &fused_batch_norm)) {
TF_RETURN_IF_ERROR(AddBatchNormNodes(&ctx, fused_batch_norm));
continue;
}
}
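// Apply all queued node removals in one mutation.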
utils::Mutation* mutation = ctx.graph_view.GetMutationBuilder();
for (int i = 0; i < num_nodes; ++i) {
if (nodes_to_delete[i]) {
mutation->RemoveNode(ctx.graph_view.GetNode(i));
}
}
TF_RETURN_IF_ERROR(mutation->Apply());
*optimized_graph = std::move(mutable_item.graph);
return absl::OkStatus();
}
}  // namespace grappler
}  // namespace tensorflow

#include "tensorflow/core/grappler/optimizers/remapper.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
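// Test fixture that opts into cuDNN persistent batch norm and cuBLASLt so
// the GPU fusion paths under test are reachable.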
class RemapperTest : public GrapplerTest {
protected:
void SetUp() override {
setenv("TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT", "1", 1 );
setenv("TF_USE_CUBLASLT", "1", 1 );
}
};
TEST_F(RemapperTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt = ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f}, {2, 1, 1, 1});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {2, 1, 1, 1});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f}, {1});
Output offset = ops::Const(s.WithOpName("offset"), {0.123f}, {1});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f}, {1});
Output variance = ops::Const(s.WithOpName("variance"), {0.57f}, {1});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm bn(s.WithOpName("batch_norm"), x, scale, offset, mean,
variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FusedBatchNormNCHW) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt =
ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f, 1.0f, 2.0f, 3.0f, 100.0f},
{1, 3, 1, 2});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {1, 3, 1, 2});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f, 7.0f, 123.0f}, {3});
Output offset =
ops::Const(s.WithOpName("offset"), {0.123f, 2.1f, 0.55f}, {3});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f, 8.3f, 3.1f}, {3});
Output variance =
ops::Const(s.WithOpName("variance"), {0.57f, 1.0f, 2.0f}, {3});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
attr = attr.DataFormat("NCHW");
ops::FusedBatchNorm bn(s.WithOpName("batch_norm").WithDevice("/device:GPU:0"),
x, scale, offset, mean, variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-3);
}
}
TEST_F(RemapperTest, FuseBatchNormWithRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 3);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 3);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 3);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
}
}
#endif
TEST_F(RemapperTest, FuseBatchNormWithAddAndRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, side_input_cast);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"side_input", side_input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "side_input_cast");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithAddAndReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input =
ops::FusedBatchNormV3(s.WithOpName("fused_batch_norm_side_input"),
side_input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, fbn_side_input.y);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_side_input_grad"), relu_grad,
side_input_cast, scale, fbn_side_input.reserve_space_1,
fbn_side_input.reserve_space_2, fbn_side_input.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto fetch3 =
ops::Identity(s.WithOpName("fetch3"), fbn_side_input_grad.x_backprop);
auto fetch4 =
ops::Identity(s.WithOpName("fetch4"), fbn_side_input_grad.scale_backprop);
auto fetch5 = ops::Identity(s.WithOpName("fetch5"),
fbn_side_input_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2", "fetch3", "fetch4", "fetch5"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t},
{"side_input", side_input_t},
{"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "fused_batch_norm_side_input");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "relu_grad") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm_grad:5");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 4);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 6);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 6);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
test::ExpectClose(tensors[3], tensors_expected[3], 1e-2, 1e-2);
test::ExpectClose(tensors[4], tensors_expected[4], 1e-2, 1e-2);
test::ExpectClose(tensors[5], tensors_expected[5], 1e-2, 1e-2);
}
}
#endif
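// Parameterized over rank (Conv2D vs. Conv3D) and data type: verifies that
// Conv+BiasAdd is rewritten into _FusedConv2D/_FusedConv3D with
// fused_ops == {"BiasAdd"} and that results match the unfused graph
// (looser tolerance for bfloat16).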
class RemapperFuseConvWithBias : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseConvWithBias, Conv2D_F32) { RunTest<2, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv3D_F32) { RunTest<3, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv2DWithBias with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBias, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv3DWithBias with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
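// Same as above, with a trailing activation (Relu/Relu6/Elu/LeakyRelu);
// expects fused_ops == {"BiasAdd", <activation>} and, for LeakyRelu, the
// alpha carried over as the leakyrelu_alpha attribute.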
class RemapperFuseConvWithBiasAndActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
float leakyrelu_alpha = 0.5;
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv2DWithBiasAndActivation with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv3DWithBiasAndActivation with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
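// oneDNN-only: Conv+BiasAdd+Add+activation collapses into a single fused
// conv with fused_ops == {"BiasAdd", "Add", <activation>} and num_args == 2.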
class RemapperFuseConvWithBiasAndAddActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 32, 32, 128});
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 128});
float leakyrelu_alpha = 0.5;
std::vector<int> strides = {1, 1, 1, 1};
if (dim == 3) {
input_shape = Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = Placeholder::Shape({128});
add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
bias_t = GenerateRandomTensor<DT_FLOAT>({128});
add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
}
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add =
Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(fetch,
ops::internal::LeakyRelu(activate, add, attr));
}
return ops::Identity(fetch, bias);
}();
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(fetch,
ops::internal::LeakyRelu(activate, add, attr));
}
return ops::Identity(fetch, bias);
}();
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 3);
EXPECT_EQ("BiasAdd", fused_ops[0]);
EXPECT_EQ("Add", fused_ops[1]);
EXPECT_EQ(activation, fused_ops[2]);
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
}
};
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv2D_BF16) {
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv3D_BF16) {
RunTest<3, DT_BFLOAT16>();
}
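// Conv followed by Squeeze then BiasAdd: the BiasAdd is folded into the
// fused conv and the Squeeze is re-wired to run after it.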
class RemapperFuseConvWithSqueezeAndBias : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 1, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 1, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = ops::Placeholder::Shape({8, 4, 32, 1, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 1, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), conv,
ops::Squeeze::Attrs().Axis({2}));
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), squeeze, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), conv,
ops::Squeeze::Attrs().Axis({3}));
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), squeeze, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "conv") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
} else if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "Squeeze");
ASSERT_GE(node.input_size(), 1);
EXPECT_EQ(node.input(0), "conv");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv2D_FP32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv3D_FP32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConvWithSqueezeAndBias with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConvWithSqueezeAndBias with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
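// Pad -> Conv2D -> BiasAdd: the BiasAdd should fuse into _FusedConv2D while
// the preceding Pad remains intact as the fused convolution's input.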
TEST_F(RemapperTest, FusePadPrecededConv2DWithBias) {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 224, 224, 3});
auto filter_shape = ops::Placeholder::Shape({7, 7, 3, 64});
auto paddings_shape = ops::Placeholder::Shape({4, 2});
auto bias_shape = ops::Placeholder::Shape({64});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias_in = Placeholder(s.WithOpName("bias_in"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 2, 2, 1};
auto padding_const =
ops::Const(s.WithOpName("padding"), {0, 0, 3, 3, 3, 3, 0, 0}, {4, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv = ops::Conv2D(s.WithOpName("conv"), pad, filter, strides, "VALID");
auto bias = ops::BiasAdd(s.WithOpName("bias"), conv, bias_in);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias);
auto input_t = GenerateTensorWithSetRandom<DT_FLOAT>({8, 224, 224, 3});
auto filter_t = GenerateTensorWithSetRandom<DT_FLOAT>({7, 7, 3, 64});
auto bias_t = GenerateTensorWithSetRandom<DT_FLOAT>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias_in", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "pad");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.input(2), "bias_in");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
#ifdef INTEL_MKL
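// Conv3D followed by Add with a rank-1 Const addend: the Add is semantically
// a BiasAdd and should be rewritten into _FusedConv3D with
// fused_ops = {"BiasAdd"}.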
TEST_F(RemapperTest, FuseConv3DWithBias) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"),
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}, {6});
auto add = ops::Add(s.WithOpName("b_add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "b_add") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
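// Same fusion as above, but the Const addend has a broadcastable 5-D shape
// ({1, 1, 1, 1, 6}); the remapper should still treat it as a bias.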
TEST_F(RemapperTest, FuseConv3DWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({1, 1, 1, 1, 6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto a_placeholder =
Placeholder(s.WithOpName("add_placeholder"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 1, 1, 1, 6});
auto add = ops::Add(s.WithOpName("add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 1, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
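// 2-D variant: Conv2D + Add with a broadcastable Const addend fuses into
// _FusedConv2D with fused_ops = {"BiasAdd"}.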
TEST_F(RemapperTest, FuseConv2DWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({1, 1, 6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto a_placeholder =
Placeholder(s.WithOpName("add_placeholder"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 1, 6});
auto add = ops::Add(s.WithOpName("add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
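// MatMul + Add with a {1, 64} Const addend: the Add is treated as a bias and
// fused into _FusedMatMul.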
TEST_F(RemapperTest, FuseMatmulWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto lhs = Placeholder(s.WithOpName("lhs"), DT_FLOAT, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DT_FLOAT, rhs_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 64});
auto add = ops::Add(s.WithOpName("add"), matmul, add_const);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto lhs_t = GenerateTensorWithSetRandom<DT_FLOAT>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DT_FLOAT>({32, 64});
auto add_t = GenerateTensorWithSetRandom<DT_FLOAT>({1, 64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
  EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
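// x * tanh(softplus(x)) is the Mish activation; the remapper should collapse
// the Softplus/Tanh/Mul subgraph into a single _MklFusedMish node.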
class RemapperFuseSoftplusTanhMul : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
auto mul = ops::Mul(s.WithOpName("mul"), tanh, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), mul);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "mul") {
EXPECT_EQ(node.op(), "_MklFusedMish");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "bias_add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16) {
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
} else {
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseSoftplusTanhMul, FP32) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseSoftplusTanhMul, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Test only applicable to oneDNN.";
RunTest<DT_BFLOAT16>();
}
#endif
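// Recognizes a hand-rolled layer normalization subgraph (Mean,
// SquaredDifference, Rsqrt, and the Mul/Sub/Add recombination) and rewrites
// it as a single _MklLayerNorm node.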
TEST_F(RemapperTest, FuseMklLayerNorm) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TensorShape input_shape = TensorShape({2, 4});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {2, 4});
auto add = ops::Add(s.WithOpName("b_add"), add_const, input);
auto r_indices = ops::Const(s.WithOpName("r_indices"), {1}, {1});
ops::Mean::Attrs attrs;
attrs = attrs.KeepDims(true);
auto mean = ops::Mean(s.WithOpName("mean"), add, r_indices, attrs);
auto s_diff = ops::SquaredDifference(s.WithOpName("s_diff"), mean, add);
auto variance = ops::Mean(s.WithOpName("variance"), s_diff, r_indices, attrs);
auto e_const = ops::Const(s.WithOpName("e_const"), {0.001f}, {});
auto add_1 = ops::Add(s.WithOpName("add_1"), e_const, variance);
auto rsqrt = ops::Rsqrt(s.WithOpName("rsqrt"), add_1);
auto g_const = ops::Const(s.WithOpName("g_const"), 1.0f, {4});
auto mul = ops::Mul(s.WithOpName("mul"), rsqrt, g_const);
auto mul_1 = ops::Mul(s.WithOpName("mul_1"), mul, add);
auto mul_2 = ops::Mul(s.WithOpName("mul_2"), mul, mean);
auto b_const = ops::Const(s.WithOpName("b_const"), 0.0f, {4});
auto sub = ops::Sub(s.WithOpName("sub"), b_const, mul_2);
auto add_2 = ops::Add(s.WithOpName("add_2"), mul_1, sub);
auto fetch = ops::Identity(s.WithOpName("fetch"), add_2);
auto input_t = GenerateTensorWithSetRandom<DT_FLOAT>({2, 4});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_2") {
EXPECT_EQ(node.op(), "_MklLayerNorm");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "b_add");
EXPECT_EQ(node.input(1), "g_const");
EXPECT_EQ(node.input(2), "b_const");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-4);
}
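// Alternative layer-norm pattern, (x - mean) * rsqrt(var + eps) * gamma +
// beta built with Sub/AddV2, which should also map to _MklLayerNorm.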
class FuseMklLayerNormPattern : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TensorShape input_shape = TensorShape({2, 4});
auto input = Placeholder(s.WithOpName("input"), DTYPE,
ops::Placeholder::Shape(input_shape));
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {2, 4});
auto add = ops::Add(s.WithOpName("b_add"), add_const, input);
auto r_indices = ops::Const(s.WithOpName("r_indices"), {1}, {1});
ops::Mean::Attrs attrs;
attrs = attrs.KeepDims(true);
auto mean = ops::Mean(s.WithOpName("mean"), add, r_indices, attrs);
auto sub = ops::Sub(s.WithOpName("sub"), add, mean);
auto s_diff = ops::SquaredDifference(s.WithOpName("s_diff"), mean, add);
auto variance =
ops::Mean(s.WithOpName("variance"), s_diff, r_indices, attrs);
auto e_const = ops::Const(s.WithOpName("e_const"), {0.001f}, {});
auto add_1 = ops::AddV2(s.WithOpName("add_1"), e_const, variance);
auto rsqrt = ops::Rsqrt(s.WithOpName("rsqrt"), add_1);
auto mul = ops::Mul(s.WithOpName("mul"), sub, rsqrt);
auto g_const = ops::Const(s.WithOpName("g_const"), 1.0f, {4});
auto mul_1 = ops::Mul(s.WithOpName("mul_1"), g_const, mul);
auto b_const = ops::Const(s.WithOpName("b_const"), 0.0f, {4});
auto add_2 = ops::AddV2(s.WithOpName("add_2"), mul_1, b_const);
auto fetch = ops::Identity(s.WithOpName("fetch"), add_2);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({2, 4});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_2") {
EXPECT_EQ(node.op(), "_MklLayerNorm");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "b_add");
EXPECT_EQ(node.input(1), "g_const");
EXPECT_EQ(node.input(2), "b_const");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-4);
}
};
TEST_F(FuseMklLayerNormPattern, F32) { RunTest<DT_FLOAT>(); }
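// AsString followed by StringToHashBucketFast should be replaced by the
// single _TensorToHashBucketFast op, carrying num_buckets as an attribute.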
class RemapperTensorToHashBucketTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
int num_buckets = 100;
auto to_string = ops::AsString(s.WithOpName("to_string"), input);
auto to_bucket = ops::StringToHashBucketFast(s.WithOpName("to_bucket"),
to_string, num_buckets);
auto fetch = ops::Identity(s.WithOpName("fetch"), to_bucket);
auto input_t = GenerateRandomTensor<DTYPE>({8, 32, 32, 3});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string input_device =
GetNumAvailableGPUs() > 0 ? "/device:GPU:0" : "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
if (item.graph.node(i).name() == "input") {
item.graph.mutable_node(i)->set_device(input_device);
} else {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "to_bucket") {
EXPECT_EQ(node.op(), "_TensorToHashBucketFast");
ASSERT_GE(node.input_size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.attr().at("num_buckets").i(), num_buckets);
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int64_t>(tensors[0], tensors_expected[0]);
}
};
TEST_F(RemapperTensorToHashBucketTest, I8) { RunTest<DT_INT8>(); }
TEST_F(RemapperTensorToHashBucketTest, I16) { RunTest<DT_INT16>(); }
TEST_F(RemapperTensorToHashBucketTest, I32) { RunTest<DT_INT32>(); }
TEST_F(RemapperTensorToHashBucketTest, I64) { RunTest<DT_INT64>(); }
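// MatMul + BiasAdd -> _FusedMatMul, placed on GPU for half/float when one is
// available and on CPU otherwise.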
class RemapperFuseMatMulWithBiasTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device =
GetNumAvailableGPUs() > 0 && (DTYPE == DT_HALF || DTYPE == DT_FLOAT)
? "/device:GPU:0"
: "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
    EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16 || DTYPE == DT_HALF)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseMatMulWithBiasTest, F16) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA) || !TF_HIPBLASLT
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBias with half, which is only "
"supported in CUDA.";
}
RunTest<DT_HALF>();
}
TEST_F(RemapperFuseMatMulWithBiasTest, F32) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA)
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBias with float, which is only "
"supported in CUDA.";
}
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseMatMulWithBiasTest, Bf16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseMatMulWithBias with bfloat16.";
RunTest<DT_BFLOAT16>();
}
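// Conv2D + BiasAdd + Relu on GPU should fuse into a single _FusedConv2D with
// fused_ops = {"BiasAdd", "Relu"}. (Currently disabled.)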
TEST_F(RemapperTest, DISABLED_FuseConv2DWithBiasAndActivationOnGPU) {
#if !defined(GOOGLE_CUDA)
  GTEST_SKIP() << "No CUDA, skipping FuseConv2DWithBiasAndActivation on GPU";
#endif
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({3, 3, 3, 128});
auto bias_shape = Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv = ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
return ops::Identity(fetch, ops::Relu(activate, bias_add));
}();
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({3, 3, 3, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Relu");
found++;
}
}
EXPECT_EQ(found, 1);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
class RemapperFuseMatMulWithBiasAndActivationTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
std::vector<string> activations = {"Relu", "Relu6", "Elu", "LeakyRelu"};
#if !defined(GOOGLE_CUDA)
activations.push_back("Tanh");
#endif
for (const string& activation : activations) {
if (DTYPE == DT_HALF && activation != "Relu") continue;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
#if !defined(GOOGLE_CUDA)
} else if (activation == "Tanh") {
return ops::Identity(fetch, ops::Tanh(activate, bias_add));
#endif
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device = GetNumAvailableGPUs() > 0 &&
(DTYPE == DT_HALF || DTYPE == DT_FLOAT) &&
activation == "Relu"
? "/device:GPU:0"
: "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
      EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16 || DTYPE == DT_HALF)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, F16) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA) || !TF_HIPBLASLT
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBiasAndActivationTest with half, "
"which is only supported in CUDA.";
}
RunTest<DT_HALF>();
}
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, F32) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA)
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBiasAndActivationTest with float, "
"which is only supported in CUDA.";
}
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, Bf16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseMatMulWithBiasAndActivation with bfloat16.";
RunTest<DT_BFLOAT16>();
}
TEST_F(RemapperTest, FuseConv2DWithBatchNorm) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
using ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto scale_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT, scale_shape);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT, scale_shape);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT, scale_shape);
auto variance = Placeholder(s.WithOpName("variance"), DT_FLOAT, scale_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv = ops::Conv2D(
s.WithOpName("conv"), input, filter, strides, "EXPLICIT",
ops::Conv2D::Attrs().ExplicitPaddings({0, 0, 1, 2, 3, 4, 0, 0}));
ops::FusedBatchNorm::Attrs attrs;
attrs = attrs.IsTraining(false);
auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv, scale,
offset, mean, variance, attrs);
auto fetch = ops::Identity(s.WithOpName("fetch"), batch_norm.y);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto scale_t = GenerateRandomTensor<DT_FLOAT>({128});
auto offset_t = GenerateRandomTensor<DT_FLOAT>({128});
auto mean_t = GenerateRandomTensor<DT_FLOAT>({128});
auto variance_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t},
{"scale", scale_t}, {"offset", offset_t},
{"mean", mean_t}, {"variance", variance_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "batch_norm") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 4);
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "offset");
EXPECT_EQ(node.input(4), "mean");
EXPECT_EQ(node.input(5), "variance");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "FusedBatchNorm");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6, 1e-4);
}
TEST_F(RemapperTest, FuseConv2DWithBatchNormAndActivation) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
using ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto scale_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT, scale_shape);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT, scale_shape);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT, scale_shape);
auto variance =
Placeholder(s.WithOpName("variance"), DT_FLOAT, scale_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
ops::FusedBatchNorm::Attrs attrs;
attrs = attrs.IsTraining(false);
auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv,
scale, offset, mean, variance, attrs);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, batch_norm.y));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, batch_norm.y));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, batch_norm.y));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, batch_norm.y, attr));
}
return ops::Identity(fetch, batch_norm.y);
}();
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto scale_t = GenerateRandomTensor<DT_FLOAT>({128});
auto offset_t = GenerateRandomTensor<DT_FLOAT>({128});
auto mean_t = GenerateRandomTensor<DT_FLOAT>({128});
auto variance_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t},
{"scale", scale_t}, {"offset", offset_t},
{"mean", mean_t}, {"variance", variance_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 4);
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "offset");
EXPECT_EQ(node.input(4), "mean");
EXPECT_EQ(node.input(5), "variance");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "FusedBatchNorm");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6, 1e-4);
}
}
#ifdef INTEL_MKL
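// Conv3D + BiasAdd + AddN with a side input should fuse into _FusedConv3D
// with fused_ops = {"BiasAdd", "Add"} and two extra args.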
TEST_F(RemapperTest, FuseConv3DWithBiasAndAddN) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add = Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv = ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::AddN(s.WithOpName("add_op"),
std::initializer_list<Input>{input_add, bias_add});
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_op") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
TEST_F(RemapperTest, FuseConv3DWithBiasAndAdd) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add = Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv = ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_op") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
TEST_F(RemapperTest, FuseConv2DWithSemanticAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 6});
auto filter_shape_1 = ops::Placeholder::Shape({1, 1, 6, 6});
auto semanticadd_shape = ops::Placeholder::Shape({6});
auto bias_shape = ops::Placeholder::Shape({6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto filter_1 =
Placeholder(s.WithOpName("filter_1"), DT_FLOAT, filter_shape_1);
auto semanticadd =
Placeholder(s.WithOpName("semanticadd"), DT_FLOAT, semanticadd_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add = ops::Add(s.WithOpName("add"), semanticadd, conv);
auto conv_1 =
ops::Conv2D(s.WithOpName("conv_1"), add, filter_1, strides, "VALID");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv_1, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto input_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(input_shape.shape_.dim_sizes()));
auto filter_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(filter_shape.shape_.dim_sizes()));
auto filter_tensor_1 = GenerateRandomTensor<DT_FLOAT>(
TensorShape(filter_shape_1.shape_.dim_sizes()));
auto semanticadd_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(semanticadd_shape.shape_.dim_sizes()));
auto bias_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(bias_shape.shape_.dim_sizes()));
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_tensor},
{"filter", filter_tensor},
{"filter_1", filter_tensor_1},
{"semanticadd", semanticadd_tensor},
{"bias", bias_tensor}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "add");
EXPECT_EQ(node.input(1), "filter_1");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "semanticadd");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
class RemapperFusePadConv3D : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto paddings_shape = ops::Placeholder::Shape({5, 2});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto padding_const = ops::Const(s.WithOpName("padding"),
{0, 0, 1, 1, 1, 1, 1, 1, 0, 0}, {5, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv =
ops::Conv3D(s.WithOpName("conv"), pad, filter, strides, "VALID");
auto fetch = ops::Identity(s.WithOpName("fetch"), conv);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "conv") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 2);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFusePadConv3D, Conv3D_FP32) {
if (!IsMKLEnabled())
GTEST_SKIP()
<< "Pad fusion with Conv3D is only enabled with oneDNN, skipping "
"RemapperFusePadConv3D with FP32.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFusePadConv3D, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFusePadConv3D with bfloat16.";
RunTest<DT_BFLOAT16>();
}
class RemapperFusePadWithFusedConv3D : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"", "Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto paddings_shape = ops::Placeholder::Shape({5, 2});
auto strides = {1, 1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto padding_const = ops::Const(s.WithOpName("padding"),
{0, 0, 1, 1, 1, 1, 1, 1, 0, 0}, {5, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv =
ops::Conv3D(s.WithOpName("conv"), pad, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output_1;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output_1));
item.graph = std::move(output_1);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
string fused_node_name;
std::vector<string> expected_fused_ops = {"BiasAdd"};
if (activation.empty()) {
fused_node_name = "bias_add";
} else {
fused_node_name = "activation";
expected_fused_ops.push_back(activation);
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == fused_node_name) {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), expected_fused_ops.size());
for (int i = 0; i < fused_ops.size(); ++i) {
EXPECT_EQ(fused_ops[i], expected_fused_ops[i]);
}
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFusePadWithFusedConv3D, FusedConv3D_FP32) {
if (!IsMKLEnabled())
GTEST_SKIP()
<< "Pad fusion with FusedConv3D is only enabled with oneDNN, skipping "
"RemapperFusePadWithFusedConv3D with FP32.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFusePadWithFusedConv3D, FusedConv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFusePadWithFusedConv3D with bfloat16.";
RunTest<DT_BFLOAT16>();
}
#endif
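// Maximum(alpha * x, x) with 0 < alpha < 1 is the LeakyRelu pattern; the
// remapper should rewrite the Mul/Maximum pair as a single LeakyRelu node.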
class RemapperLeakyReluTest : public GrapplerTest {
protected:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto max_shape = ops::Placeholder::Shape({64, 64});
auto input = Placeholder(s.WithOpName("input"), DTYPE, max_shape);
float epsilon = 0.3f;
typedef typename EnumToDataType<DTYPE>::Type CType;
auto leakyrelu_alpha = ops::Const<CType>(s.WithOpName("alpha"), epsilon);
auto mul = ops::Mul(s.WithOpName("Mul"), input, leakyrelu_alpha);
auto max = ops::Maximum(s.WithOpName("Maximum"), mul, input);
auto fetch = ops::Identity(s.WithOpName("fetch"), max);
auto max_t = GenerateTensorWithSetRandom<DTYPE>({64, 64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", max_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "Maximum") {
EXPECT_EQ(node.op(), "LeakyRelu");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "input");
++found;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
float atol = 1e-6, rtol = 1e-6;
if (DTYPE == DT_BFLOAT16) {
atol = 1e-2;
rtol = 1e-2;
}
test::ExpectClose(tensors[0], tensors_expected[0], atol, rtol);
}
};
TEST_F(RemapperLeakyReluTest, F32) { RunTest<DT_FLOAT>(); }
TEST_F(RemapperLeakyReluTest, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperLeakyRelu with bfloat16.";
RunTest<DT_BFLOAT16>();
}
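// Two-pass test for Conv2D/Conv3D + BiasAdd followed by a composite
// activation (LeakyRelu, or Mish written as Mul(x, Tanh(Softplus(x)))); after
// both passes a single fused convolution should carry the activation.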
class RemapperFuseFusedConvWithFusedActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"LeakyRelu", "Mish"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
float leakyrelu_alpha = 0.5;
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
if (activation == "LeakyRelu") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}();
} else if (activation == "Mish") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
return ops::Identity(fetch, ops::Mul(activate, bias_add, tanh));
}();
}
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
if (activation == "LeakyRelu") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}();
} else if (activation == "Mish") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
return ops::Identity(fetch, ops::Mul(activate, bias_add, tanh));
}();
}
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
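      // Run the remapper twice: the first pass fuses Conv + BiasAdd into a
      // _FusedConv op, and the second pass folds the activation into that
      // fused op.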
Remapper optimizer(RewriterConfig::ON);
GraphDef output_1;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output_1));
item.graph = std::move(output_1);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFuseFusedConvWithFusedActivation with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFuseFusedConvWithFusedActivation with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
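// Checks that the max(alpha * x, x) LeakyRelu pattern is still matched when
// the alpha constant carries control dependencies, and that those control
// edges end up on the rewritten LeakyRelu node.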
class RemapperControlDependencyPatternMatcher : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input0_shape = ops::Placeholder::Shape({1});
auto input0 = Placeholder(s.WithOpName("input_0"), DTYPE, input0_shape);
auto input0_t = GenerateTensorWithSetRandom<DTYPE>({1});
auto input1_shape = ops::Placeholder::Shape({1});
auto input1 = Placeholder(s.WithOpName("input_1"), DTYPE, input1_shape);
auto input1_t = GenerateTensorWithSetRandom<DTYPE>({1});
auto add0 = ops::Add(s.WithOpName("add_0"), input0, input1);
auto add1 = ops::Add(s.WithOpName("add_1"), input0, input1);
float leakyrelu_alpha = 0.18;
typedef typename EnumToDataType<DTYPE>::Type CType;
auto const1 = ops::Const<CType>(
s.WithOpName("alpha").WithControlDependencies(
std::vector<Operation>{add0.operation, add1.operation}),
leakyrelu_alpha);
auto sub = ops::Subtract(s.WithOpName("sub_0"), input0, input1);
auto mul = ops::Mul(s.WithOpName("mul_0"), const1, sub);
auto max = ops::Maximum(s.WithOpName("max_0"), mul, sub);
auto softplus = ops::Softplus(s.WithOpName("softplus"), max);
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input_0", input0_t}, {"input_1", input1_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
Status status;
    utils::MutableGraphView graph_view(&output, &status);
    TF_ASSERT_OK(status);
const int num_nodes = output.node_size();
int found = 0;
for (int i = 0; i < num_nodes; i++) {
auto* node = graph_view.GetNode(i)->node();
if (node->name() == "max_0") {
EXPECT_EQ(node->op(), "LeakyRelu");
EXPECT_EQ(node->attr().at("alpha").f(), leakyrelu_alpha);
ASSERT_EQ(node->input_size(), 3);
EXPECT_EQ(node->input(0), "sub_0");
auto* node_view = graph_view.GetNode(i);
EXPECT_EQ(node_view->NumControllingFanins(), 2);
        // The two control inputs must be "^add_0" and "^add_1", in either
        // order.
        if ((node->input(1) == "^add_0" && node->input(2) == "^add_1") ||
            (node->input(1) == "^add_1" && node->input(2) == "^add_0")) {
          found++;
        }
}
}
EXPECT_EQ(found, 1);
}
};
TEST_F(RemapperControlDependencyPatternMatcher, F32) { RunTest<DT_FLOAT>(); }
TEST_F(RemapperControlDependencyPatternMatcher, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperControlDependencyPatternMatcher with bfloat16.";
RunTest<DT_BFLOAT16>();
}
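// With XLA CPU global JIT enabled and XLA auto-clustering signalled to the
// remapper (the third constructor argument below), MatMul + BiasAdd must be
// left unfused so that XLA can handle the fusion itself.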
class XlaCpuJitDisableFusionTest : public RemapperTest {
protected:
void SetUp() override {
setenv("TF_XLA_FLAGS", "--tf_xla_cpu_global_jit", 1);
}
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device = "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON, RewriterConfig::NO_CONVERSION_ON_CPU,
true);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "BiasAdd");
found++;
} else if (node.name() == "matmul") {
EXPECT_EQ(node.op(), "MatMul");
found++;
}
}
EXPECT_EQ(2, found);
}
};
#if !(DNNL_AARCH64_USE_ACL || GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
TEST_F(XlaCpuJitDisableFusionTest, MatMulWithBias) { RunTest<DT_FLOAT>(); }
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/remapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/remapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b935a03c-90c5-43fc-8348-cf2a6b8dcea3 | cpp | tensorflow/tensorflow | generic_layout_optimizer | tensorflow/core/grappler/optimizers/generic_layout_optimizer.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include <utility>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kNHWC[] = "NHWC";
constexpr char kNCHW[] = "NCHW";
constexpr float kGPURatioThreshold = 0.5;
constexpr float kConvGPUExpectedDtypeThreshold = 0.5;
struct MutableNodeViewFormatter {
void operator()(std::string* out, utils::MutableNodeView* node_view) const {
absl::StrAppend(out, node_view->node()->name());
}
};
struct GpuStats {
int num_gpus;
int num_voltas;
int num_amperes;
};
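// Counts the GPUs in the cluster and, from the "architecture" entry of each
// device's environment, how many of them are at least Volta (compute
// capability 7.0) or Ampere (8.0).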
inline GpuStats GetNumGPUs(const Cluster& cluster) {
auto devices = cluster.GetDevices();
GpuStats gpu_stats{};
for (const auto& device : devices) {
if (device.second.type() != kGPU) {
continue;
}
gpu_stats.num_gpus++;
auto compute_capability_it =
device.second.environment().find("architecture");
if (compute_capability_it == device.second.environment().end()) {
continue;
}
double compute_capability = 0.0;
if (absl::SimpleAtod(compute_capability_it->second, &compute_capability)) {
if (compute_capability >= 7.0) gpu_stats.num_voltas++;
if (compute_capability >= 8.0) gpu_stats.num_amperes++;
}
}
return gpu_stats;
}
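// Returns true if the graph contains a Conv2D/Conv3D backprop (filter or
// input) node with the given data type placed on the given device type.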
inline bool ConvBackpropExists(const TransposeContext& context,
absl::string_view device,
const DataType& data_type) {
for (const auto& node : context.graph_view->GetNodes()) {
const auto* node_def = node.node();
if (!IsConv2DBackpropFilter(*node_def) &&
!IsConv2DBackpropInput(*node_def) &&
!IsConv3DBackpropFilterV2(*node_def) &&
!IsConv3DBackpropInputV2(*node_def)) {
continue;
}
const string& device_name = GetDeviceName(*node_def);
string device_type;
string task;
if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
!absl::StrContains(absl::AsciiStrToLower(device_type),
absl::AsciiStrToLower(device))) {
continue;
}
const auto* t_attr = node.GetAttr("T");
if (t_attr == nullptr) {
continue;
}
if (t_attr->type() == data_type) {
return true;
}
}
return false;
}
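// Picks the conversion direction. The default on GPU is NHWC -> NCHW; the
// direction is swapped to NCHW -> NHWC when NHWC is explicitly enforced, or
// when at least half of the GPU convolutions use a dtype expected to prefer
// NHWC on the available hardware (fp16 on Volta+, bf16 on Ampere+, or fp32
// with TensorFloat-32 on Ampere+ and no fp32 conv backprop in the graph).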
inline std::pair<string, string> GetSrcAndDstDataFormats(
const TransposeContext& context, GpuStats gpu_stats) {
string src_format = kNHWC;
string dst_format = kNCHW;
const bool is_NHWC_enforced =
(!context.enforced_layout.empty() && context.enforced_layout == "NHWC");
const bool volta_ready =
(static_cast<float>(gpu_stats.num_voltas) /
static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
const bool ampere_ready =
(static_cast<float>(gpu_stats.num_amperes) /
static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
int num_conv_gpu = 0;
int num_conv_gpu_prefer_swap = 0;
bool fp32_backprop = ConvBackpropExists(context, kGPU, DT_FLOAT);
for (const auto& node : context.graph_view->GetNodes()) {
const auto* node_def = node.node();
if (!IsConv2D(*node_def) && !IsConv3D(*node_def)) {
continue;
}
const string& device_name = GetDeviceName(*node_def);
string device_type;
string task;
if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
!absl::StrContains(absl::AsciiStrToLower(device_type),
absl::AsciiStrToLower(kGPU))) {
continue;
}
num_conv_gpu++;
const auto* t_attr = node.GetAttr("T");
if (t_attr == nullptr) {
continue;
}
const DataType dtype = t_attr->type();
if ((volta_ready && dtype == DT_HALF) ||
(ampere_ready && dtype == DT_BFLOAT16) ||
(ampere_ready && dtype == DT_FLOAT &&
tsl::tensor_float_32_execution_enabled() && !fp32_backprop)) {
num_conv_gpu_prefer_swap++;
}
}
const bool should_swap =
num_conv_gpu > 0 &&
(static_cast<float>(num_conv_gpu_prefer_swap) /
static_cast<float>(num_conv_gpu)) >= kConvGPUExpectedDtypeThreshold;
if (is_NHWC_enforced || (context.enforced_layout.empty() && should_swap)) {
std::swap(src_format, dst_format);
}
VLOG(2) << "Layout conversion of " << src_format << " to " << dst_format
<< " will take place.";
return {src_format, dst_format};
}
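// Runs the registered transposer on every layout-sensitive op; a
// layout-sensitive op without a transposer is an error. ExpandLayoutAgnosticOp
// below does the same for layout-agnostic ops.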
Status ExpandLayoutSensitiveOp(TransposeContext* context,
TransposerFactory* transposer_factory) {
const int num_nodes = context->num_nodes;
for (int i = 0; i < num_nodes; ++i) {
auto* node_view = context->graph_view->GetNode(i);
auto* node_def = node_view->node();
if (IsLayoutSensitiveOp(*node_def)) {
std::shared_ptr<Transposer> transposer =
transposer_factory->GetTransposer(*node_def);
if (transposer == nullptr) {
return Status(
absl::StatusCode::kNotFound,
absl::StrCat(
"Layout sensitive operation should have a transposer. Node: ",
node_def->DebugString()));
}
TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
}
}
return absl::OkStatus();
}
Status ExpandLayoutAgnosticOp(TransposeContext* context,
TransposerFactory* transposer_factory) {
const int num_nodes = context->num_nodes;
for (int i = 0; i < num_nodes; ++i) {
auto* node_view = context->graph_view->GetNode(i);
auto* node_def = node_view->node();
if (IsLayoutAgnosticOp(*node_def)) {
const auto& transposer = transposer_factory->GetTransposer(*node_def);
if (transposer == nullptr) {
return Status(
absl::StatusCode::kNotFound,
absl::StrCat(
"Layout agnostic operation should have a transposer. Node: ",
node_def->DebugString()));
}
TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
}
}
return absl::OkStatus();
}
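// Two chained Transposes with constant permutations cancel out when the
// permutations are inverses of each other, i.e.
// fanout_perm[fanin_perm[i]] == i for all i.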
inline bool IsCancellableConstPermTransposeNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
Tensor fanout_tensor;
if (!GetValueAttrFromConstInputNode(fanout_transpose, IsTranspose, 1,
&fanout_tensor)) {
return false;
}
Tensor fanin_tensor;
if (!GetValueAttrFromConstInputNode(fanin_transpose, IsTranspose, 1,
&fanin_tensor)) {
return false;
}
if (fanout_tensor.NumElements() != fanin_tensor.NumElements()) {
return false;
}
const auto& fanout_tensor_data = fanout_tensor.unaligned_flat<int32>();
const auto& fanin_tensor_data = fanin_tensor.unaligned_flat<int32>();
const int num_elements = fanout_tensor.NumElements();
for (int i = 0; i < num_elements; ++i) {
if (fanout_tensor_data(fanin_tensor_data(i)) != i) {
return false;
}
}
return true;
}
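// A pair of DataFormat ops cancels out when each one's src_format matches the
// other's dst_format.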
inline bool IsCancellableDataFormatNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
if (!IsDataFormatOp(fanout_transpose) || !IsDataFormatOp(fanin_transpose)) {
return false;
}
auto src_dst_match = [](const utils::MutableNodeView& src,
const utils::MutableNodeView& dst) {
const auto* src_format = src.GetAttr(kAttrSrcFormat);
if (src_format == nullptr) {
return false;
}
const auto* dst_format = dst.GetAttr(kAttrDstFormat);
if (dst_format == nullptr) {
return false;
}
return src_format->s() == dst_format->s();
};
return src_dst_match(fanin_transpose, fanout_transpose) &&
src_dst_match(fanout_transpose, fanin_transpose);
}
inline bool IsCancellableNodePair(
const utils::MutableNodeView& fanout_transpose,
const utils::MutableNodeView& fanin_transpose) {
return IsCancellableConstPermTransposeNodePair(fanout_transpose,
fanin_transpose) ||
IsCancellableDataFormatNodePair(fanout_transpose, fanin_transpose);
}
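// Erases mutually-cancelling node pairs added by this pass itself (only nodes
// with index >= the original node count are considered), forwarding their
// fanins to their fanouts.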
Status EraseCancellableNodes(TransposeContext* context) {
const int original_num_nodes = context->num_nodes;
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
const int num_nodes = graph_view->NumNodes();
for (int i = original_num_nodes; i < num_nodes; ++i) {
auto* node = graph_view->GetNode(i);
if (node->NumRegularFanins() < 1) {
continue;
}
const auto& regular_fanin_0 = node->GetRegularFanin(0);
auto* fanin_node = regular_fanin_0.node_view();
if (fanin_node->node_index() < original_num_nodes) {
continue;
}
if (!IsCancellableNodePair(*node, *fanin_node)) {
continue;
}
const auto& fanin_to_forward = fanin_node->GetRegularFanin(0);
TensorId fanin_id_to_forward(fanin_to_forward.node_view()->GetName(),
fanin_to_forward.index());
for (const auto& regular_fanout : node->GetRegularFanout(0)) {
mutation->AddOrUpdateRegularFanin(regular_fanout.node_view(),
regular_fanout.index(),
fanin_id_to_forward);
}
mutation->RemoveNode(node);
if (node->NumRegularFanins() > 1) {
mutation->RemoveNode(node->GetRegularFanin(1).node_view());
}
mutation->RemoveNode(fanin_node);
if (fanin_node->NumRegularFanins() > 1) {
mutation->RemoveNode(fanin_node->GetRegularFanin(1).node_view());
}
}
return mutation->Apply();
}
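// Cancels Transpose -> Pad -> Transpose chains: the paddings constant is
// permuted to match the transposed layout and the surrounding Transpose nodes
// are rewritten to Identity. Every fanout of the Pad must be a Transpose with
// the same permutation for the rewrite to apply.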
Status EraseCancellableNodesAroundPad(TransposeContext* context) {
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
absl::flat_hash_set<utils::MutableNodeView*> cancelled_transposes;
const int num_nodes = graph_view->NumNodes();
for (int i = 0; i < num_nodes; ++i) {
auto* transpose_after = graph_view->GetNode(i);
if (!IsTranspose(*transpose_after->node())) continue;
if (cancelled_transposes.contains(transpose_after)) continue;
const auto& transpose_after_fanin = transpose_after->GetRegularFanin(0);
auto* pad = transpose_after_fanin.node_view();
if (!IsPad(*pad->node())) continue;
const auto& pad_fanin_0 = pad->GetRegularFanin(0);
auto* transpose_before = pad_fanin_0.node_view();
if (!IsTranspose(*transpose_before->node())) continue;
if (transpose_before->NumRegularFanouts() != 1) continue;
if (!IsCancellableConstPermTransposeNodePair(*transpose_after,
*transpose_before))
continue;
Tensor paddings_t;
if (!GetValueAttrFromConstInputNode(*pad, IsPad, 1, &paddings_t)) continue;
const auto& pad_fanin_1 = pad->GetRegularFanin(1);
auto* paddings = pad_fanin_1.node_view();
if (paddings->NumRegularFanouts() != 1) continue;
Tensor permute_t;
if (!GetValueAttrFromConstInputNode(*transpose_after, IsTranspose, 1,
&permute_t))
continue;
std::vector<utils::MutableNodeView*> pad_fanout_transposes;
pad_fanout_transposes.emplace_back(transpose_after);
bool pad_has_unsupported_fanout = false;
for (auto& fanout : pad->GetRegularFanout(0)) {
auto* extra_transpose = fanout.node_view();
if (extra_transpose == transpose_after) continue;
Tensor extra_permute_t;
if (!GetValueAttrFromConstInputNode(*extra_transpose, IsTranspose, 1,
&extra_permute_t) ||
extra_permute_t.tensor_data() != permute_t.tensor_data()) {
pad_has_unsupported_fanout = true;
break;
}
pad_fanout_transposes.emplace_back(extra_transpose);
}
if (pad_has_unsupported_fanout) continue;
VLOG(0) << "Cancel Transpose nodes around Pad:"
<< " transpose_before=" << transpose_before->node()->name()
<< " pad=" << pad->node()->name() << " transpose_after="
<< absl::StrJoin(pad_fanout_transposes, ",",
MutableNodeViewFormatter());
auto permutation_s = absl::Span<int32>(permute_t.flat<int32>().data(),
permute_t.NumElements());
auto paddings_s = absl::Span<int32>(paddings_t.flat<int32>().data(),
paddings_t.NumElements());
TF_RETURN_IF_ERROR(
PermuteDouble(absl::StrCat("paddings in ", pad->GetName()),
permutation_s, &paddings_s));
AttrValue permuted_paddings_tensor;
paddings_t.AsProtoTensorContent(permuted_paddings_tensor.mutable_tensor());
mutation->AddOrUpdateNodeAttr(paddings, "value", permuted_paddings_tensor);
const auto transpose_to_identity =
[&cancelled_transposes,
&mutation](utils::MutableNodeView* transpose) -> void {
mutation->UpdateNodeOp(transpose, "Identity");
mutation->RemoveNodeAttr(transpose, "Tperm");
mutation->RemoveRegularFanin(transpose, 1);
cancelled_transposes.insert(transpose);
};
transpose_to_identity(transpose_before);
absl::c_for_each(pad_fanout_transposes, transpose_to_identity);
}
return mutation->Apply();
}
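// Drops the _output_shapes attribute from every node except _Arg nodes; the
// recorded shapes may no longer be accurate after the layout rewrite.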
Status EraseOutputShapeAttrs(TransposeContext* context) {
utils::MutableGraphView* graph_view = context->graph_view.get();
utils::Mutation* mutation = graph_view->GetMutationBuilder();
const int num_nodes = graph_view->NumNodes();
for (int i = 0; i < num_nodes; ++i) {
auto* node = graph_view->GetNode(i);
if (IsArg(*node->node())) {
continue;
}
mutation->RemoveNodeAttr(node, kAttrOutputShape);
TF_RETURN_IF_ERROR(mutation->Apply());
}
return absl::OkStatus();
}
}
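// Entry point: picks a conversion direction, transposes layout-sensitive ops
// (and, if any transposes were added or the mode is aggressive,
// layout-agnostic ops too), then erases the transpose pairs that cancel out.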
Status GenericLayoutOptimizer::Optimize(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output) {
if (cluster == nullptr) {
LOG(WARNING)
<< "generic layout optimizer was called with cluster == nullptr";
return errors::Aborted("cluster == nullptr.");
}
if (!enforced_layout_.empty() && enforced_layout_ != "NHWC" &&
enforced_layout_ != "NCHW") {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Invalid value for enforced_layout: ", enforced_layout_,
". Supported layouts: 'NHWC', 'NCHW'."));
}
const auto gpu_stats = GetNumGPUs(*cluster);
const bool is_aggressive = opt_level_ == RewriterConfig::AGGRESSIVE;
TransposeContext context;
context.enforced_layout = enforced_layout_;
if (gpu_stats.num_gpus > 0) {
TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
is_aggressive, item, cluster, &context));
const auto src_dst_formats = GetSrcAndDstDataFormats(context, gpu_stats);
context.AssignDeviceAndDataFormats(kGPU, src_dst_formats.first,
src_dst_formats.second);
} else {
TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
is_aggressive, item, cluster, &context));
switch (cpu_layout_conversion_) {
case RewriterConfig::NCHW_TO_NHWC:
context.AssignDeviceAndDataFormats(kCPU, kNCHW, kNHWC);
break;
case RewriterConfig::NHWC_TO_NCHW:
return errors::Aborted(
"Conversion from NHWC to NCHW is currently not available for "
"CPU.");
default:
*output = item.graph;
VLOG(2) << "No layout conversion will take place for CPU.";
return absl::OkStatus();
}
}
TransposerFactory transposer_factory;
TF_RETURN_IF_ERROR(ExpandLayoutSensitiveOp(&context, &transposer_factory));
if (context.graph.node_size() > context.num_nodes || is_aggressive) {
TF_RETURN_IF_ERROR(ExpandLayoutAgnosticOp(&context, &transposer_factory));
TF_RETURN_IF_ERROR(EraseCancellableNodes(&context));
TF_RETURN_IF_ERROR(EraseCancellableNodesAroundPad(&context));
    TF_RETURN_IF_ERROR(context.graph_view->SortTopologically(
        /*ignore_cycles=*/false, /*extra_dependencies=*/{}));
}
TF_RETURN_IF_ERROR(EraseOutputShapeAttrs(&context));
*output = context.graph;
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
using ::tensorflow::Scope;
using ::tensorflow::ops::Conv2D;
using ::tensorflow::ops::Conv3D;
using ::tensorflow::ops::Identity;
using ::tensorflow::ops::RandomUniform;
constexpr int kBatchSize = 32;
constexpr int kWidth = 10;
constexpr int kHeight = 10;
constexpr int kDepthIn = 8;
constexpr int kKernel = 3;
constexpr int kDepthOut = 16;
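// On CUDA/ROCm builds the optimizer is exercised in the NHWC -> NCHW
// direction on GPU; otherwise the tests drive the CPU NCHW -> NHWC path. The
// macros below parameterize shapes and expected formats accordingly.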
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
#define DIMS(n, h, w, c) \
{ n, h, w, c }
#define SRC_DATA_FORMAT "NHWC"
#define DST_DATA_FORMAT "NCHW"
#define DEVICE "GPU"
#define REWRITER_CONFIG \
RewriterConfig::DEFAULT, RewriterConfig::NO_CONVERSION_ON_CPU
#define PERMUTATION_SRC_TO_DST \
{ 0, 3, 1, 2 }
#define PERMUTATION_DST_TO_SRC \
{ 0, 2, 3, 1 }
#define DIMS_5D(n, d, h, w, c) \
{ n, d, h, w, c }
#define SRC_DATA_FORMAT_5D "NDHWC"
#define DST_DATA_FORMAT_5D "NCDHW"
#else
#define DIMS(n, h, w, c) \
{ n, c, h, w }
#define SRC_DATA_FORMAT "NCHW"
#define DST_DATA_FORMAT "NHWC"
#define DEVICE "CPU"
#define REWRITER_CONFIG RewriterConfig::DEFAULT, RewriterConfig::NCHW_TO_NHWC
#define PERMUTATION_SRC_TO_DST \
{ 0, 2, 3, 1 }
#define PERMUTATION_DST_TO_SRC \
{ 0, 3, 1, 2 }
#define DIMS_5D(n, d, h, w, c) \
{ n, c, d, h, w }
#define SRC_DATA_FORMAT_5D "NCDHW"
#define DST_DATA_FORMAT_5D "NDHWC"
#endif
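// Builds a small constant-input Conv2D in SRC_DATA_FORMAT, optionally pinned
// to a device.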
template <typename T = float>
Output SimpleConv2D(tensorflow::Scope* s, int input_size, int filter_size,
const string& padding, const string& device) {
int batch_size = 8;
int input_height = input_size;
int input_width = input_size;
int input_depth = 3;
int filter_count = 2;
int stride = 1;
TensorShape input_shape(
DIMS(batch_size, input_height, input_width, input_depth));
Tensor input_data(DataTypeToEnum<T>::value, input_shape);
test::FillIota<T>(&input_data, static_cast<T>(1));
Output input =
ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, input_depth, filter_count});
Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
test::FillIota<T>(&filter_data, static_cast<T>(1));
Output filter =
ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
Output conv = ops::Conv2D(s->WithOpName("Conv2D").WithDevice(device), input,
filter, DIMS(1, stride, stride, 1), padding,
ops::Conv2D::Attrs().DataFormat(SRC_DATA_FORMAT));
return conv;
}
Output SimpleConv2DBackpropInput(tensorflow::Scope* s, int input_size,
int filter_size, const string& padding,
bool dilated, const int input_sizes_length) {
int batch_size = 128;
int input_height = input_size;
int input_width = input_size;
int input_depth = 3;
int filter_count = 2;
int stride = 1;
TensorShape input_sizes_shape({input_sizes_length});
Tensor input_data(DT_INT32, input_sizes_shape);
if (input_sizes_length == 4) {
test::FillValues<int>(
&input_data, DIMS(batch_size, input_height, input_width, input_depth));
} else {
test::FillValues<int>(&input_data, {input_height, input_width});
}
Output input_sizes =
ops::Const(s->WithOpName("InputSizes"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, input_depth, filter_count});
Output filter =
ops::Variable(s->WithOpName("Filter"), filter_shape, DT_FLOAT);
int output_height = input_height;
int output_width = input_width;
TensorShape output_shape(
DIMS(batch_size, output_height, output_width, filter_count));
Tensor output_data(DT_FLOAT, output_shape);
test::FillIota<float>(&output_data, 1.0f);
Output output =
ops::Const(s->WithOpName("Output"), Input::Initializer(output_data));
Output conv_backprop_input;
Output input_sizes_i =
ops::Identity(s->WithOpName("InputSizesIdentity"), input_sizes);
ops::Conv2DBackpropInput::Attrs attrs;
attrs = attrs.DataFormat(SRC_DATA_FORMAT);
if (dilated) {
attrs = attrs.Dilations(DIMS(1, 2, 2, 1));
}
conv_backprop_input = ops::Conv2DBackpropInput(
s->WithOpName("Conv2DBackpropInput"), input_sizes_i, filter, output,
DIMS(1, stride, stride, 1), padding, attrs);
return conv_backprop_input;
}
template <typename T = float>
Output SimpleConv3D(tensorflow::Scope* s, int input_size, int filter_size,
const string& padding, const string& device) {
int batch_size = 8;
int input_height = input_size;
int input_width = input_size;
int input_depth = 4;
int input_channel = 3;
int filter_count = 6;
int stride = 1;
TensorShape input_shape(DIMS_5D(batch_size, input_depth, input_height,
input_width, input_channel));
Tensor input_data(DataTypeToEnum<T>::value, input_shape);
test::FillIota<T>(&input_data, static_cast<T>(1));
Output input =
ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
TensorShape filter_shape(
{filter_size, filter_size, filter_size, input_channel, filter_count});
Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
test::FillIota<T>(&filter_data, static_cast<T>(1));
Output filter =
ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
Output conv =
ops::Conv3D(s->WithOpName("Conv3D").WithDevice(device), input, filter,
DIMS_5D(1, stride, stride, stride, 1), padding,
ops::Conv3D::Attrs().DataFormat(SRC_DATA_FORMAT_5D));
return conv;
}
class GenericLayoutOptimizerTest : public GrapplerTest {
protected:
void SetUp() override {
bool gpu_available = GetNumAvailableGPUs() > 0;
if (gpu_available) {
      virtual_cluster_ = std::make_unique<SingleMachine>(
          /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/1);
} else {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
cpu_device.set_memory_size(1024 * 1024);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DeviceProperties gpu_device;
gpu_device.set_type("GPU");
gpu_device.mutable_environment()->insert({"architecture", "6"});
      virtual_cluster_ = absl::WrapUnique(new VirtualCluster(
          {{"/CPU:0", cpu_device}, {"/GPU:1", gpu_device}}));
#else
virtual_cluster_ =
absl::WrapUnique(new VirtualCluster({{"/CPU:0", cpu_device}}));
#endif
}
TF_ASSERT_OK(virtual_cluster_->Provision());
}
void TearDown() override {
TF_ASSERT_OK(virtual_cluster_->Shutdown());
tsl::enable_tensor_float_32_execution(true);
}
std::unique_ptr<Cluster> virtual_cluster_;
};
void VerifyRegularFaninMatch(const utils::NodeView* node, int port,
absl::string_view fanin_name, int fanin_port) {
  ASSERT_GT(node->NumRegularFanins(), port);
const auto& fanin = node->GetRegularFanin(port);
EXPECT_EQ(fanin.node_view()->GetName(), fanin_name);
EXPECT_EQ(fanin.index(), fanin_port);
}
void VerifyRegularFanoutMatch(const utils::NodeView* node, int port,
absl::string_view fanout_name, int fanout_port) {
bool found = false;
for (const auto& regular_fanout : node->GetRegularFanout(port)) {
if (regular_fanout.node_view()->GetName() == fanout_name &&
regular_fanout.index() == fanout_port) {
found = true;
}
}
EXPECT_TRUE(found);
}
void VerifyDataFormatAttributeMatch(const utils::NodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr("data_format");
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->s(), attr_value);
}
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv2DGraph) {
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope, 4, 2, "VALID", "");
auto identity = Identity(scope.WithOpName("Output"), conv2d);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv2d_node, nullptr);
ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv2d_node, 1, "Filter", 0);
VerifyDataFormatAttributeMatch(conv2d_node, SRC_DATA_FORMAT);
auto* output_node = graph_view.GetNode("Output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
}
TEST_F(GenericLayoutOptimizerTest, PreserveFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
auto i = ops::Identity(s.WithOpName("i"), conv);
GrapplerItem item;
item.fetch.push_back("Conv2D");
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, EmptyDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, GPUDevice) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
tsl::enable_tensor_float_32_execution(false);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv =
SimpleConv2D(&s, 4, 2, "VALID", "/job:w/replica:0/task:0/device:GPU:0");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, "NCHW");
}
TEST_F(GenericLayoutOptimizerTest, CPUDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D(&s, 4, 2, "VALID", "/CPU:0");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
VerifyDataFormatAttributeMatch(conv_node, "NHWC");
#else
VerifyDataFormatAttributeMatch(conv_node, DST_DATA_FORMAT);
#endif
}
TEST_F(GenericLayoutOptimizerTest, NoOptimizeIntegerConvolution) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto conv = SimpleConv2D<int32>(&s, 4, 2, "VALID", "");
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv_node, nullptr);
VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
TEST_F(GenericLayoutOptimizerTest, Connectivity) {
Scope scope = Scope::NewRootScope();
auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
absl::StrCat("/device:", DEVICE, ":0"));
auto i1 = ops::Identity(scope.WithOpName("i1"), conv);
auto i2 = ops::Identity(scope.WithOpName("i2"), i1);
auto i3 = ops::Identity(scope.WithOpName("i3"), i2);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
Status status;
  utils::GraphView graph_view_original(&item.graph, &status);
  TF_ASSERT_OK(status);
const int i1_index = graph_view_original.GetNode("i1")->node_index();
const int i2_index = graph_view_original.GetNode("i2")->node_index();
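  // Scramble the node order so the optimizer has to handle a graph that is
  // not topologically sorted.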
item.graph.mutable_node()->SwapElements(i1_index, i2_index);
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* node_i2_output = graph_view.GetNode("i2");
ASSERT_NE(node_i2_output, nullptr);
ASSERT_EQ(node_i2_output->NumRegularFanins(), 1);
VerifyRegularFaninMatch(node_i2_output, 0, "i1", 0);
}
TEST_F(GenericLayoutOptimizerTest, Conv2DBackpropInputNonConstInputSizes) {
for (const int input_sizes_length : {2, 4}) {
Scope s = Scope::NewRootScope();
auto conv = SimpleConv2DBackpropInput(&s, 7, 2, "SAME", false,
input_sizes_length);
Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_backprop_node = graph_view.GetNode("Conv2DBackpropInput");
ASSERT_NE(conv2d_backprop_node, nullptr);
ASSERT_EQ(conv2d_backprop_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(conv2d_backprop_node, 0, "InputSizesIdentity", 0);
}
}
TEST_F(GenericLayoutOptimizerTest, Conv2DDataFormatVecPermuteCollapse) {
tsl::enable_tensor_float_32_execution(false);
Scope scope =
Scope::NewRootScope().WithDevice(absl::StrCat("/device:", DEVICE, ":0"));
auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
absl::StrCat("/device:", DEVICE, ":0"));
auto shape = ops::Shape(scope.WithOpName("shape"), conv);
auto value = ops::Const(scope.WithOpName("value"), 0, {});
auto fill = ops::Fill(scope.WithOpName("fill"), shape, value);
auto i = ops::Identity(scope.WithOpName("i"), fill);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv2d_node = graph_view.GetNode("Conv2D");
ASSERT_NE(conv2d_node, nullptr);
ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(
conv2d_node, 0,
absl::StrCat("Conv2D-0-Transpose", SRC_DATA_FORMAT, "To", DST_DATA_FORMAT,
"-LayoutOptimizer"),
0);
auto* shape_node = graph_view.GetNode("shape");
ASSERT_NE(shape_node, nullptr);
ASSERT_EQ(shape_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(shape_node, 0, conv2d_node->GetName(), 0);
auto* fill_node = graph_view.GetNode("fill");
ASSERT_NE(fill_node, nullptr);
ASSERT_EQ(fill_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(fill_node, 0, shape_node->GetName(), 0);
VerifyRegularFanoutMatch(
fill_node, 0,
absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
"-LayoutOptimizer"),
0);
auto* graph_output = graph_view.GetNode("i");
ASSERT_NE(graph_output, nullptr);
ASSERT_EQ(graph_output->NumRegularFanins(), 1);
VerifyRegularFaninMatch(
graph_output, 0,
absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
"-LayoutOptimizer"),
0);
}
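// User-written Transpose pairs that happen to cancel must be preserved;
// EraseCancellableNodes only prunes transposes inserted by the optimizer
// itself.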
TEST_F(GenericLayoutOptimizerTest, DoNotPruneNonAddedCancellableTransposes) {
GrapplerItem item;
{
Scope scope = Scope::NewRootScope().WithDevice(
absl::StrCat("/device:", DEVICE, ":0"));
auto input = ops::RandomUniform(scope.WithOpName("input"),
DIMS(kBatchSize, kHeight, kWidth, kDepthIn),
DT_FLOAT);
auto input_in_transpose =
ops::Transpose(scope.WithOpName("input_in_transpose"), input,
ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
auto input_out_transpose = ops::Transpose(
scope.WithOpName("input_out_transpose"), input_in_transpose,
ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
Tensor bias_data(DT_FLOAT, TensorShape({kDepthIn}));
test::FillIota<float>(&bias_data, 1.0f);
auto bias_add = ops::BiasAdd(
scope.WithOpName("bias_add"), input_out_transpose, bias_data,
ops::BiasAdd::Attrs().DataFormat(SRC_DATA_FORMAT));
auto output_in_transpose =
ops::Transpose(scope.WithOpName("output_in_transpose"), bias_add,
ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
auto output_out_transpose = ops::Transpose(
scope.WithOpName("output_out_transpose"), output_in_transpose,
ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
auto output =
ops::Identity(scope.WithOpName("output"), output_out_transpose);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
}
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* input_node = graph_view.GetNode("input");
ASSERT_NE(input_node, nullptr);
auto* input_in_transpose_node = graph_view.GetNode("input_in_transpose");
ASSERT_NE(input_in_transpose_node, nullptr);
ASSERT_EQ(input_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_in_transpose_node, 0, input_node->GetName(), 0);
auto* input_out_transpose_node = graph_view.GetNode("input_out_transpose");
ASSERT_NE(input_out_transpose_node, nullptr);
ASSERT_EQ(input_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_out_transpose_node, 0,
input_in_transpose_node->GetName(), 0);
auto* bias_add_in_transpose_node = graph_view.GetNode(
absl::StrCat("bias_add-0-Transpose", SRC_DATA_FORMAT, "To",
DST_DATA_FORMAT, "-LayoutOptimizer"));
ASSERT_NE(bias_add_in_transpose_node, nullptr);
ASSERT_EQ(bias_add_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_in_transpose_node, 0,
input_out_transpose_node->GetName(), 0);
auto* bias_add_node = graph_view.GetNode("bias_add");
ASSERT_NE(bias_add_node, nullptr);
ASSERT_EQ(bias_add_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_node, 0,
bias_add_in_transpose_node->GetName(), 0);
auto* bias_add_out_transpose_node = graph_view.GetNode(
absl::StrCat("bias_add-0-0-Transpose", DST_DATA_FORMAT, "To",
SRC_DATA_FORMAT, "-LayoutOptimizer"));
ASSERT_NE(bias_add_out_transpose_node, nullptr);
ASSERT_EQ(bias_add_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(bias_add_out_transpose_node, 0,
bias_add_node->GetName(), 0);
auto* output_in_transpose_node = graph_view.GetNode("output_in_transpose");
ASSERT_NE(output_in_transpose_node, nullptr);
ASSERT_EQ(output_in_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_in_transpose_node, 0,
bias_add_out_transpose_node->GetName(), 0);
auto* output_out_transpose_node = graph_view.GetNode("output_out_transpose");
ASSERT_NE(output_out_transpose_node, nullptr);
ASSERT_EQ(output_out_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_out_transpose_node, 0,
output_in_transpose_node->GetName(), 0);
auto* output_node = graph_view.GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_out_transpose_node->GetName(),
0);
}
TEST_F(GenericLayoutOptimizerTest, CancelTransposeAroundPad) {
using test::function::NDef;
  GenericLayoutOptimizer optimizer(RewriterConfig::AGGRESSIVE,
                                   RewriterConfig::NCHW_TO_NHWC);
const Tensor kPermuteNhwcToNchw = test::AsTensor<int32>({0, 3, 1, 2});
const Tensor kPermuteNchwToNhwc = test::AsTensor<int32>({0, 2, 3, 1});
const Tensor kPad = test::AsTensor<int32>({1, 2, 3, 4, 5, 6, 7, 8}, {4, 2});
GrapplerItem item;
item.graph = test::function::GDef({
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef("paddings", "Const", {}, {{"dtype", DT_INT32}, {"value", kPad}}),
NDef("perm_nhwc_to_nchw", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
NDef("perm_nchw_to_nhwc", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
NDef("transpose_0", "Transpose", {"x", "perm_nhwc_to_nchw"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
NDef("pad", "Pad", {"transpose_0", "paddings"},
{{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
NDef("transpose_1", "Transpose", {"pad", "perm_nchw_to_nhwc"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
NDef("transpose_2", "Transpose", {"pad", "perm_nchw_to_nhwc"},
{{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
const Tensor kPermutedPaddings =
test::AsTensor<int32>({1, 2, 5, 6, 7, 8, 3, 4}, {4, 2});
GraphDef expected = test::function::GDef({
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef("paddings", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermutedPaddings}}),
NDef("perm_nhwc_to_nchw", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
NDef("perm_nchw_to_nhwc", "Const", {},
{{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
NDef("transpose_0", "Identity", {"x"}, {{"T", DT_FLOAT}}),
NDef("pad", "Pad", {"transpose_0", "paddings"},
{{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
NDef("transpose_1", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
NDef("transpose_2", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
});
CompareGraphs(expected, output);
Tensor x = GenerateRandomTensor<DT_FLOAT>({2, 6, 6, 8});
item.fetch = {"transpose_1", "transpose_2"};
item.feed.emplace_back("x", x);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 2);
ASSERT_EQ(tensors_expected.size(), 2);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(GenericLayoutOptimizerTest, PreserveInputShapes) {
using test::function::NDef;
GenericLayoutOptimizer optimizer(RewriterConfig::AGGRESSIVE);
AttrValue output_shapes;
auto* shape = output_shapes.mutable_list()->add_shape();
shape->add_dim()->set_size(-1);
GrapplerItem item;
item.graph = test::function::GDef({NDef(
"x", "_Arg", {},
{{"T", DT_FLOAT}, {"index", 0}, {"_output_shapes", output_shapes}})});
item.feed.emplace_back("x", Tensor(DT_FLOAT));
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* arg = graph_view.GetNode("x");
ASSERT_NE(arg, nullptr);
EXPECT_TRUE(arg->HasAttr("_output_shapes"));
EXPECT_EQ(arg->GetAttr("_output_shapes")->DebugString(),
output_shapes.DebugString());
}
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv3DGraph_CPU) {
Scope scope = Scope::NewRootScope();
auto conv3d = SimpleConv3D(&scope, 32, 1, "VALID", "/CPU:0");
auto identity = Identity(scope.WithOpName("Output"), conv3d);
GrapplerItem item;
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
Status status;
utils::GraphView graph_view(&output, &status);
TF_ASSERT_OK(status);
auto* conv3d_node = graph_view.GetNode("Conv3D");
ASSERT_NE(conv3d_node, nullptr);
ASSERT_EQ(conv3d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv3d_node, 1, "Filter", 0);
auto* output_node = graph_view.GetNode("Output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
VerifyDataFormatAttributeMatch(conv3d_node, SRC_DATA_FORMAT_5D);
#else
auto* input_transpose_node = graph_view.GetNode(
absl::StrCat("Conv3D-0-Transpose", SRC_DATA_FORMAT_5D, "To",
DST_DATA_FORMAT_5D, "-LayoutOptimizer"));
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "Input", 0);
VerifyRegularFaninMatch(conv3d_node, 0, input_transpose_node->GetName(), 0);
VerifyDataFormatAttributeMatch(conv3d_node, DST_DATA_FORMAT_5D);
auto* output_transpose_node = graph_view.GetNode(
absl::StrCat("Conv3D-0-0-Transpose", DST_DATA_FORMAT_5D, "To",
SRC_DATA_FORMAT_5D, "-LayoutOptimizer"));
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, conv3d_node->GetName(), 0);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d1a82c8-f54e-44c5-b05f-f68fb87ecf6e | cpp | tensorflow/tensorflow | meta_optimizer | tensorflow/core/grappler/optimizers/data/meta_optimizer.cc | tensorflow/core/grappler/optimizers/meta_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/data/meta_optimizer.h"
#include <array>
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ConfigMap =
std::map<string, tensorflow::RewriterConfig_CustomGraphOptimizer>;
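// tf.data graph rewrites, applied in this fixed order.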
constexpr std::array<const char*, 22> kTFDataOptimizations = {
"noop_elimination",
"disable_intra_op_parallelism",
"use_private_thread_pool",
"shuffle_and_repeat_fusion",
"map_parallelization",
"map_fusion",
"filter_fusion",
"map_and_filter_fusion",
"map_and_batch_fusion",
"batch_parallelization",
"filter_parallelization",
"make_sloppy",
"parallel_batch",
"slack",
"autotune_buffer_sizes",
"seq_interleave_prefetch",
"inject_prefetch",
"inject_io_prefetch_eligible",
"inject_io_prefetch",
"disable_prefetch_legacy_autotune",
"enable_gradient_descent",
"make_deterministic"};
Status ToConfigMap(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config,
ConfigMap* result) {
auto found = gtl::FindOrNull(config->parameter_map(), "optimizer_configs");
if (!found) return absl::OkStatus();
auto& options = found->list().s();
for (const auto& option_string : options) {
std::vector<string> split = absl::StrSplit(option_string, ':');
if (split.size() != 3) {
return errors::Internal(
"Wrong format for optimizer options. Expect <optimizer name>:<config "
"key>:<config value>, received: ",
option_string);
}
const string& optimizer_name = split[0];
const string& config_key = split[1];
const string& config_value = split[2];
auto optimizer_config = gtl::FindOrNull(*result, optimizer_name);
if (!optimizer_config) {
(*result)[optimizer_name] =
tensorflow::RewriterConfig_CustomGraphOptimizer();
optimizer_config = gtl::FindOrNull(*result, optimizer_name);
}
(*optimizer_config->mutable_parameter_map())[config_key].set_s(
config_value);
}
return absl::OkStatus();
}
}
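// Applies every tf.data optimization to the main graph, then recursively
// optimizes each reachable tf.data function in the library and swaps in the
// optimized body.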
Status TFDataMetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
GrapplerItem optimized_item = item;
for (const auto& optimization : kTFDataOptimizations) {
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"TFData", optimization});
Status status = ApplyOptimization(optimization, cluster, &optimized_item);
timings.ReportAndStop();
if (!status.ok()) return status;
}
output->Swap(&optimized_item.graph);
FunctionLibraryDefinition flib =
FunctionLibraryDefinition(OpRegistry::Global(), output->library())
.ReachableDefinitions(*output);
const auto producer = output->versions().producer();
bool optimized_functions = false;
for (const auto& name : flib.ListFunctionNames()) {
auto* func = flib.Find(name);
if (!data::IsTFDataFunction(*func)) continue;
VLOG(3) << "Optimize function: function=" << func->signature().name();
optimized_functions = true;
GrapplerFunctionItem func_item;
TF_RETURN_IF_ERROR(
MakeGrapplerFunctionItem(*func, flib, producer, &func_item));
GraphDef optimized_func_graph;
TF_RETURN_IF_ERROR(Optimize(cluster, func_item, &optimized_func_graph));
for (const FunctionDef& func_def :
optimized_func_graph.library().function()) {
if (flib.Find(func_def.signature().name()) == nullptr) {
TF_RETURN_IF_ERROR(flib.AddFunctionDef(func_def));
}
}
FunctionDef optimized_func;
func_item.SwapFunctionBody(std::move(optimized_func_graph));
TF_RETURN_IF_ERROR(MakeFunctionDef(func_item, flib, &optimized_func));
TF_RETURN_IF_ERROR(
flib.ReplaceFunction(func->signature().name(), optimized_func));
}
if (optimized_functions) {
*output->mutable_library() = flib.ToProto();
}
return absl::OkStatus();
}
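// Runs a single optimization by name if it was enabled in Init; an unknown
// name is a no-op, and an Aborted status from the optimizer is treated as
// "no change".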
Status TFDataMetaOptimizer::ApplyOptimization(const string& name,
Cluster* cluster,
GrapplerItem* item) const {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
const auto* optimizer = gtl::FindOrNull(enabled_optimizers_, name);
if (!optimizer) {
return absl::OkStatus();
}
GraphDef result;
(*optimizer)->set_deadline_usec(this->deadline_usec());
Status status = (*optimizer)->Optimize(cluster, *item, &result);
if (status.ok()) {
item->graph.Swap(&result);
} else if (absl::IsAborted(status)) {
status = absl::OkStatus();
}
return status;
}
Status TFDataMetaOptimizer::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) return absl::OkStatus();
auto& optimizers = config->parameter_map().at("optimizers").list().s();
ConfigMap optimizer_configs;
TF_RETURN_IF_ERROR(ToConfigMap(config, &optimizer_configs));
for (const auto& optimizer_name : optimizers) {
auto optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull(optimizer_name);
if (optimizer) {
TF_RETURN_IF_ERROR(
optimizer->Init(gtl::FindOrNull(optimizer_configs, optimizer_name)));
enabled_optimizers_[optimizer_name] = std::move(optimizer);
} else {
return errors::Internal(
"Tried to register a dataset optimizer that doesn't exist: ",
optimizer_name);
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(TFDataMetaOptimizer, "tf_data_meta_optimizer");
}
} | #include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include <atomic>
#include "absl/strings/match.h"
#include "absl/strings/substitute.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
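// A registrable custom optimizer that records whether it was invoked.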
class TestOptimizer : public CustomGraphOptimizer {
public:
static void SetOptimized(const bool flag_value) { optimized_ = flag_value; }
static bool IsOptimized() { return optimized_; }
TestOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(const tensorflow::RewriterConfig_CustomGraphOptimizer* config =
nullptr) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
optimized_ = true;
*optimized_graph = item.graph;
return absl::OkStatus();
}
private:
static bool optimized_;
};
bool TestOptimizer::optimized_;
REGISTER_GRAPH_OPTIMIZER(TestOptimizer);
class TestGraphOptimizer : public TestOptimizer {
public:
string name() const override { return "test_graph_optimizer"; }
};
REGISTER_GRAPH_OPTIMIZER(TestGraphOptimizer);
class TestOptimizerWithParams : public TestOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
CHECK(config != nullptr);
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(TestOptimizerWithParams);
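// Captures the optimization options of every GrapplerItem (main graph and
// function bodies) that the meta optimizer passes through it.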
class GrapplerItemPropertiesAccumulator : public CustomGraphOptimizer {
public:
static void SetOptimizationOptions(
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options) {
optimization_options_ = optimization_options;
}
static void ResetOptimizationOptions() { optimization_options_ = nullptr; }
GrapplerItemPropertiesAccumulator() {}
string name() const override {
return "grappler_item_properties_accumulator";
}
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
if (optimization_options_) {
optimization_options_->insert({item.id, item.optimization_options()});
}
return absl::OkStatus();
}
private:
static gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options_;
};
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
GrapplerItemPropertiesAccumulator::optimization_options_;
REGISTER_GRAPH_OPTIMIZER(GrapplerItemPropertiesAccumulator);
class MetaOptimizerTest : public GrapplerTest {};
TEST_F(MetaOptimizerTest, RunsCustomOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerWithParams) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizerWithParams");
auto* custom_config = rewriter_config.add_custom_optimizers();
custom_config->set_name("TestOptimizerWithParams");
(*custom_config->mutable_parameter_map())["foo"] = AttrValue();
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerAndCustomGraphOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
TestGraphOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsPluginOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"/device:GPU:0"});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_min_graph_nodes(-1);
const auto creator = []() { return new TestOptimizer; };
ConfigList config_list;
config_list.disable_model_pruning = true;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunOptimizersTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunToggleOptimizersAndCustomGraphOptimizerTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
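// Builds nested _noinline functions (MyQuadratic -> MySquare -> MyMul) and
// checks that function specialization produces the expected
// "<fn>_specialized_for_<node>_at_<id>" functions while preserving results.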
TEST_F(MetaOptimizerTest, OptimizeFunctionLibrary) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"my_mul"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "my_mul:z:0"}});
(*square_func.mutable_attr())["_noinline"].set_b(true);
FunctionDef quadratic_func = FunctionDefHelper::Create(
"MyQuadratic", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"square"}, "MySquare", {"x"}, {{"T", "$T"}}},
{{"quadratic"}, "MySquare", {"square:z"}, {{"T", "$T"}}}},
{{"z", "quadratic:z:0"}});
(*quadratic_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("quadratic", "MyQuadratic", {"b"}, {{"T", DT_INT32}}, kDevice),
NDef("out_s", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_q", "Identity", {"quadratic:0"}, {{"T", DT_INT32}}, kDevice)},
{mul_func, square_func, quadratic_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(3, optimized_flib.num_functions());
const auto specialized_name = [](const string& fn, const string& node,
const string& id) {
return absl::Substitute("$0_specialized_for_$1_at_$2", fn, node, id);
};
const string optimized_0 =
specialized_name("MyQuadratic", "quadratic", "tf_graph");
const string optimized_1 = specialized_name("MySquare", "square", "tf_graph");
const string optimized_2 =
specialized_name("MySquare", "square", optimized_0);
const FunctionDef* optimized_func_0 = optimized_flib.Find(optimized_0);
const FunctionDef* optimized_func_1 = optimized_flib.Find(optimized_1);
const FunctionDef* optimized_func_2 = optimized_flib.Find(optimized_2);
ASSERT_NE(optimized_func_0, nullptr);
ASSERT_NE(optimized_func_1, nullptr);
ASSERT_NE(optimized_func_2, nullptr);
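  // The "&& ++count" idiom counts matched nodes while checking that each call
  // site was rewritten to point at its specialized function.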
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_1, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_0, node.op());
}
}
EXPECT_EQ(2, count);
count = 0;
for (const NodeDef& node : optimized_func_0->node_def()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_2, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_2, node.op());
}
}
EXPECT_EQ(2, count);
const std::vector<const FunctionDef*> optimized_funcs = {optimized_func_1,
optimized_func_2};
for (const FunctionDef* optimized_func : optimized_funcs) {
count = 0;
for (const NodeDef& node : optimized_func->node_def()) {
if (node.name() == "Func/my_mul/input/_0" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Func/my_mul/input/_1" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "my_mul/mul" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("Func/my_mul/input/_0:output:0", node.input(0));
EXPECT_EQ("Func/my_mul/input/_1:output:0", node.input(1));
}
EXPECT_TRUE(node.device().empty());
}
EXPECT_EQ(3, count);
ASSERT_EQ(1, optimized_func->ret().size());
EXPECT_EQ("Func/my_mul/output/_2:output:0", optimized_func->ret().at("z"));
}
item.fetch = {"out_s", "out_q"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<int>(4));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<int>(tensors_expected[1], tensors[1]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneUnusedOutputs) {
using test::function::NDef;
ConfigProto config_proto;
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_mul = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output0"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output0:z:0"}, {"z1", "output1:z:0"}, {"z2", "output2:z:0"}});
FunctionDef my_fwd = FunctionDefHelper::Create(
"Fwd", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output"}, "MyMul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output:z0:0"}, {"z1", "output:z1:0"}, {"z2", "output:z2:0"}});
(*my_mul.mutable_attr())["_noinline"].set_b(true);
(*my_fwd.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_mul, my_fwd};
GrapplerItem item;
item.id = "tf_graph";
item.fetch = {"ret"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fwd", "Fwd", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("ret", "Identity", {"fwd:2"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string specialized_my_fwd = "Fwd_specialized_for_fwd_at_tf_graph";
const string specialized_my_mul =
absl::StrCat("MyMul_specialized_for_output_at_", specialized_my_fwd);
FunctionDef expected_my_mul = FunctionDefHelper::Create(
specialized_my_mul, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output2"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output2:z:0"}});
FunctionDef expected_my_fwd = FunctionDefHelper::Create(
specialized_my_fwd, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output"}, specialized_my_mul, {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output:z2:0"}});
const FunctionDef* my_mul_spec = optimized_flib.Find(specialized_my_mul);
const FunctionDef* my_fwd_spec = optimized_flib.Find(specialized_my_fwd);
ASSERT_NE(my_mul_spec, nullptr);
ASSERT_NE(my_fwd_spec, nullptr);
CompareFunctions(expected_my_mul, *my_mul_spec);
CompareFunctions(expected_my_fwd, *my_fwd_spec);
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(4.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneFunctionBody) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.add_optimizers("pruning");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T"}, {"T: {float, double}"},
{{{"mul1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"mul2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "mul1:z:0"}, {"z2", "mul2:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn1", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn2", "Identity", {"fn2:1"}, {{"T", DT_FLOAT}}, kDevice)},
{my_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string optimized_fn1 = "MyFunc_specialized_for_fn1_at_tf_graph";
const string optimized_fn2 = "MyFunc_specialized_for_fn2_at_tf_graph";
const FunctionDef* optimized_func_fn1 = optimized_flib.Find(optimized_fn1);
const FunctionDef* optimized_func_fn2 = optimized_flib.Find(optimized_fn2);
ASSERT_NE(optimized_func_fn1, nullptr);
ASSERT_NE(optimized_func_fn2, nullptr);
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++count) {
EXPECT_EQ(optimized_fn1, node.op());
} else if (node.name() == "fn2" && ++count) {
EXPECT_EQ(optimized_fn2, node.op());
}
}
EXPECT_EQ(2, count);
ASSERT_EQ(1, optimized_func_fn1->node_def_size());
EXPECT_EQ(1, optimized_func_fn1->signature().output_arg_size());
EXPECT_EQ("z1", optimized_func_fn1->signature().output_arg(0).name());
EXPECT_EQ("mul1", optimized_func_fn1->node_def(0).name());
ASSERT_EQ(1, optimized_func_fn2->node_def_size());
EXPECT_EQ(1, optimized_func_fn2->signature().output_arg_size());
EXPECT_EQ("z2", optimized_func_fn2->signature().output_arg(0).name());
EXPECT_EQ("mul2", optimized_func_fn2->node_def(0).name());
item.fetch = {"out_fn1", "out_fn2"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(3.123f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryWithRestrictions) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func_1 = FunctionDefHelper::Create(
"MyMul1", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 = FunctionDefHelper::Create(
"MyMul2", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(optimization_options.size(), 3);
auto optimization_options_main =
gtl::FindOrNull(optimization_options, "main");
ASSERT_NE(optimization_options_main, nullptr);
EXPECT_TRUE(optimization_options_main->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_1 =
gtl::FindOrNull(optimization_options, "MyMul1");
ASSERT_NE(optimization_options_my_mul_1, nullptr);
EXPECT_TRUE(optimization_options_my_mul_1->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_2 =
gtl::FindOrNull(optimization_options, "MyMul2");
ASSERT_NE(optimization_options_my_mul_2, nullptr);
EXPECT_FALSE(
optimization_options_my_mul_2->allow_non_differentiable_rewrites);
}
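// Sleeps for one second per invocation and then honors the grappler
// deadline; each completed pass appends one node to the graph as a marker
// for the timeout tests below.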
class SleepingOptimizer : public CustomGraphOptimizer {
public:
SleepingOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
Env::Default()->SleepForMicroseconds(1000000);
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
optimized_graph->add_node();
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(SleepingOptimizer);
TEST_F(MetaOptimizerTest, OptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
GraphDef output;
GraphDef original = item.graph;
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
CompareGraphs(original, output);
}
TEST_F(MetaOptimizerTest, MetaOptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(1500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
EXPECT_EQ(original_node_size + 1, output.node_size());
}
TEST_F(MetaOptimizerTest, OptimizerDoesNotTimeOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(2500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(original_node_size + 2, output.node_size());
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& post_optimization_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_post_optimization_verifier_config();
post_optimization_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunInterOptimizerVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& inter_optimizer_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_inter_optimizer_verifier_config();
inter_optimizer_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnInvalidGraph) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef mul_func_1 =
FunctionDefHelper::Create("MyMul1", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 =
FunctionDefHelper::Create("MyMul2", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
auto& post_optimization_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_post_optimization_verifier_config();
post_optimization_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer_with_post_verifiers(nullptr, config_proto);
Status status =
optimizer_with_post_verifiers.Optimize(nullptr, item, &output);
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StrContains(
status.message(),
"NodeDef expected inputs 'float' do not match 3 inputs specified"));
}
TEST_F(MetaOptimizerTest, RunInterOptimizerVerifiersOnInvalidGraph) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef mul_func_1 =
FunctionDefHelper::Create("MyMul1", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 =
FunctionDefHelper::Create("MyMul2", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
auto& inter_optimizer_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_inter_optimizer_verifier_config();
inter_optimizer_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer_with_inter_verifiers(nullptr, config_proto);
Status status =
optimizer_with_inter_verifiers.Optimize(nullptr, item, &output);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(absl::StrContains(
status.message(),
"NodeDef expected inputs 'float' do not match 3 inputs specified"));
}
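// Verifies constant compression: the all-zeros Const is expected to be
// stored with no explicit values, while the all-ones tensor keeps a single
// splat value and remains a HostConst.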
TEST_F(MetaOptimizerTest, CompressConstants) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Tensor zeros_t(DT_FLOAT, TensorShape({64}));
Tensor ones_t(DT_FLOAT, TensorShape({64}));
for (int i = 0; i < 64; ++i) {
zeros_t.flat<float>()(i) = 0.0f;
ones_t.flat<float>()(i) = 1.0f;
}
Output zeros = ops::Const(scope.WithOpName("zeros"), zeros_t);
Output host_ones = ops::Const(scope.WithOpName("host_ones"), ones_t);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
ASSERT_EQ(item.graph.node(1).name(), "host_ones");
item.graph.mutable_node(1)->set_op("HostConst");
item.fetch = {"zeros", "host_ones"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {});
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
bool found_zeros = false;
bool found_host_ones = false;
ASSERT_EQ(output.node_size(), 2);
for (const auto& node : output.node()) {
if (node.name() == "zeros") {
found_zeros = true;
EXPECT_EQ(node.op(), "Const");
const TensorProto& zeroes_t = node.attr().at("value").tensor();
EXPECT_EQ(zeroes_t.float_val_size(), 0);
} else if (node.name() == "host_ones") {
found_host_ones = true;
EXPECT_EQ(node.op(), "HostConst");
const TensorProto& ones_t = node.attr().at("value").tensor();
EXPECT_EQ(ones_t.float_val_size(), 1);
EXPECT_EQ(ones_t.float_val(0), 1.0f);
}
}
EXPECT_TRUE(found_zeros);
EXPECT_TRUE(found_host_ones);
auto tensors = EvaluateNodes(output, item.fetch, {});
ASSERT_EQ(tensors.size(), 2);
ASSERT_EQ(tensors_expected.size(), 2);
for (int i = 0; i < 2; ++i) {
test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
}
}
TEST_F(MetaOptimizerTest, TestTFGRemoveDeadArguments) {
using test::function::NDef;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef case_func = FunctionDefHelper::Create(
"branch_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
AttrValue branches;
branches.mutable_list()->add_func()->set_name("branch_func");
AttrValue output_shapes;
output_shapes.mutable_list()->add_shape();
item.graph = test::function::GDef(
{NDef("idx", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("case", "Case", {"idx", "x", "y"},
{{"branches", std::move(branches)},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"output_shapes", std::move(output_shapes)}},
kDevice)},
{case_func});
item.fetch = {"case"};
GraphDef output;
ConfigProto config_proto;
config_proto.mutable_graph_options()
->mutable_rewrite_options()
->set_experimental_conditional_code_motion(RewriterConfig::OFF);
MetaOptimizer optimizer(nullptr, config_proto);
Status status = optimizer.Optimize(nullptr, item, &output);
  TF_EXPECT_OK(status);
EXPECT_EQ(output.library().function_size(), 1);
auto& func = output.library().function(0);
EXPECT_EQ(func.signature().input_arg_size(), 1);
EXPECT_EQ(func.signature().input_arg(0).name(), "x_tfg_result_0");
}
TEST_F(MetaOptimizerTest, TestTFGControlFlowSink) {
using test::function::NDef;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef case_func = FunctionDefHelper::Create(
"branch_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
AttrValue branches;
branches.mutable_list()->add_func()->set_name("branch_func");
AttrValue output_shapes;
output_shapes.mutable_list()->add_shape();
FunctionDef foo_func = FunctionDefHelper::Create(
"Foo", {"idx:int32", "a:float", "b:float"}, {"c:float"}, {},
{{{"add"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"mul"}, "Mul", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"case"},
"Case",
{"idx", "add:z:0", "mul:z:0"},
{{"branches", std::move(branches)},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"output_shapes", std::move(output_shapes)}}}},
{{"c", "case:output:0"}});
(*foo_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("idx", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("foo", "Foo", {"idx", "a", "b"}, {}, kDevice)},
{case_func, foo_func});
item.fetch = {"foo"};
GraphDef output;
ConfigProto config_proto;
MetaOptimizer optimizer(nullptr, config_proto);
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(output.library().function_size(), 2);
const FunctionDef* optimized_foo_func = nullptr;
const FunctionDef* specialized_branch_func = nullptr;
for (const FunctionDef& func : output.library().function()) {
if (func.signature().name() == "Foo")
optimized_foo_func = &func;
else if (absl::StartsWith(func.signature().name(), "branch_func"))
specialized_branch_func = &func;
}
ASSERT_TRUE(optimized_foo_func);
EXPECT_EQ(optimized_foo_func->node_def_size(), 1);
ASSERT_TRUE(specialized_branch_func);
EXPECT_EQ(specialized_branch_func->node_def_size(), 3);
}
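// Counts (thread-safely) how many times it is invoked, so the tf.data tests
// below can check which function bodies the regular meta optimizer visits.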
class TfDataTestOptimizer : public CustomGraphOptimizer {
public:
static void InitCount() { count_ = 0; }
static int GetCount() { return count_; }
TfDataTestOptimizer() = default;
~TfDataTestOptimizer() override = default;
TfDataTestOptimizer(const TfDataTestOptimizer&) = delete;
TfDataTestOptimizer& operator=(const TfDataTestOptimizer& other) = delete;
std::string name() const override { return "tf_data_test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
++count_;
*optimized_graph = item.graph;
return absl::OkStatus();
}
private:
static std::atomic<int> count_;
};
std::atomic<int> TfDataTestOptimizer::count_;
REGISTER_GRAPH_OPTIMIZER(TfDataTestOptimizer);
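// The different ways the outer function can reference the inner one: as a
// node's op, as a function-valued attribute, or inside a list-valued
// attribute.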
enum class FuncNestingType {
CallFromNode = 0,
CallFromAttr = 1,
CallFromList = 2
};
class TfDataTestFixture
: public ::testing::TestWithParam<std::tuple<bool, bool, FuncNestingType>> {
protected:
void SetUp() override {
is_inner_func_tf_data_ = std::get<0>(GetParam());
is_outer_func_tf_data_ = std::get<1>(GetParam());
func_nesting_type_ = std::get<2>(GetParam());
}
bool is_inner_func_tf_data_ = false;
bool is_outer_func_tf_data_ = false;
FuncNestingType func_nesting_type_ = FuncNestingType::CallFromNode;
};
void SetUpCallFromNode(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"inner_func"}, "inner_func", {"x", "x"}, {{"T", DT_FLOAT}}}},
{{"z", "inner_func:z:0"}});
}
void SetUpCallFromAttr(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"identity"},
"Identity",
{"x"},
{{"T", DT_FLOAT},
{"f", FunctionDefHelper::FunctionRef("inner_func", {})}}}},
{{"z", "x"}});
}
void SetUpCallFromList(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"identity"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}},
{{"z", "x"}});
AttrValue_ListValue* list_value =
(*outer_func.mutable_node_def(0)->mutable_attr())["list"].mutable_list();
NameAttrList* entry = list_value->add_func();
entry->set_name("inner_func");
}
TEST_P(TfDataTestFixture, TfDataTests) {
using test::function::NDef;
FunctionDef inner_func = FunctionDefHelper::Create(
"inner_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
(*inner_func.mutable_attr())[data::kTFDataFunction].set_b(
is_inner_func_tf_data_);
FunctionDef outer_func;
switch (func_nesting_type_) {
case FuncNestingType::CallFromNode:
SetUpCallFromNode(outer_func);
break;
case FuncNestingType::CallFromAttr:
SetUpCallFromAttr(outer_func);
break;
case FuncNestingType::CallFromList:
SetUpCallFromList(outer_func);
break;
default:
break;
}
(*outer_func.mutable_attr())[data::kTFDataFunction].set_b(
is_outer_func_tf_data_);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("outer_func_node", "outer_func", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_s", "Identity", {"outer_func_node:0"}, {{"T", DT_FLOAT}},
kDevice)},
{inner_func, outer_func});
TfDataTestOptimizer::InitCount();
ConfigProto config_proto;
auto& rewriter_config =
*(config_proto.mutable_graph_options()->mutable_rewrite_options());
rewriter_config.add_optimizers("TfDataTestOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
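  // The optimizer runs on the main graph and on every non-tf.data function:
  // marking the outer function as tf.data also shields the inner one, while
  // marking only the inner function skips just that body.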
int expected_count = 3;
if (is_outer_func_tf_data_)
expected_count = 1;
else if (is_inner_func_tf_data_)
expected_count = 2;
EXPECT_EQ(TfDataTestOptimizer::GetCount(), expected_count);
FunctionLibraryDefinition flib(OpRegistry::Global(), output.library());
const FunctionDef* outer_func_after_opt = flib.Find("outer_func");
const FunctionDef* inner_func_after_opt = flib.Find("inner_func");
EXPECT_EQ(data::IsTFDataFunction(*outer_func_after_opt),
is_outer_func_tf_data_);
if (is_outer_func_tf_data_ || is_inner_func_tf_data_) {
EXPECT_EQ(data::IsTFDataFunction(*inner_func_after_opt), true);
} else {
EXPECT_EQ(data::IsTFDataFunction(*inner_func_after_opt), false);
}
}
INSTANTIATE_TEST_SUITE_P(
MetaOptimizerTest, TfDataTestFixture,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(FuncNestingType::CallFromNode,
FuncNestingType::CallFromAttr,
FuncNestingType::CallFromList)),
[](const ::testing::TestParamInfo<TfDataTestFixture::ParamType>& info) {
bool is_inner_func_tf_data = std::get<0>(info.param);
bool is_outer_func_tf_data = std::get<1>(info.param);
FuncNestingType func_nesting_type = std::get<2>(info.param);
std::string test_name;
if (is_inner_func_tf_data && is_outer_func_tf_data)
test_name = "both_funcs_tf_data";
else if (is_inner_func_tf_data)
test_name = "inner_func_tf_data";
else if (is_outer_func_tf_data)
test_name = "outer_func_tf_data";
else
test_name = "no_func_tf_data";
switch (func_nesting_type) {
case FuncNestingType::CallFromNode:
test_name += "_call_from_node";
break;
case FuncNestingType::CallFromAttr:
test_name += "_call_from_attribute";
break;
case FuncNestingType::CallFromList:
test_name += "_call_from_list";
break;
default:
break;
}
return test_name;
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/meta_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/meta_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
12911963-3d86-43d4-9313-02b2ed479c8a | cpp | tensorflow/tensorflow | shape_optimizer | tensorflow/core/grappler/optimizers/shape_optimizer.cc | tensorflow/core/grappler/optimizers/shape_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
Status ShapeOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
bool can_optimize = false;
bool has_div = false;
bool has_size = false;
bool has_shape = false;
bool has_prod = false;
auto is_int = [](const NodeDef& node) -> bool {
return node.attr().at("T").type() == DT_INT32 ||
node.attr().at("T").type() == DT_INT64;
};
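  // Cheap scan for candidate op combinations before paying for shape
  // inference; Aborted tells the meta optimizer that this pass was a no-op.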
for (const NodeDef& node : item.graph.node()) {
if (IsShape(node)) {
has_shape = true;
} else if (IsProd(node) && is_int(node)) {
has_prod = true;
} else if (IsDiv(node) && is_int(node)) {
has_div = true;
} else if (IsSize(node)) {
has_size = true;
}
if ((has_shape && has_prod) || (has_div && has_size)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return absl::AbortedError("Nothing to do.");
}
*optimized_graph = item.graph;
GraphProperties properties(item);
bool inferred_properties = false;
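  // First rewrite: a Prod over the single axis of a Shape vector is just the
  // element count, so replace it with Size(x) when an equivalent kernel is
  // registered.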
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (!IsShape(node)) {
continue;
}
for (MutableGraphView::InputPort fanout :
graph.GetFanout(MutableGraphView::OutputPort(&node, 0))) {
if (fanout.node->op() != "Prod") {
continue;
}
if (fanout.node->attr().count("keep_dims") != 0 &&
fanout.node->attr().at("keep_dims").b()) {
continue;
}
const MutableGraphView::OutputPort reduce_indices =
graph.GetRegularFanin(MutableGraphView::InputPort(fanout.node, 1));
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop =
properties.GetOutputProperties(reduce_indices.node->name());
const int prop_size = prop.size();
if (prop_size <= reduce_indices.port_id) {
continue;
}
const TensorShapeProto& reduction_indices_shape =
prop[reduce_indices.port_id].shape();
if (NumCoefficients(reduction_indices_shape) == 1) {
const auto& input_props = properties.GetInputProperties(node.name());
if (input_props.size() != 1) {
continue;
}
NodeDef size_node(*fanout.node);
const DataType type = input_props[0].dtype();
size_node.set_op("Size");
size_node.set_input(0, node.input(0));
size_node.set_input(1, AsControlDependency(node));
size_node.mutable_attr()->erase("Tidx");
size_node.mutable_attr()->erase("keep_dims");
(*size_node.mutable_attr())["out_type"] = fanout.node->attr().at("T");
(*size_node.mutable_attr())["T"].set_type(type);
size_node.set_device(node.device());
Status s = IsKernelRegisteredForNode(size_node);
if (!s.ok()) {
continue;
}
fanout.node->Swap(&size_node);
}
}
}
}
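  // Second rewrite: fold Div(Size(a), Size(b)) into a scalar constant when
  // the ratio of the statically known input sizes can be computed.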
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (node.op() == "Div") {
const MutableGraphView::OutputPort input1 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 0));
const MutableGraphView::OutputPort input2 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 1));
if (input1.node == nullptr || input2.node == nullptr) continue;
if (!IsSize(*input1.node) || !IsSize(*input2.node)) {
continue;
}
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop1 = properties.GetInputProperties(input1.node->name());
const auto& prop2 = properties.GetInputProperties(input2.node->name());
if (prop1.size() != 1 || prop2.size() != 1) {
continue;
}
const TensorShapeProto& shape1 = prop1[0].shape();
const TensorShapeProto& shape2 = prop2[0].shape();
int64_t result = ComputeSizeRatio(shape1, shape2);
if (result >= 0) {
node.set_op("Const");
DataType dtype = node.attr().at("T").type();
node.mutable_attr()->erase("T");
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorProto* t = (*node.mutable_attr())["value"].mutable_tensor();
t->set_dtype(dtype);
*t->mutable_tensor_shape() = TensorShapeProto();
if (dtype == DT_INT32) {
t->add_int_val(result);
} else {
t->add_int64_val(result);
}
node.set_input(0, AsControlDependency(node.input(0)));
node.set_input(1, AsControlDependency(node.input(1)));
}
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class ShapeOptimizerTest : public GrapplerTest {};
TEST_F(ShapeOptimizerTest, OptimizeShapeProduct) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithOpName("e"), c, d, attrs.KeepDims(false));
Output f = ops::ReduceProd(s.WithOpName("f"), c, d, attrs.KeepDims(true));
GrapplerItem item;
item.fetch = {"e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
} else if (node.name() == "f") {
found++;
EXPECT_EQ("Prod", node.op());
EXPECT_EQ("c", node.input(0));
}
}
EXPECT_EQ(2, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors_expected[0].scalar<int>()(),
            tensors_actual[0].scalar<int>()());
  EXPECT_EQ(tensors_expected[1].scalar<int>()(),
            tensors_actual[1].scalar<int>()());
}
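// Size has no GPU kernel for DT_STRING, so the rewritten Size node is
// expected to inherit the Shape node's CPU placement rather than the Prod's
// GPU placement.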
TEST_F(ShapeOptimizerTest, OptimizeShapeProductMissingKernel) {
{
std::vector<std::unique_ptr<Device>> devices;
SessionOptions session_options;
session_options.config.mutable_gpu_options()
->set_per_process_gpu_memory_fraction(0.1);
session_options.env = Env::Default();
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(session_options, "", &devices));
bool found_gpu = false;
for (const auto& d : devices) {
if (d->device_type() == DEVICE_GPU) {
found_gpu = true;
break;
}
}
if (!found_gpu) {
LOG(INFO) << "Skipping test that requires GPU.";
return;
}
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), string("Hello"), {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithDevice("/gpu:0").WithOpName("e"), c, d,
attrs.KeepDims(false));
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("/cpu:0", node.device());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors_expected[0].scalar<int>()(),
            tensors_actual[0].scalar<int>()());
}
TEST_F(ShapeOptimizerTest, OptimizeShapeRatio) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 32});
Output b = ops::Const(s.WithOpName("b"), 3.14f, {32, 16});
Output c = ops::Size(s.WithOpName("c"), a);
Output d = ops::Size(s.WithOpName("d"), b);
Output e = ops::Div(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Const", node.op());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
  EXPECT_EQ(tensors_expected[0].scalar<int>()(),
            tensors_actual[0].scalar<int>()());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/shape_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/shape_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
761b914e-0551-49f0-83ab-8e512d88d3fe | cpp | tensorflow/tensorflow | tfg_optimizer_hook | tensorflow/core/grappler/optimizers/tfg_optimizer_hook.cc | tensorflow/core/grappler/optimizers/tfg_optimizer_hook_test.cc | #include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/ir/tf_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/dump_graph.h"
using tensorflow::Status;
using tensorflow::errors::InvalidArgument;
namespace mlir {
namespace tfg {
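// Pimpl holding the MLIRContext and the pass pipeline; when num_tfg_threads
// is nonzero, the context runs on a dedicated thread pool.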
class TFGGrapplerOptimizer::Impl {
public:
explicit Impl(TFGPassPipelineBuilder builder, unsigned num_tfg_threads)
: ctx_(MLIRContext::Threading::DISABLED), mgr_(&ctx_) {
DialectRegistry registry;
registry.addExtension(+[](MLIRContext* ctx, TFGraphDialect* dialect) {
dialect->addInterfaces<TensorFlowOpRegistryInterface>();
});
ctx_.appendDialectRegistry(registry);
builder(mgr_);
if (num_tfg_threads) {
llvm::ThreadPoolStrategy strategy;
strategy.ThreadsRequested = num_tfg_threads;
threadpool_ = std::make_unique<llvm::DefaultThreadPool>(strategy);
ctx_.setThreadPool(*threadpool_);
}
}
LogicalResult RunPipeline(ModuleOp module) { return mgr_.run(module); }
MLIRContext* GetContext() { return &ctx_; }
std::string GetPipelineString() {
std::string pipeline;
llvm::raw_string_ostream os(pipeline);
mgr_.printAsTextualPipeline(os);
return os.str();
}
private:
std::unique_ptr<llvm::DefaultThreadPool> threadpool_;
MLIRContext ctx_;
PassManager mgr_;
};
TFGGrapplerOptimizer::TFGGrapplerOptimizer(TFGPassPipelineBuilder builder,
unsigned num_tfg_threads)
: impl_(std::make_unique<Impl>(std::move(builder), num_tfg_threads)) {}
TFGGrapplerOptimizer::~TFGGrapplerOptimizer() = default;
std::string TFGGrapplerOptimizer::name() const {
return absl::StrCat("tfg_optimizer{", impl_->GetPipelineString(), "}");
}
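// Imports the GraphDef into TFG MLIR, runs the configured pass pipeline, and
// exports the result back to a GraphDef. Import failures come back as
// Aborted, which the grappler meta optimizer treats as non-fatal.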
Status TFGGrapplerOptimizer::Optimize(
tensorflow::grappler::Cluster* cluster,
const tensorflow::grappler::GrapplerItem& item,
tensorflow::GraphDef* optimized_graph) {
if (VLOG_IS_ON(4)) {
tensorflow::DumpGraphDefToFile(
absl::StrCat("tfg_before_graph_", item.id, "_",
std::hash<std::string>()(name())),
item.graph);
}
VLOG(5) << "TFG Before Graph: \n" << item.graph.DebugString();
tensorflow::GraphDebugInfo debug_info;
tensorflow::metrics::ScopedCounter<2> metrics(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"TfgOptimizer", "convert_graphdef_to_tfg"});
auto error_or_module =
ImportGraphDef(impl_->GetContext(), debug_info, item.graph);
if (!error_or_module.ok()) {
auto status = error_or_module.status();
tensorflow::errors::AppendToMessage(
&status, "when importing GraphDef to MLIR module in GrapplerHook");
LOG(ERROR) << name() << " failed: " << status.ToString();
return absl::AbortedError(status.message());
}
metrics.ReportAndStop();
ModuleOp module = (*error_or_module).get();
if (failed(impl_->RunPipeline(module))) {
return absl::InvalidArgumentError("MLIR Graph Optimizer failed: ");
}
tensorflow::GraphDef graphdef;
metrics.Reset({"TfgOptimizer", "convert_tfg_to_graphdef"});
TF_RETURN_WITH_CONTEXT_IF_ERROR(
ConvertToGraphDef(module, &graphdef),
"when exporting MLIR module to GraphDef in GrapplerHook");
  // Touch the library field so the exported GraphDef always carries a
  // (possibly empty) FunctionDefLibrary.
  (void)graphdef.mutable_library();
metrics.ReportAndStop();
*optimized_graph = std::move(graphdef);
if (VLOG_IS_ON(4)) {
tensorflow::DumpGraphDefToFile(
absl::StrCat("tfg_after_graph_", item.id, "_",
std::hash<std::string>()(name())),
*optimized_graph);
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "TFG After Graph: \n"
<< optimized_graph->DebugString() << "\nMLIR module: \n";
module.dump();
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h"
#include <utility>
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
class TestPass : public PassWrapper<TestPass, OperationPass<GraphOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPass);
StringRef getArgument() const override { return "grappler-hook-test-pass"; }
void runOnOperation() override {
GraphOp graph = getOperation();
for (TFOp op : graph.getOps()) op.setName(op.name() + "_visited");
}
};
class AlwaysFailPass
: public PassWrapper<AlwaysFailPass, OperationPass<GraphOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AlwaysFailPass);
StringRef getArgument() const override { return "grappler-hook-fail-pass"; }
void runOnOperation() override { signalPassFailure(); }
};
}
}
}
namespace tensorflow {
namespace grappler {
namespace {
TEST(TFGOptimizerTest, TestCustomPipeline) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(s.WithOpName("b"), 1.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ("a", item.graph.node(0).name());
EXPECT_EQ("b", item.graph.node(1).name());
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::TestPass>());
});
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_ASSERT_OK(status);
EXPECT_EQ("a_visited", output.node(0).name());
EXPECT_EQ("b_visited", output.node(1).name());
}
TEST(TFGOptimizerTest, TestCustomPipelineName) {
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::TestPass>());
});
EXPECT_EQ(optimizer.name(),
"tfg_optimizer{any(tfg.graph(grappler-hook-test-pass))}");
}
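// An attribute with an empty name makes the GraphDef import fail; the hook
// is expected to surface this as a non-fatal Aborted error.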
TEST(TFGOptimizerTest, TestImportErrorReturnsAborted) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AttrValue attr;
attr.set_i(0);
item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)});
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(errors::IsAborted(status));
}
TEST(TFGOptimizerTest, TestPassErrorIsFatal) {
Scope s = Scope::NewRootScope();
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {
mgr.addNestedPass<mlir::tfg::GraphOp>(
std::make_unique<mlir::tfg::AlwaysFailPass>());
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(status.ok());
EXPECT_FALSE(errors::IsAborted(status));
EXPECT_TRUE(errors::IsInvalidArgument(status));
}
TEST(TFGOptimizerTest, TestImportErrorMetaOptimizerIsNotFatal) {
Scope s = Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AttrValue attr;
attr.set_i(0);
item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)});
std::vector<std::unique_ptr<GraphOptimizer>> optimizers;
optimizers.push_back(std::make_unique<mlir::tfg::TFGGrapplerOptimizer>(
[](mlir::PassManager &mgr) {}));
GraphDef output;
Status status =
RunMetaOptimizer(std::move(item), {}, nullptr, nullptr, &output);
TF_EXPECT_OK(status);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/tfg_optimizer_hook.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/tfg_optimizer_hook_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98f4b3b6-d6bd-46a5-be18-068ea7853607 | cpp | tensorflow/tensorflow | function_api_info | tensorflow/core/grappler/optimizers/function_api_info.cc | tensorflow/core/grappler/optimizers/function_api_info_test.cc | #include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
FunctionApiInfo::FunctionApiInfo() {}
FunctionApiInfo::~FunctionApiInfo() {}
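// Parses the API-related function attributes: "api_implements" names the
// interface, "api_preferred_device" gives a placement hint, and
// "forward_function_name"/"backward_function_name" mark this function as the
// backward or forward half of a pair, naming its counterpart.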
Status FunctionApiInfo::Init(const FunctionDef& function_def) {
function_type_ = FunctionApiInfo::FunctionType::INFERENCE;
for (const auto& attr : function_def.attr()) {
if (attr.first == "api_preferred_device") {
preferred_device_ = attr.second.s();
}
if (attr.first == "api_implements") {
interface_name_ = attr.second.s();
}
if (attr.first == "forward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::BACKWARD;
pairing_function_name_ = attr.second.s();
}
if (attr.first == "backward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::FORWARD;
pairing_function_name_ = attr.second.s();
}
}
input_arg_dtypes_.reserve(function_def.signature().input_arg_size());
for (const auto& input_arg : function_def.signature().input_arg()) {
input_arg_dtypes_.emplace_back(input_arg.type());
}
output_arg_dtypes_.reserve(function_def.signature().output_arg_size());
for (const auto& output_arg : function_def.signature().output_arg()) {
output_arg_dtypes_.emplace_back(output_arg.type());
}
if (interface_name_.empty() && !preferred_device_.empty()) {
return errors::InvalidArgument(
"Function '", function_def.signature().name(),
"' has a preferred device, but does not implement an interface");
}
return absl::OkStatus();
}
const string& FunctionApiInfo::preferred_device() const {
return preferred_device_;
}
const string& FunctionApiInfo::interface_name() const {
return interface_name_;
}
const FunctionApiInfo::FunctionType FunctionApiInfo::function_type() const {
return function_type_;
}
const string& FunctionApiInfo::pairing_function_name() const {
return pairing_function_name_;
}
const DataTypeVector& FunctionApiInfo::input_arg_dtypes() const {
return input_arg_dtypes_;
}
const DataTypeVector& FunctionApiInfo::output_arg_dtypes() const {
return output_arg_dtypes_;
}
FunctionLibraryApiInfo::FunctionLibraryApiInfo() {}
FunctionLibraryApiInfo::~FunctionLibraryApiInfo() {}
namespace {
bool IsSameArgDef(const OpDef::ArgDef& arg1, const OpDef::ArgDef& arg2) {
if (arg1.type() != arg2.type()) return false;
if (arg1.type_attr() != arg2.type_attr()) return false;
if (arg1.number_attr() != arg2.number_attr()) return false;
if (arg1.type_list_attr() != arg2.type_list_attr()) return false;
if (arg1.is_ref() != arg2.is_ref()) return false;
return true;
}
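// Returns true if the checked portions (inputs and/or outputs) of the two
// function signatures match argument-by-argument.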
bool IsSameSignature(const FunctionDef& f1, const FunctionDef& f2,
const bool check_inputs, const bool check_outputs) {
const auto& sig1 = f1.signature();
const auto& sig2 = f2.signature();
if (check_inputs) {
if (sig1.input_arg_size() != sig2.input_arg_size()) return false;
for (int k = 0; k < sig1.input_arg_size(); ++k) {
if (!IsSameArgDef(sig1.input_arg(k), sig2.input_arg(k))) return false;
}
}
if (check_outputs) {
if (f1.ret().size() != f2.ret().size()) return false;
if (sig1.output_arg_size() != sig2.output_arg_size()) return false;
for (int k = 0; k < sig1.output_arg_size(); ++k) {
if (!IsSameArgDef(sig1.output_arg(k), sig2.output_arg(k))) return false;
}
}
return true;
}
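// Verifies that all functions implementing `interface_name` agree on the
// relevant half of the signature: inputs for inference/forward functions,
// outputs for inference/backward functions.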
Status ValidateSignature(const string& interface_name,
const std::vector<const FunctionDef*>& equiv_funcs,
const FunctionApiInfo::FunctionType function_type) {
if (equiv_funcs.size() < 2) return absl::OkStatus();
for (size_t k = 1; k < equiv_funcs.size(); ++k) {
const bool check_input =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::FORWARD);
const bool check_output =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::BACKWARD);
if (!IsSameSignature(*equiv_funcs[0], *equiv_funcs[k], check_input,
check_output)) {
return errors::InvalidArgument(
"Functions '", equiv_funcs[0]->signature().name(), "' and '",
equiv_funcs[k]->signature().name(), "' both implement '",
interface_name, "' but their signatures do not match.");
}
}
return absl::OkStatus();
}
Status ValidateSignatures(
const std::unordered_map<string, std::vector<const FunctionDef*>>&
intf_to_func,
const FunctionApiInfo::FunctionType function_type) {
for (const auto& item : intf_to_func)
TF_RETURN_IF_ERROR(
ValidateSignature(item.first, item.second, function_type));
return absl::OkStatus();
}
}
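// Indexes every function in the library by the interface it implements,
// bucketed by function type, then cross-validates the signatures within each
// bucket.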
Status FunctionLibraryApiInfo::Init(
const FunctionDefLibrary& function_library) {
std::unordered_map<string, std::vector<const FunctionDef*>> infer_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> fwd_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> bwd_funcs;
for (const auto& function : function_library.function()) {
    auto func_info = std::make_unique<FunctionApiInfo>();
TF_RETURN_IF_ERROR(func_info->Init(function));
if (func_info->interface_name().empty()) continue;
const string& function_name = function.signature().name();
const string& interface_name = func_info->interface_name();
VLOG(3) << "Got " << func_info->function_type()
<< " function: " << function_name
<< " with interface: " << interface_name;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
intf_to_inference_funcs_[interface_name].emplace_back(function_name);
infer_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::FORWARD:
intf_to_forward_funcs_[interface_name].emplace_back(function_name);
fwd_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::BACKWARD:
intf_to_backward_funcs_[interface_name].emplace_back(function_name);
bwd_funcs[interface_name].emplace_back(&function);
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
func_info_[function_name] = std::move(func_info);
}
TF_RETURN_IF_ERROR(ValidateSignatures(
infer_funcs, FunctionApiInfo::FunctionType::INFERENCE));
TF_RETURN_IF_ERROR(
ValidateSignatures(fwd_funcs, FunctionApiInfo::FunctionType::FORWARD));
TF_RETURN_IF_ERROR(
ValidateSignatures(bwd_funcs, FunctionApiInfo::FunctionType::BACKWARD));
return absl::OkStatus();
}
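// Collects the names of all other functions that implement the same interface
// with the same function type as `function_name`. Unknown function names are
// silently treated as having no equivalents.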
Status FunctionLibraryApiInfo::GetEquivalentImplementations(
const string& function_name, std::vector<string>* other_functions) const {
const auto func_it = func_info_.find(function_name);
if (func_it == func_info_.end()) return absl::OkStatus();
const FunctionApiInfo* func_info = func_it->second.get();
absl::flat_hash_map<string, std::vector<string>>::const_iterator it;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
it = intf_to_inference_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::FORWARD:
it = intf_to_forward_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::BACKWARD:
it = intf_to_backward_funcs_.find(func_info->interface_name());
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
for (const auto& func_name : it->second) {
if (func_name == function_name) continue;
other_functions->emplace_back(func_name);
}
return absl::OkStatus();
}
const FunctionApiInfo* FunctionLibraryApiInfo::GetApiInfo(
const string& function_name) const {
const auto it = func_info_.find(function_name);
if (it == func_info_.end()) return nullptr;
return it->second.get();
}
}
} | #include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void SetArg(const string& name, const string& type_name,
OpDef::ArgDef* arg_def) {
arg_def->set_name(name);
arg_def->set_type_attr(type_name);
}
typedef std::pair<string, string> ArgSpec;
void SetArgs(const std::vector<ArgSpec>& input_args_spec,
const std::vector<ArgSpec>& output_args_spec, OpDef* sig) {
for (const auto& arg_spec : input_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_input_arg());
for (const auto& arg_spec : output_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_output_arg());
}
void PopulateFunction(const string& name, const string& api_interface_name,
const string& preferred_device,
const std::vector<ArgSpec>& input_args,
const std::vector<ArgSpec>& output_args,
const string& forward_function_name,
const string& backward_function_name,
FunctionDef* func_def) {
OpDef* sig = func_def->mutable_signature();
sig->set_name(name);
SetArgs(input_args, output_args, sig);
auto* func_attr = func_def->mutable_attr();
if (!api_interface_name.empty())
(*func_attr)["api_implements"].set_s(api_interface_name);
if (!preferred_device.empty())
(*func_attr)["api_preferred_device"].set_s(preferred_device);
if (!forward_function_name.empty())
(*func_attr)["forward_function_name"].set_s(forward_function_name);
if (!backward_function_name.empty())
(*func_attr)["backward_function_name"].set_s(backward_function_name);
}
void PopulateSampleLibrary(const bool mismatch_args,
FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> func_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> func_wrong_args{{"in1", "int32"},
{"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU",
mismatch_args ? func_wrong_args : func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoThings", "DoThings", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("OneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("AnotherOneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
}
void PopulateComplexLibrary(FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> input_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
const std::vector<ArgSpec> output_with_state{
{"out", "float32"}, {"state1", "int32"}, {"state2", "int32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", input_args, output_args, "",
"DoStuffCpu_gradient", func_lib->add_function());
PopulateFunction("DoStuffCpu_gradient", "DoStuff", "CPU", output_args,
input_args, "DoStuffCpu", "", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU", input_args,
output_with_state, "", "DoStuffGpu_gradient",
func_lib->add_function());
PopulateFunction("DoStuffGpu_gradient", "DoStuff", "GPU", output_with_state,
input_args, "DoStuffGpu", "", func_lib->add_function());
}
bool CheckEquivImpl(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name,
const std::vector<string>& expected_other) {
std::vector<string> other_impl;
Status status =
lib_api_info.GetEquivalentImplementations(func_name, &other_impl);
EXPECT_EQ(status, absl::OkStatus());
const std::unordered_set<string> actual(other_impl.begin(), other_impl.end());
const std::unordered_set<string> expected(expected_other.begin(),
expected_other.end());
return actual == expected;
}
string GetInterfaceName(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->interface_name();
}
string GetPreferredDevice(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->preferred_device();
}
TEST(FunctionApiInfoTest, ParseTags) {
FunctionDefLibrary func_lib;
  PopulateSampleLibrary(/*mismatch_args=*/false, &func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoThings", GetInterfaceName(lib_api_info, "DoThings"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("", GetPreferredDevice(lib_api_info, "DoThings"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "OneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "AnotherOneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoThings", {}));
}
TEST(FunctionApiInfoTest, ComplexFunctionLib) {
FunctionDefLibrary func_lib;
PopulateComplexLibrary(&func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu_gradient",
{"DoStuffGpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu_gradient",
{"DoStuffCpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
}
TEST(FunctionApiInfoTest, MismatchedArguments) {
FunctionDefLibrary func_lib;
  PopulateSampleLibrary(/*mismatch_args=*/true, &func_lib);
FunctionLibraryApiInfo lib_api_info;
const Status ret = lib_api_info.Init(func_lib);
EXPECT_FALSE(ret.ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_api_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_api_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7fbb648b-bae7-4997-9456-1c16ed08c53c | cpp | tensorflow/tensorflow | dependency_optimizer | tensorflow/core/grappler/optimizers/dependency_optimizer.cc | tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include <memory>
#include <unordered_set>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
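// Removes the control input `control_input_to_remove` from `node` if present
// and updates `node_map` accordingly. Returns true if an input was removed.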
bool RemoveControlInput(NodeDef* node, const string& control_input_to_remove,
NodeMap* node_map) {
for (int pos = node->input_size() - 1; pos >= 0; --pos) {
const string& input = node->input(pos);
if (input[0] != '^') break;
if (input == control_input_to_remove) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
node_map->RemoveOutput(NodeName(input), node->name());
return true;
}
}
return false;
}
}
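// Returns true if it is safe to bypass and remove this node. Identity and
// IdentityN nodes are kept when they are preserved, when fetches are unknown,
// when they follow a Variable or Recv, or when bypassing them would break
// Switch/Merge/Retval anchoring.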
bool DependencyOptimizer::SafeToRemoveIdentity(const NodeDef& node) const {
if (!IsIdentity(node) && !IsIdentityN(node)) {
return true;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (!fetch_nodes_known_) {
return false;
}
if (node.input_size() < 1) {
return false;
}
const NodeDef* input = node_map_->GetNode(NodeName(node.input(0)));
if (input == nullptr) {
VLOG(1) << "node = " << node.name() << " input = " << node.input(0);
return false;
}
if (IsVariable(*input) || IsRecv(*input)) {
return false;
}
for (const auto& consumer : node_map_->GetOutputs(node.name())) {
if (node.input_size() > 1 && (IsRetval(*consumer) || IsMerge(*consumer))) {
return false;
}
if (IsSwitch(*input)) {
for (const string& consumer_input : consumer->input()) {
if (consumer_input == AsControlDependency(node.name())) {
return false;
}
}
}
}
return true;
}
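// Returns true if `node` only has control outputs and can be replaced by a
// NoOp without changing observable behavior: fetches must be known, the node
// must not be preserved, must be free of side effects (variable reads and
// gathers are allowed), and must not be one of a small set of special ops.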
bool DependencyOptimizer::SafeToConvertToNoOp(const NodeDef& node) const {
if (HasRegularOutputs(node, *node_map_)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has outputs.";
return false;
}
if (!fetch_nodes_known_) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Fetches unknown.";
return false;
}
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
VLOG(3) << "Not safe to convert to NoOp: " << node.name()
<< " is in preserve set.";
return false;
}
if (IsMerge(node) || IsSwitch(node) || ModifiesFrameInfo(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node modifies frame info.";
return false;
}
static const absl::flat_hash_set<string>* gather_ops =
new absl::flat_hash_set<string>{"Gather", "GatherV2", "GatherNd",
"ResourceGather", "ResourceGatherNd"};
const bool is_variable_read =
IsReadVariableOp(node) || IsReadVariablesOp(node) ||
gather_ops->find(node.op()) != gather_ops->end();
if (!is_variable_read && !IsFreeOfSideEffect(node)) {
VLOG(3) << "Not safe to convert '" << node.name()
<< " to NoOp. Node has side effect.";
return false;
}
if (absl::StartsWith(node.op(), "Submodel")) {
return false;
}
const OpDef* op_def = nullptr;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok() || op_def->output_arg_size() == 0) {
return false;
}
const std::unordered_set<string> do_not_rewrite_ops{
"Assert", "CheckNumerics", "_Retval",
"_Arg", "_ParallelConcatUpdate", "TPUExecute",
"TPUCompile", "ControlTrigger"};
if (do_not_rewrite_ops.find(node.op()) != do_not_rewrite_ops.end()) {
return false;
}
if (!SafeToRemoveIdentity(node)) {
return false;
}
return true;
}
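// Counts the number of edges the graph would contain if `node` were bypassed
// by connecting its inputs directly to `output_nodes`. Multi-input IdentityN
// nodes are counted edge-by-edge; otherwise the count is simply
// num_inputs * num_outputs.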
int DependencyOptimizer::NumEdgesIfBypassed(
const NodeDef& node, const std::vector<NodeDef*>& output_nodes) const {
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (is_multi_input_identity_n) {
int num_edges_if_bypassed(0);
for (const string& input_node_name : node.input()) {
if (IsControlInput(input_node_name)) {
num_edges_if_bypassed += num_outputs;
} else {
++num_edges_if_bypassed;
}
}
for (auto consumer : output_nodes) {
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId consumer_input = ParseTensorName(consumer->input(j));
if (consumer_input.node() == node.name()) {
if (IsControlInput(consumer_input)) {
num_edges_if_bypassed += num_inputs;
} else {
++num_edges_if_bypassed;
}
}
}
}
return num_edges_if_bypassed;
} else {
return num_inputs * num_outputs;
}
}
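// Heuristic: bypassing `node` is considered beneficial only if it does not
// increase the total number of edges and does not increase the number of
// cross-device edges relative to the current graph.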
bool DependencyOptimizer::BypassingNodeIsBeneficial(
const NodeDef& node, const std::vector<NodeDef*>& input_nodes,
const std::vector<NodeDef*>& output_nodes) const {
const bool is_identity = IsIdentity(node) || IsIdentityNSingleInput(node);
const bool is_multi_input_identity_n =
IsIdentityN(node) && !IsIdentityNSingleInput(node);
const int num_outputs = output_nodes.size();
const int num_inputs = node.input_size();
if (NumEdgesIfBypassed(node, output_nodes) > num_inputs + num_outputs) {
return false;
}
if ((num_inputs == 1 && num_outputs > 1 &&
input_nodes[0]->device() != node.device()) ||
(num_inputs > 1 && num_outputs == 1 &&
output_nodes[0]->device() != node.device())) {
return false;
}
const string& node_dev = node.device();
int num_cross_in = 0;
for (NodeDef* input_node : input_nodes) {
num_cross_in += static_cast<int>(input_node->device() != node_dev);
}
int num_cross_out = 0;
for (NodeDef* output_node : output_nodes) {
num_cross_out += static_cast<int>(output_node->device() != node_dev);
}
const int num_cross_before = num_cross_in + num_cross_out;
int num_cross_after = 0;
for (NodeDef* input_node : input_nodes) {
for (NodeDef* output_node : output_nodes) {
num_cross_after +=
static_cast<int>(input_node->device() != output_node->device());
}
}
if (num_cross_after > num_cross_before) {
return false;
}
if ((is_identity || is_multi_input_identity_n) && num_cross_in > 0 &&
num_cross_out > 0 && num_cross_after > 0) {
return false;
}
return true;
}
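// Attempts to simplify the node at `node_idx`: prunes control outputs of
// constants, converts side-effect-free nodes with only control outputs to
// NoOp, and bypasses NoOp/Identity nodes when beneficial. Affected neighbors
// are queued in `nodes_to_simplify` and removable nodes in `nodes_to_delete`.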
void DependencyOptimizer::OptimizeNode(int node_idx,
SetVector<int>* nodes_to_simplify,
std::set<int>* nodes_to_delete) {
NodeDef* node = optimized_graph_->mutable_node(node_idx);
const bool is_noop = IsNoOp(*node);
const bool is_identity = IsIdentity(*node) || IsIdentityNSingleInput(*node);
const bool is_multi_input_identity =
IsIdentityN(*node) && !IsIdentityNSingleInput(*node);
const string node_name = node->name();
if (IsConstant(*node) && node->input_size() == 0) {
const auto output_nodes = node_map_->GetOutputs(node_name);
for (NodeDef* fanout : output_nodes) {
bool optimize_fanout = false;
bool data_connection = false;
for (int i = fanout->input_size() - 1; i >= 0; --i) {
const TensorId input_tensor = ParseTensorName(fanout->input(i));
if (input_tensor.node() == node_name) {
if (input_tensor.index() < 0) {
fanout->mutable_input()->SwapElements(i, fanout->input_size() - 1);
fanout->mutable_input()->RemoveLast();
optimize_fanout = true;
} else {
data_connection = true;
}
}
}
if (optimize_fanout) {
nodes_to_simplify->PushBack(node_to_idx_[fanout]);
if (!data_connection) {
node_map_->RemoveOutput(node_name, fanout->name());
}
}
}
if (node_map_->GetOutputs(node_name).empty() && fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_to_idx_[node]);
}
return;
}
if (!is_noop && SafeToConvertToNoOp(*node)) {
VLOG(2) << "***** Replacing " << node_name << " (" << node->op()
<< ") with NoOp.";
std::unordered_set<string> ctrl_inputs;
int pos = 0;
while (pos < node->input_size()) {
const string old_input = node->input(pos);
if (IsControlInput(old_input)) {
if (!ctrl_inputs.insert(old_input).second) {
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
} else {
++pos;
}
continue;
}
const string ctrl_input = ConstantFolding::AddControlDependency(
old_input, optimized_graph_, node_map_.get());
ctrl_inputs.insert(ctrl_input);
node->set_input(pos, ctrl_input);
node_map_->UpdateInput(node_name, old_input, ctrl_input);
const NodeDef* old_input_node = node_map_->GetNode(old_input);
nodes_to_simplify->PushBack(node_to_idx_[old_input_node]);
++pos;
}
ChangeToNoOp(node);
EraseRegularNodeAttributes(node);
DedupControlInputs(node);
nodes_to_simplify->PushBack(node_to_idx_[node]);
return;
}
if (is_noop || ((is_identity || is_multi_input_identity) &&
SafeToRemoveIdentity(*node))) {
const int num_inputs = node->input_size();
std::vector<NodeDef*> input_nodes;
for (int i = 0; i < num_inputs; ++i) {
NodeDef* input_node = node_map_->GetNode(node->input(i));
if (input_node == nullptr) {
LOG(ERROR) << "Invalid input " << node->input(i);
return;
}
input_nodes.push_back(input_node);
}
const auto& output_node_set = node_map_->GetOutputs(node_name);
const std::vector<NodeDef*> output_nodes(output_node_set.begin(),
output_node_set.end());
if (!BypassingNodeIsBeneficial(*node, input_nodes, output_nodes)) {
return;
}
VLOG(2) << "***** Rerouting input around\n" << node->DebugString();
for (auto consumer : output_nodes) {
bool updated_consumer = false;
VLOG(2) << "consumer before:\n" << consumer->DebugString();
for (int i = 0; i < num_inputs; ++i) {
const NodeDef* input = input_nodes[i];
if ((is_identity && i == 0) ||
(is_multi_input_identity && !IsControlInput(node->input(i)))) {
string new_input;
const string& input_to_forward = node->input(i);
CHECK(!IsControlInput(input_to_forward));
for (int j = 0; j < consumer->input_size(); ++j) {
const TensorId old_input = ParseTensorName(consumer->input(j));
if (old_input.node() == node_name) {
if (old_input.index() == i) {
new_input = input_to_forward;
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
} else if (old_input.index() == -1) {
new_input = AsControlDependency(NodeName(input_to_forward));
node_map_->UpdateInput(consumer->name(),
string(old_input.node()), new_input);
consumer->set_input(j, new_input);
}
}
}
updated_consumer = true;
} else {
if (node_map_->GetOutputs(input->name()).count(consumer) == 0) {
consumer->add_input(AsControlDependency(input->name()));
node_map_->AddOutput(input->name(), consumer->name());
nodes_to_simplify->PushBack(node_to_idx_[input]);
updated_consumer = true;
}
}
}
updated_consumer |= RemoveControlInput(
consumer, AsControlDependency(node_name), node_map_.get());
if (updated_consumer) {
nodes_to_simplify->PushBack(node_to_idx_[consumer]);
}
VLOG(2) << "consumer after:\n" << consumer->DebugString();
}
node_map_->RemoveOutputs(node_name);
if (fetch_nodes_known_ &&
nodes_to_preserve_.find(node_name) == nodes_to_preserve_.end()) {
nodes_to_delete->insert(node_idx);
node_map_->RemoveInputs(node_name);
node->clear_input();
}
}
}
void DependencyOptimizer::CleanControlInputs() {
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
DedupControlInputs(optimized_graph_->mutable_node(i));
}
}
Status DependencyOptimizer::OptimizeDependencies() {
SetVector<int> nodes_to_simplify;
std::set<int> nodes_to_delete;
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
if (IsNoOp(node) || IsIdentity(node) || IsIdentityN(node) ||
IsConstant(node) || SafeToConvertToNoOp(node)) {
nodes_to_simplify.PushBack(i);
}
}
while (!nodes_to_simplify.Empty()) {
int node_to_simplify = nodes_to_simplify.PopBack();
while (nodes_to_delete.find(node_to_simplify) != nodes_to_delete.end()) {
node_to_simplify = nodes_to_simplify.PopBack();
}
OptimizeNode(node_to_simplify, &nodes_to_simplify, &nodes_to_delete);
}
if (fetch_nodes_known_) {
VLOG(1) << "Deleted " << nodes_to_delete.size() << " out of "
<< optimized_graph_->node_size() << " nodes.";
EraseNodesFromGraph(nodes_to_delete, optimized_graph_);
    node_map_ = std::make_unique<NodeMap>(optimized_graph_);
BuildNodeToIdx();
}
return absl::OkStatus();
}
namespace {
enum DistanceFromSource : uint8 { ZERO = 0, ONE = 1, TWO_OR_GREATER = 2 };
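// Computes, for every node in `target_range` reachable from `source`, a
// saturating lower bound on the longest-path distance (0, 1, or >= 2) by
// traversing the `outputs` adjacency lists.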
void LongestPathsLowerBounds(
int source, const std::pair<int, int>& target_range,
const std::vector<std::vector<int>>& outputs,
std::vector<DistanceFromSource>* longest_distance) {
std::deque<int> queue;
queue.emplace_front(source);
while (!queue.empty()) {
int node = queue.front();
queue.pop_front();
for (int fanout : outputs[node]) {
if (fanout >= target_range.first && fanout <= target_range.second &&
(*longest_distance)[fanout] != TWO_OR_GREATER) {
(*longest_distance)[fanout] =
(*longest_distance)[fanout] == ZERO ? ONE : TWO_OR_GREATER;
queue.emplace_front(fanout);
}
}
}
}
}
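// Removes redundant control edges: a control edge from `source` to `target`
// can be dropped whenever some other path of length >= 2 from `source` also
// reaches `target`, since the dependency is then implied transitively.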
Status DependencyOptimizer::TransitiveReduction() {
const int num_nodes = optimized_graph_->node_size();
int num_controls = 0;
std::vector<std::vector<int>> outputs(num_nodes);
std::vector<absl::InlinedVector<std::pair<int, int>, 2UL>> control_outputs(
num_nodes);
std::vector<std::pair<int, int>> target_range(num_nodes, {num_nodes, -1});
for (int node_idx = 0; node_idx < num_nodes; ++node_idx) {
const NodeDef& node = optimized_graph_->node(node_idx);
if (ModifiesFrameInfo(node) || !HasOpDef(node)) {
continue;
}
for (int input_slot = 0; input_slot < node.input_size(); ++input_slot) {
const string& input = node.input(input_slot);
const NodeDef* input_node = node_map_->GetNode(input);
if (ModifiesFrameInfo(*input_node) || IsMerge(*input_node)) {
continue;
}
const int input_node_idx = node_to_idx_[input_node];
outputs[input_node_idx].push_back(node_idx);
target_range[input_node_idx].first =
std::min(target_range[input_node_idx].first, node_idx);
if (IsControlInput(input)) {
++num_controls;
control_outputs[input_node_idx].emplace_back(node_idx, input_slot);
target_range[input_node_idx].second =
std::max(target_range[input_node_idx].second, node_idx);
}
}
}
int num_controls_removed = 0;
std::vector<DistanceFromSource> longest_distance(num_nodes);
typedef std::pair<int, int> InputSlotAndSource;
absl::flat_hash_map<
int, std::set<InputSlotAndSource, std::greater<InputSlotAndSource>>>
control_edges_to_remove;
for (int source = 0; source < num_nodes; ++source) {
if (target_range[source].first >= target_range[source].second ||
target_range[source].second <= source) {
continue;
}
std::fill(longest_distance.begin() + target_range[source].first,
longest_distance.begin() + target_range[source].second + 1, ZERO);
LongestPathsLowerBounds(source, target_range[source], outputs,
&longest_distance);
for (const auto& control_output : control_outputs[source]) {
const int target = control_output.first;
if (longest_distance[target] == TWO_OR_GREATER) {
const int input_slot = control_output.second;
control_edges_to_remove[target].emplace(input_slot, source);
}
}
}
for (const auto& it : control_edges_to_remove) {
const int target = it.first;
NodeDef* target_node = optimized_graph_->mutable_node(target);
for (const InputSlotAndSource& slot_and_source : it.second) {
const int input_slot = slot_and_source.first;
const int source = slot_and_source.second;
const NodeDef& source_node = optimized_graph_->node(source);
CHECK_LT(input_slot, target_node->input_size());
target_node->mutable_input()->SwapElements(input_slot,
target_node->input_size() - 1);
node_map_->RemoveOutput(source_node.name(), target_node->name());
target_node->mutable_input()->RemoveLast();
++num_controls_removed;
}
}
VLOG(1) << "Removed " << num_controls_removed << " out of " << num_controls
<< " control dependencies";
return absl::OkStatus();
}
void DependencyOptimizer::BuildNodeToIdx() {
node_to_idx_.clear();
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
const NodeDef& node = optimized_graph_->node(i);
node_to_idx_[&node] = i;
}
}
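// When a node has two or more control inputs from the same remote device (or
// remote host, if `host_granularity` is set), funnels them through a single
// NoOp on that device so that only one cross-device control edge remains.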
void DependencyOptimizer::GroupCrossDeviceControlEdges(bool host_granularity) {
VLOG(1)
<< "DependencyOptimizer::GroupCrossDeviceControlEdges host_granularity="
<< host_granularity;
const int num_nodes = optimized_graph_->node_size();
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = optimized_graph_->mutable_node(i);
if (node->device().empty()) continue;
string rest, node_device = node->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(node->device(), &node_device, &rest);
}
std::map<string, NodeDef*> noops;
int num_noops = 0;
for (int j = 0; j < node->input_size(); ++j) {
if (IsControlInput(node->input(j))) {
const NodeDef* input = node_map_->GetNode(node->input(j));
if (input == nullptr || input->device().empty()) continue;
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
if (input_device != node_device) {
VLOG(2) << "Cross-device " << node->name() << " " << input->device()
<< " -> " << node->device();
auto emplace_result = noops.emplace(input_device, nullptr);
if (!emplace_result.second &&
emplace_result.first->second == nullptr) {
VLOG(2) << "Duplicate input device from " << node->name();
string group_name;
NodeDef* noop;
do {
group_name = AddPrefixToNodeName(
node->name(),
strings::StrCat("GroupCrossDeviceControlEdges_", num_noops));
noop = node_map_->GetNode(group_name);
++num_noops;
} while (noop != nullptr);
noop = optimized_graph_->add_node();
noop->set_name(group_name);
noop->set_device(input->device());
noop->set_op("NoOp");
node_map_->AddNode(noop->name(), noop);
emplace_result.first->second = noop;
VLOG(1) << "GroupCrossDeviceControlEdges: Added "
<< SummarizeNodeDef(*noop);
}
}
}
}
int pos = 0;
while (pos < node->input_size()) {
const string& input_name = node->input(pos);
if (IsControlInput(input_name)) {
NodeDef* input = node_map_->GetNode(input_name);
if (input == nullptr) {
++pos;
} else {
string input_device = input->device();
if (host_granularity) {
DeviceNameUtils::SplitDeviceName(input->device(), &input_device,
&rest);
}
auto it = noops.find(input_device);
if (it == noops.end() || it->second == nullptr) {
++pos;
} else {
VLOG(2) << "Rewriting input from " << input_name;
node->mutable_input()->SwapElements(pos, node->input_size() - 1);
node->mutable_input()->RemoveLast();
it->second->add_input(AsControlDependency(*input));
node_map_->UpdateOutput(input_name, node->name(),
it->second->name());
}
}
} else {
++pos;
}
}
for (const auto& entry : noops) {
if (entry.second) {
node->add_input(AsControlDependency(*entry.second));
node_map_->AddOutput(entry.second->name(), node->name());
}
}
}
}
Status DependencyOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
optimized_graph_ = optimized_graph;
*optimized_graph_ = item.graph;
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
CleanControlInputs();
const int num_iterations = 2;
for (int iteration = 0; iteration < num_iterations; ++iteration) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
Status topo_sort_status;
topo_sort_status = TopologicalSort(optimized_graph_);
    node_map_ = std::make_unique<NodeMap>(optimized_graph_);
BuildNodeToIdx();
if (topo_sort_status.ok()) {
TF_RETURN_IF_ERROR(TransitiveReduction());
} else {
LOG(ERROR) << "Iteration = " << iteration
<< ", topological sort failed with message: "
<< topo_sort_status.message();
}
TF_RETURN_IF_ERROR(OptimizeDependencies());
CleanControlInputs();
GroupCrossDeviceControlEdges(false);
GroupCrossDeviceControlEdges(true);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DependencyOptimizerTest : public GrapplerTest {};
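// Asserts that `optimized_graph` is node-for-node identical to
// `original_graph`; used by tests where the optimizer should be a no-op.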
void VerifyGraphsEqual(const GraphDef& original_graph,
const GraphDef& optimized_graph, const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
TEST_F(DependencyOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, DependenciesDrivenByConstants) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
Output z = ops::Const(s.WithOpName("z"), {1.0f, 2.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(x), add);
Output id2 = ops::Identity(
s.WithOpName("id2").WithControlDependencies(y).WithControlDependencies(z),
add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(5, output.node_size());
for (const NodeDef& node : item.graph.node()) {
if (node.name() == "id1" || node.name() == "id2") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("add", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
}
EXPECT_EQ(2, found);
}
TEST_F(DependencyOptimizerTest, FullTypeForKeptNoop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
Output id3 =
ops::Identity(s.WithOpName("id3").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("id1");
item.fetch.push_back("id2");
item.fetch.push_back("id3");
for (int i = 0; i < item.graph.node_size(); ++i) {
NodeDef* node = item.graph.mutable_node(i);
if (node->name() == "add") {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args()->set_type_id(TFT_FLOAT);
*node->mutable_experimental_type() = t;
break;
}
}
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id2") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "id3") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^add", node.input(1));
++found;
} else if (node.name() == "add") {
EXPECT_EQ(node.op(), "NoOp");
FullTypeDef t = node.experimental_type();
EXPECT_TRUE((t.type_id() == TFT_UNSET) ||
((t.type_id() == TFT_PRODUCT) && (t.args_size() == 0)));
++found;
}
}
EXPECT_EQ(4, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_RepeatedInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, x);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id1"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
int found = 0;
for (int i = 0; i < item.graph.node_size(); ++i) {
const NodeDef& node = item.graph.node(i);
EXPECT_NE("add", node.name());
if (node.name() == "id1") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
}
EXPECT_EQ(1, found);
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_SwitchIdentity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(scope.WithOpName("switch"), v_in, v_ctrl);
Output neg = ops::Neg(scope.WithOpName("neg"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1").WithControlDependencies(neg),
{1.0f, 2.0f}, {1, 2});
Output ctrl_dep_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c2 =
ops::Const(scope.WithOpName("c2").WithControlDependencies(ctrl_dep_id),
{1.0f, 2.0f}, {1, 2});
Output neg1 = ops::Neg(scope.WithOpName("neg1"), s.output_false);
Output neg2 = ops::Neg(scope.WithOpName("neg2"), ctrl_dep_id);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("c2");
item.fetch.push_back("neg1");
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("neg", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_NoFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output add = ops::Add(s.WithOpName("add"), x, y);
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x);
Output id2 =
ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_EmptyInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s, {1, 2}, DT_FLOAT);
auto noop1 = ops::NoOp(s);
auto noop2 = ops::NoOp(s.WithControlDependencies(x));
Output id = ops::Identity(s.WithControlDependencies({noop1.operation}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("RandomUniform", node.input(0));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x).WithDevice("/CPU:1"));
auto noop_1 = ops::NoOp(
s.WithControlDependencies(x).WithControlDependencies(y).WithDevice(
"/CPU:0"));
Output id = ops::Identity(
s.WithControlDependencies({noop.operation}).WithDevice("/CPU:1"), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation})
.WithDevice("/CPU:1"),
y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_DeviceBoundaries) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithDevice("/CPU:0"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:1"), id_b);
Output id_1 = ops::Identity(s.WithDevice("/CPU:1"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
TF_CHECK_OK(TopologicalSort(&item.graph));
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(DependencyOptimizerTest, RemoveIdentityOps_IdenticalDevices) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x").WithDevice("/CPU:0"), {1, 2},
DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a").WithDevice("/CPU:1"), x);
Output id =
ops::Identity(s.WithControlDependencies(id_a).WithDevice("/CPU:0"), id_a);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.name(), "id_a");
if (node.name() == "Identity") {
EXPECT_EQ(node.input(0), "x");
}
}
}
TEST_F(DependencyOptimizerTest, RemoveNoOps_SingleInputOrOutput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
auto noop = ops::NoOp(s.WithControlDependencies(x));
auto noop_1 =
ops::NoOp(s.WithControlDependencies(x).WithControlDependencies(y));
Output id = ops::Identity(s.WithControlDependencies({noop.operation}), x);
Output id_1 = ops::Identity(
s.WithControlDependencies({noop.operation, noop_1.operation}), y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("Identity");
item.fetch.push_back("Identity_1");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
item.graph.Swap(&output);
status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
for (const NodeDef& node : output.node()) {
if (node.name() == "NoOp" || node.name() == "NoOp_1") {
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Identity") {
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Identity_1") {
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("^x", node.input(1));
}
}
}
TEST_F(DependencyOptimizerTest, RemoveIdentity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::RandomUniform(s.WithOpName("x"), {1, 2}, DT_FLOAT);
Output y = ops::RandomUniform(s.WithOpName("y"), {1, 2}, DT_FLOAT);
Output z = ops::RandomUniform(s.WithOpName("z"), {1, 2}, DT_FLOAT);
auto id_a = ops::Identity(s.WithOpName("id_a"), x);
auto id_b = ops::Identity(
s.WithOpName("id_b").WithControlDependencies(y).WithControlDependencies(
z),
x);
auto id_c = ops::Identity(s.WithOpName("id_c").WithControlDependencies(y), x);
Output a_a = ops::Identity(s.WithOpName("a_a"), id_a);
Output a_b = ops::Identity(s.WithOpName("a_b"), id_a);
Output a_c =
ops::Identity(s.WithOpName("a_c").WithControlDependencies(id_a), z);
Output a_d =
ops::Identity(s.WithOpName("a_d").WithControlDependencies(id_a), z);
Output b_a = ops::Identity(s.WithOpName("b_a"), id_b);
Output c_a = ops::Identity(s.WithOpName("c_a"), id_c);
Output c_b =
ops::Identity(s.WithOpName("c_b").WithControlDependencies(id_c), z);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"a_a", "a_b", "a_c", "a_d", "b_a", "c_a", "c_b"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 3, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id_a", node.name());
EXPECT_NE("id_b", node.name());
EXPECT_NE("id_c", node.name());
if (node.name() == "a_a" || node.name() == "a_b") {
ASSERT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
++found;
}
if (node.name() == "a_c" || node.name() == "a_d") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
++found;
}
if (node.name() == "b_a") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
EXPECT_EQ("^z", node.input(2));
++found;
}
if (node.name() == "c_a") {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^y", node.input(1));
++found;
}
if (node.name() == "c_b") {
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("z", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
++found;
}
}
EXPECT_EQ(found, 7);
}
TEST_F(DependencyOptimizerTest, RemoveIdentity_RepeatedInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable x(scope.WithOpName("x"), {}, DT_BOOL);
ops::Variable y(scope.WithOpName("y"), {}, DT_BOOL);
ops::Switch sw(scope.WithOpName("switch"), x, x);
Output id0 = ops::Identity(scope.WithOpName("id0"), sw.output_true);
Output id1 = ops::Identity(scope.WithOpName("id1"), sw.output_false);
Output or0 = ops::LogicalOr(scope.WithOpName("or0"), id0, id0);
Output or1 = ops::LogicalOr(scope.WithOpName("or1"), id0, y);
Output or2 = ops::LogicalOr(
scope.WithOpName("or2").WithControlDependencies(id1), y, y);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("or0");
item.fetch.push_back("or1");
item.fetch.push_back("or2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE("id0", node.name());
if (node.name() == "or0") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("switch:1", node.input(1));
++found;
}
if (node.name() == "or1") {
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("switch:1", node.input(0));
EXPECT_EQ("y", node.input(1));
++found;
}
if (node.name() == "or2") {
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^id1", node.input(2));
++found;
}
}
EXPECT_EQ(found, 3);
}
TEST_F(DependencyOptimizerTest, Transitive_Reduction_Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output x = ops::Square(s.WithOpName("x"), c);
Output neg1 = ops::Neg(s.WithOpName("neg1"), x);
Output neg2 =
ops::Neg(s.WithOpName("neg2").WithControlDependencies({x}), neg1);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("neg2");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(4, output.node_size());
EXPECT_EQ("neg2", output.node(3).name());
EXPECT_EQ(1, output.node(3).input_size());
EXPECT_EQ("neg1", output.node(3).input(0));
}
TEST_F(DependencyOptimizerTest, ChangeToNoop_Identity) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
Output id_after_var = ops::Identity(scope.WithOpName("id_after_var"), v_in);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(
scope.WithOpName("switch").WithControlDependencies(id_after_var), v_in,
v_ctrl);
Output id0 = ops::Identity(scope.WithOpName("id0"), s.output_true);
Output grappler_added_id = ops::Identity(
scope.WithOpName("ConstantFoldingCtrl/switch_1"), s.output_true);
Output c1 = ops::Const(scope.WithOpName("c1")
.WithControlDependencies(id_after_var)
.WithControlDependencies(grappler_added_id),
{1.0f, 2.0f}, {1, 2});
Output id1 = ops::Identity(scope.WithOpName("id1"), c1);
Output id2 = ops::Identity(scope.WithOpName("id2"), id0);
Output fetch =
ops::Identity(scope.WithOpName("fetch").WithControlDependencies(id1), c1);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch.push_back("c1");
item.fetch.push_back("id2");
item.fetch.push_back("fetch");
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size() - 2, output.node_size());
bool found = false;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
EXPECT_NE("id0", node.name());
EXPECT_NE("id1", node.name());
if (node.name() == "c1") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^ConstantFoldingCtrl/switch_1", node.input(0));
found = true;
}
}
EXPECT_TRUE(found);
}
TEST_F(DependencyOptimizerTest, IdentityInputs) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::Identity(scope.WithOpName("id_f"), s.output_false);
auto id_t = ops::Identity(scope.WithOpName("id_t"), s.output_true);
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(6, output.node_size());
EXPECT_EQ("out1", output.node(4).name());
EXPECT_EQ(1, output.node(4).input_size());
EXPECT_EQ("s", output.node(4).input(0));
EXPECT_EQ("out2", output.node(5).name());
EXPECT_EQ(1, output.node(5).input_size());
EXPECT_EQ("s:1", output.node(5).input(0));
}
TEST_F(DependencyOptimizerTest, RemoveIdentityN_SwitchInput) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output b = ops::Placeholder(scope.WithOpName("b"), DT_BOOL);
Output x = ops::RandomUniform(scope.WithOpName("x"), {1, 2}, DT_FLOAT);
auto s = ops::Switch(scope.WithOpName("s"), x, b);
auto id_f = ops::IdentityN(scope.WithOpName("id_f"), {s.output_false});
auto id_t = ops::IdentityN(scope.WithOpName("id_t"), {s.output_true});
auto id_b =
ops::IdentityN(scope.WithOpName("id_b"), {s.output_false, s.output_true});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_f[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_t[0]);
Output out3 = ops::Identity(scope.WithOpName("out3"), id_b[0]);
Output out4 = ops::Identity(scope.WithOpName("out4"), id_b[1]);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3", "out4"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(8, output.node_size());
auto out1_node = output.node(7);
EXPECT_EQ("out1", out1_node.name());
EXPECT_EQ(1, out1_node.input_size());
EXPECT_EQ("s", out1_node.input(0));
auto out2_node = output.node(4);
EXPECT_EQ("out2", out2_node.name());
EXPECT_EQ(1, out2_node.input_size());
EXPECT_EQ("s:1", out2_node.input(0));
auto out3_node = output.node(5);
EXPECT_EQ("out3", out3_node.name());
EXPECT_EQ(1, out3_node.input_size());
EXPECT_EQ("s", out3_node.input(0));
auto out4_node = output.node(6);
EXPECT_EQ("out4", out4_node.name());
EXPECT_EQ(1, out4_node.input_size());
EXPECT_EQ("s:1", out4_node.input(0));
}
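// An IdentityN that anchors a control dependency (out3 depends on id_n[1])
// is left untouched: the optimized graph keeps all six nodes.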
TEST_F(DependencyOptimizerTest, DoNotRemoveIdentityNWithControlDependency) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output input1 = ops::Placeholder(scope.WithOpName("input1"), DT_BOOL);
Output input2 = ops::Const(scope.WithOpName("input2"), {1, 2});
auto id_n = ops::IdentityN(scope.WithOpName("id_n"), {input1, input2});
Output out1 = ops::Identity(scope.WithOpName("out1"), id_n[0]);
Output out2 = ops::Identity(scope.WithOpName("out2"), id_n[1]);
auto out3 =
ops::NoOp(scope.WithOpName("out3").WithControlDependencies(id_n[1]));
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
item.fetch = {"out1", "out2", "out3"};
DependencyOptimizer optimizer;
GraphDef optimized_graph_def;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph_def);
TF_EXPECT_OK(status);
EXPECT_EQ(6, optimized_graph_def.node_size());
}
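// x_on_2 forwards gpu:1 data to a consumer on gpu:3; removing it would not
// eliminate a device crossing, so the graph is left unchanged.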
TEST_F(DependencyOptimizerTest,
Identity_DeviceCrossing_ConsumerOnDifferentDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_3 =
ops::Const(s.WithOpName("one_on_3").WithDevice("/gpu:3"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:3"), x_on_2, one_on_3);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
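// When the Identity and its consumer sit on the same device, the Identity is
// dropped and the consumer reads the gpu:1 producer directly.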
TEST_F(DependencyOptimizerTest, Identity_DeviceCrossing_ConsumerOnSameDevice) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x_on_1 =
ops::Const(s.WithOpName("x_on_1").WithDevice("/gpu:1"), {1.0f}, {});
Output one_on_2 =
ops::Const(s.WithOpName("one_on_2").WithDevice("/gpu:2"), {1.0f}, {});
Output x_on_2 =
ops::Identity(s.WithOpName("x_on_2").WithDevice("/gpu:2"), x_on_1);
Output result =
ops::Add(s.WithOpName("result").WithDevice("/gpu:2"), x_on_2, one_on_2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"result"};
DependencyOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(3, output.node_size());
for (const auto& node : output.node()) {
EXPECT_NE("x_on_2", node.name());
if (node.name() == "result") {
EXPECT_EQ("x_on_1", node.input(0));
}
}
}
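// The control chain z -> NoOp -> GreaterEqual is dead once the dependency is
// pruned, so both intermediate nodes are removed and z keeps only x and y.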
TEST_F(DependencyOptimizerTest, RemoveGreaterEqualWithNoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto greaterequal = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto noop =
ops::NoOp(s.WithOpName("NoOp").WithControlDependencies(greaterequal));
Output add = ops::Add(
s.WithOpName("z").WithControlDependencies({noop.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DependencyOptimizer optimizer;
GraphDef output;
item.fetch.push_back("z");
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "GreaterEqual") {
count++;
} else if (node.name() == "NoOp") {
count++;
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
}
}
EXPECT_EQ(3, count);
}
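// The two control edges from CPU:1 (a and c) are funneled through a single
// NoOp on CPU:1, reducing the CPU:1 -> GPU:0 crossings to one. A second
// optimizer pass verifies the rewrite is idempotent.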
TEST_F(DependencyOptimizerTest, GroupCrossDeviceControlDeps) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output b = ops::RandomUniform(s.WithOpName("b").WithDevice("/CPU:2"),
{1, 2}, DT_FLOAT);
Output c = ops::RandomUniform(s.WithOpName("c").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output d = ops::RandomUniform(s.WithOpName("d").WithDevice("/CPU:3"),
{1, 2}, DT_FLOAT);
Output e = ops::RandomUniform(s.WithOpName("e").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
auto fetch = ops::Identity(
s.WithOpName("f")
.WithControlDependencies({a.op(), b.op(), c.op(), d.op()})
.WithDevice("/GPU:0"),
{e});
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("f");
}
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output b = ops::RandomUniform(s.WithOpName("b").WithDevice("/CPU:2"),
{1, 2}, DT_FLOAT);
Output c = ops::RandomUniform(s.WithOpName("c").WithDevice("/CPU:1"),
{1, 2}, DT_FLOAT);
Output d = ops::RandomUniform(s.WithOpName("d").WithDevice("/CPU:3"),
{1, 2}, DT_FLOAT);
Output e = ops::RandomUniform(s.WithOpName("e").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
auto noop = ops::NoOp(s.WithOpName("GroupCrossDeviceControlEdges_0/f")
.WithDevice("/CPU:1")
.WithControlDependencies({a.op(), c.op()}));
auto fetch =
ops::Identity(s.WithOpName("f")
.WithControlDependencies({b.op(), d.op(), noop})
.WithDevice("/GPU:0"),
{e});
TF_CHECK_OK(s.ToGraphDef(&expected));
}
DependencyOptimizer optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
CompareGraphs(expected, output);
item.graph.Swap(&output);
output.Clear();
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
CompareGraphs(expected, output);
}
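// 32 cross-host control producers (4 tasks x 8 TPUs) collapse into one NoOp
// per task, leaving f with its data input plus four grouped control inputs.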
TEST_F(DependencyOptimizerTest, GroupCrossHostControlDeps) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
std::vector<Operation> ops;
Output a = ops::RandomUniform(s.WithOpName("a").WithDevice("/CPU:0"),
{1, 2}, DT_FLOAT);
for (int t = 0; t < 4; ++t) {
for (int c = 0; c < 8; ++c) {
string opname = absl::StrCat("t", t, "/c", c);
string device = absl::StrCat("/task:", t, "/device:TPU:", c);
Output output = ops::RandomUniform(
s.WithOpName(opname).WithDevice(device), {1, 2}, DT_FLOAT);
ops.push_back(output.op());
}
}
auto fetch = ops::Identity(
s.WithOpName("f").WithControlDependencies(ops).WithDevice("/CPU:0"),
{a});
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch.push_back("f");
}
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_CHECK_OK(s.ToGraphDef(&expected));
}
DependencyOptimizer optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
std::set<string> tasks;
for (const auto& n : output.node()) {
if (n.op() == "NoOp") {
EXPECT_TRUE(absl::StartsWith(n.name(), "GroupCrossDeviceControlEdges"));
EXPECT_EQ(n.input_size(), 8);
tasks.insert(n.device());
}
if (n.name() == "f") {
EXPECT_EQ(n.input_size(), 5);
for (const auto& i : n.input()) {
EXPECT_TRUE(i == "a" ||
absl::StartsWith(i, "^GroupCrossDeviceControlEdges"));
}
}
}
EXPECT_EQ(tasks.size(), 4);
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/dependency_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
63260427-ab90-4995-b751-c43123973239 | cpp | tensorflow/tensorflow | model_pruner | tensorflow/core/grappler/optimizers/model_pruner.cc | tensorflow/core/grappler/optimizers/model_pruner_test.cc | #include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include <unordered_set>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
namespace tensorflow {
namespace grappler {
namespace {
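// An Identity is trivial (removable) only if it has no control edges in or
// out, is not fed by a Switch, and does not feed a Merge.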
bool IsTrivialIdentity(const NodeDef& node, const GraphView& graph_view) {
  for (const auto input :
       graph_view.GetFanins(node, /*include_controlling_nodes=*/true)) {
if (input.port_id == Graph::kControlSlot) {
return false;
} else if (IsSwitch(*input.node)) {
return false;
}
}
  for (const auto output :
       graph_view.GetFanouts(node, /*include_controlled_nodes=*/true)) {
if (output.port_id == Graph::kControlSlot) {
return false;
} else if (IsMerge(*output.node)) {
return false;
}
}
return true;
}
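// Candidates for removal: StopGradient, trivial Identity/IdentityN, NoOps
// without inputs, Consts with no inputs and no data fanouts, and AddN with
// at most one non-control input.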
bool IsTrivialOp(const NodeDef& node, const GraphView& graph_view) {
if (IsStopGradient(node)) {
return true;
}
if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
return IsTrivialIdentity(node, graph_view);
}
if (IsNoOp(node) && node.input().empty()) {
return true;
}
  if (IsConstant(node) && node.input().empty() &&
      graph_view.NumFanouts(node, /*include_controlled_nodes=*/false) == 0) {
return true;
}
return IsAddN(node) && NumNonControlInputs(node) <= 1;
}
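// Removing a node with m inputs and n outputs trades m + n edges for m * n
// forwarding edges, e.g. 2 in / 3 out goes from 5 edges to 6; returns true
// when the product exceeds the sum.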
bool RemovalIncreasesEdgeCount(const NodeDef& node,
const GraphView& graph_view) {
  int in_degree =
      graph_view.NumFanins(node, /*include_controlling_nodes=*/true);
  int out_degree =
      graph_view.NumFanouts(node, /*include_controlled_nodes=*/true);
return in_degree * out_degree > in_degree + out_degree;
}
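// Looks up the node's OpDef and reports whether the output at port_id is a
// ref type.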
bool IsOutputPortRefValue(const NodeDef& node, int port_id,
const OpRegistryInterface& op_registry) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = op_registry.LookUp(node.op(), &op_reg_data);
if (s.ok()) {
DataType output_type;
s = OutputTypeForNode(node, op_reg_data->op_def, port_id, &output_type);
if (s.ok() && IsRefType(output_type)) {
return true;
}
}
return false;
}
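// A node is removable if deleting it does not add edges and none of its
// fanins/fanouts cross devices, touch a function, or carry a ref-typed
// tensor; input-less NoOps and dead Consts are always removable.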
bool CanRemoveNode(const NodeDef& node, const GraphView& graph_view,
const absl::flat_hash_set<string>& function_names,
const OpRegistryInterface& op_registry) {
  if (IsNoOp(node) &&
      (node.input().empty() ||
       graph_view.NumFanouts(node, /*include_controlled_nodes=*/true) == 0)) {
return true;
}
  if (IsConstant(node) && node.input().empty() &&
      graph_view.NumFanouts(node, /*include_controlled_nodes=*/false) == 0) {
return true;
}
if (RemovalIncreasesEdgeCount(node, graph_view)) {
return false;
}
  for (const auto input :
       graph_view.GetFanins(node, /*include_controlling_nodes=*/true)) {
if (node.device() != input.node->device()) {
return false;
} else if (input.port_id == Graph::kControlSlot) {
continue;
} else if (function_names.find(input.node->op()) != function_names.end()) {
return false;
} else if (IsOutputPortRefValue(*input.node, input.port_id, op_registry)) {
return false;
}
}
  for (const auto output :
       graph_view.GetFanouts(node, /*include_controlled_nodes=*/false)) {
if (function_names.find(output.node->op()) != function_names.end()) {
return false;
}
}
return true;
}
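// Recursively replaces inputs that point at deleted nodes with those nodes'
// own inputs; anything reached through a control edge stays a control edge.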
void ForwardInputsInternal(
const NodeDef& node,
const absl::flat_hash_set<const NodeDef*>& nodes_to_delete,
bool add_as_control, NodeDef* new_node,
const absl::flat_hash_map<string, const NodeDef*>& optimized_nodes,
const GraphView& graph_view) {
auto itr = optimized_nodes.find(node.name());
if (itr != optimized_nodes.end()) {
for (const string& input : itr->second->input()) {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
}
return;
}
for (const auto& input : node.input()) {
const NodeDef* input_node = graph_view.GetNode(NodeName(input));
if (input_node == nullptr) {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
continue;
}
if (nodes_to_delete.find(input_node) != nodes_to_delete.end()) {
ForwardInputsInternal(*input_node, nodes_to_delete,
add_as_control || IsControlInput(input), new_node,
optimized_nodes, graph_view);
} else {
*new_node->add_input() =
add_as_control ? AsControlDependency(NodeName(input)) : input;
}
}
}
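// Fills in `new_node`'s inputs by forwarding across deleted nodes, then
// moves control inputs behind regular ones and deduplicates them; results
// are memoized in `optimized_nodes`.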
void ForwardInputs(const NodeDef& original_node,
const absl::flat_hash_set<const NodeDef*>& nodes_to_delete,
NodeDef* new_node,
absl::flat_hash_map<string, const NodeDef*>* optimized_nodes,
const GraphView& graph_view) {
  ForwardInputsInternal(original_node, nodes_to_delete,
                        /*add_as_control=*/false, new_node, *optimized_nodes,
                        graph_view);
if (!new_node->name().empty()) {
(*optimized_nodes)[new_node->name()] = new_node;
}
int pos = 0;
for (int i = 0; i < new_node->input_size(); ++i) {
if (!IsControlInput(new_node->input(i))) {
new_node->mutable_input()->SwapElements(pos, i);
++pos;
}
}
DedupControlInputs(new_node);
}
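// Walks backwards from the terminal (preserved) nodes and records, per
// IdentityN node, which output ports lie on a path to a terminal node.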
absl::flat_hash_map<string, absl::flat_hash_set<int>> IdentityNTerminalPorts(
const NodeMap& node_map, const std::vector<string>& terminal_nodes,
int graph_size) {
std::vector<string> to_visit;
to_visit.reserve(graph_size);
absl::flat_hash_set<string> visited(terminal_nodes.begin(),
terminal_nodes.end());
for (const string& terminal_node : terminal_nodes) {
NodeDef* node = node_map.GetNode(terminal_node);
if (node == nullptr) {
continue;
}
for (const string& input : node->input()) {
to_visit.push_back(input);
}
}
absl::flat_hash_set<string> identity_n_fanouts;
while (!to_visit.empty()) {
string curr = to_visit.back();
to_visit.pop_back();
NodeDef* curr_node = node_map.GetNode(curr);
if (curr_node == nullptr ||
visited.find(curr_node->name()) != visited.end()) {
continue;
}
if (IsIdentityN(*curr_node)) {
if (identity_n_fanouts.find(curr) == identity_n_fanouts.end()) {
identity_n_fanouts.emplace(curr);
int pos = NodePositionIfSameNode(curr, curr_node->name());
if (pos >= 0) {
to_visit.push_back(curr_node->input(pos));
}
for (const string& input : curr_node->input()) {
if (IsControlInput(input) &&
identity_n_fanouts.find(input) == identity_n_fanouts.end()) {
to_visit.push_back(input);
}
}
}
} else {
for (const string& input : curr_node->input()) {
to_visit.push_back(input);
}
visited.emplace(curr_node->name());
}
}
absl::flat_hash_map<string, absl::flat_hash_set<int>> identity_n_ports;
for (const auto& fanout : identity_n_fanouts) {
int pos;
string node_name = ParseNodeName(fanout, &pos);
if (node_name.empty() || pos < 0) {
continue;
}
if (identity_n_ports.find(node_name) == identity_n_ports.end()) {
identity_n_ports[node_name] = {pos};
} else {
identity_n_ports[node_name].emplace(pos);
}
}
return identity_n_ports;
}
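// Creates a standalone Identity mirroring one port of an IdentityN, so that
// port's consumers survive when the IdentityN is shrunk. Returns "" on a
// name collision or builder failure.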
string NewIdentityFromIdentityN(int pos, const NodeDef& identity_n,
GraphDef* graph, NodeMap* node_map) {
string new_node_name =
strings::StrCat(identity_n.name(), "-", pos, "-grappler-ModelPruner");
if (node_map->NodeExists(new_node_name)) {
return "";
}
NodeDef* new_node = graph->add_node();
Status status = NodeDefBuilder(new_node_name, "Identity")
.Input(identity_n.input(pos), 0,
identity_n.attr().at("T").list().type(pos))
.Device(identity_n.device())
.Finalize(new_node);
if (!status.ok()) {
return "";
}
node_map->AddNode(new_node->name(), new_node);
node_map->AddOutput(NodeName(new_node->input(0)), new_node->name());
return new_node->name();
}
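// Compacts an IdentityN down to its terminal ports: non-terminal ports get
// standalone Identity nodes, consumers are re-pointed (with port indices
// renumbered), and the input list and "T" attr are truncated to match.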
Status RewriteIdentityNAndInputsOutputs(
NodeDef* node, int num_non_control_inputs,
const absl::flat_hash_set<int>& terminal_ports, GraphDef* graph,
NodeMap* node_map) {
struct NodeOutputUpdate {
string input;
string output;
};
absl::flat_hash_map<int, int> terminal_input_pos;
absl::flat_hash_map<int, string> new_identities;
int new_idx = 0;
for (int i = 0; i < num_non_control_inputs; i++) {
if (terminal_ports.find(i) != terminal_ports.end()) {
terminal_input_pos[i] = new_idx++;
} else {
string identity = NewIdentityFromIdentityN(i, *node, graph, node_map);
if (identity.empty()) {
return errors::Internal(
"Could not create Identity node from IdentityN node ", node->name(),
" at port ", i);
}
new_identities[i] = identity;
}
}
std::vector<NodeOutputUpdate> updates;
for (NodeDef* output : node_map->GetOutputs(node->name())) {
for (int i = 0; i < output->input_size(); i++) {
string input = output->input(i);
if (IsControlInput(input)) {
continue;
}
TensorId input_tensor = ParseTensorName(input);
if (input_tensor.node() == node->name()) {
if (terminal_ports.find(input_tensor.index()) == terminal_ports.end()) {
string new_identity = new_identities[input_tensor.index()];
output->set_input(i, new_identity);
updates.push_back({new_identity, output->name()});
} else {
int new_pos = terminal_input_pos[input_tensor.index()];
string updated_input_name =
new_pos > 0 ? strings::StrCat(node->name(), ":", new_pos)
: node->name();
output->set_input(i, updated_input_name);
}
}
}
}
for (const NodeOutputUpdate& update : updates) {
node_map->AddOutput(update.input, update.output);
}
const int num_inputs = node->input_size();
int curr_pos = 0;
auto mutable_inputs = node->mutable_input();
auto mutable_types =
node->mutable_attr()->at("T").mutable_list()->mutable_type();
for (int i = 0; i < num_non_control_inputs; i++) {
if (terminal_input_pos.find(i) != terminal_input_pos.end()) {
mutable_inputs->SwapElements(i, curr_pos);
mutable_types->SwapElements(i, curr_pos);
curr_pos++;
}
}
mutable_types->Truncate(curr_pos);
for (int i = num_non_control_inputs; i < num_inputs; i++) {
mutable_inputs->SwapElements(i, curr_pos++);
}
mutable_inputs->DeleteSubrange(curr_pos, num_inputs - curr_pos);
return absl::OkStatus();
}
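// Applies the IdentityN rewrite to every node that has both terminal and
// non-terminal output ports, so pruning can later drop the unused inputs.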
Status SplitIdentityNInputs(GraphDef* graph,
const std::vector<string>& terminal_nodes,
bool* updated_graph) {
NodeMap node_map(graph);
for (auto const& terminal :
IdentityNTerminalPorts(node_map, terminal_nodes, graph->node_size())) {
NodeDef* node = node_map.GetNode(terminal.first);
if (node == nullptr) {
continue;
}
const int num_non_control_inputs = NumNonControlInputs(*node);
const int terminal_second_size = terminal.second.size();
if (node->attr().count("T") == 0 ||
node->attr().at("T").list().type_size() != num_non_control_inputs ||
terminal_second_size >= num_non_control_inputs) {
continue;
}
TF_RETURN_IF_ERROR(RewriteIdentityNAndInputsOutputs(
node, num_non_control_inputs, terminal.second, graph, &node_map));
*updated_graph = true;
}
return absl::OkStatus();
}
}  // namespace
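// Prunes the graph to the transitive fanin of the nodes to preserve, splits
// IdentityN nodes with partially-used outputs, then deletes trivial nodes,
// forwarding their inputs directly to their consumers.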
Status ModelPruner::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
const std::unordered_set<string> nodes_to_preserve = item.NodesToPreserve();
std::unique_ptr<GraphDef> pruned_graph_release;
GraphDef* pruned_graph;
if (!nodes_to_preserve.empty()) {
pruned_graph_release.reset(new GraphDef());
pruned_graph = pruned_graph_release.get();
pruned_graph->mutable_node()->Reserve(item.graph.node_size());
std::vector<string> terminal_nodes(nodes_to_preserve.begin(),
nodes_to_preserve.end());
std::sort(terminal_nodes.begin(), terminal_nodes.end());
TF_RETURN_IF_ERROR(
SetTransitiveFaninGraph(item.graph, pruned_graph, terminal_nodes));
bool did_split_identity_n = false;
TF_RETURN_IF_ERROR(SplitIdentityNInputs(pruned_graph, terminal_nodes,
&did_split_identity_n));
if (did_split_identity_n) {
GraphDef fanin_split_identity_n_graph;
TF_RETURN_IF_ERROR(SetTransitiveFaninGraph(
*pruned_graph, &fanin_split_identity_n_graph, terminal_nodes));
pruned_graph->Swap(&fanin_split_identity_n_graph);
}
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
} else {
pruned_graph = const_cast<GraphDef*>(&item.graph);
}
GraphView graph_view(pruned_graph);
absl::flat_hash_set<string> function_names;
for (const auto& function : item.graph.library().function()) {
function_names.insert(function.signature().name());
}
OpRegistryInterface* op_registry = OpRegistry::Global();
absl::flat_hash_set<const NodeDef*> nodes_to_delete;
for (int i = 0; i < pruned_graph->node_size(); ++i) {
NodeDef* node = pruned_graph->mutable_node(i);
DedupControlInputs(node);
if (!IsTrivialOp(*node, graph_view)) {
VLOG(3) << node->name() << " is not trivial.";
continue;
}
if (nodes_to_preserve.find(node->name()) != nodes_to_preserve.end()) {
continue;
}
if (CanRemoveNode(*node, graph_view, function_names, *op_registry)) {
nodes_to_delete.insert(node);
} else {
VLOG(3) << node->name() << " cannot be removed";
}
}
if (nodes_to_delete.empty() && nodes_to_preserve.empty()) {
return errors::Aborted("Nothing to do.");
}
optimized_graph->Clear();
*optimized_graph->mutable_library() = item.graph.library();
*optimized_graph->mutable_versions() = item.graph.versions();
if (nodes_to_delete.empty()) {
optimized_graph->mutable_node()->Swap(pruned_graph->mutable_node());
return absl::OkStatus();
}
const bool fetches_are_known = !item.fetch.empty();
absl::flat_hash_map<string, const NodeDef*> optimized_nodes;
optimized_graph->mutable_node()->Reserve(pruned_graph->node_size());
for (const auto& node : pruned_graph->node()) {
if (!fetches_are_known ||
nodes_to_delete.find(&node) == nodes_to_delete.end()) {
NodeDef* new_node = optimized_graph->add_node();
*new_node = node;
new_node->clear_input();
ForwardInputs(node, nodes_to_delete, new_node, &optimized_nodes,
graph_view);
}
}
VLOG(1) << "Pruned " << nodes_to_delete.size()
<< " nodes from the graph. The graph now contains "
<< optimized_graph->node_size() << " nodes.";
if (optimized_graph->node_size() > item.graph.node_size()) {
return errors::Internal("Pruning increased graph size.");
}
return absl::OkStatus();
}
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/no_op.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDeviceCPU0[] = "/device:CPU:0";
constexpr char kDeviceGPU0[] = "/device:GPU:0";
class ModelPrunerTest : public GrapplerTest {};
TEST_F(ModelPrunerTest, NoPruning) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
}
TEST_F(ModelPrunerTest, StopGradientPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::StopGradient(s.WithOpName("c"), b);
Output d = ops::StopGradient(s.WithOpName("d"), c);
Output e = ops::Sqrt(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::StopGradient(s.WithOpName("c"), b);
Output d = ops::StopGradient(s.WithOpName("d"), b);
Output e = ops::Sqrt(s.WithOpName("e"), {b});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto expected_tensors = EvaluateNodes(item.graph, fetch);
auto actual_tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(expected_tensors.size(), 1);
ASSERT_EQ(actual_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, IdentityPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c").WithControlDependencies(b), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::Sqrt(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch.push_back("e");
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output e = ops::Sqrt(s.WithOpName("e"), {b});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, IdentityNInputPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Const(s.WithOpName("c"), 3.0f, {10, 10});
Output d = ops::Const(s.WithOpName("d"), 4.0f, {10, 10});
auto e =
ops::IdentityN(s.WithOpName("e").WithControlDependencies(d), {a, b, c});
auto f = ops::IdentityN(s.WithOpName("f"), {e[2], e[1], e[0]});
Output g = ops::Sqrt(s.WithOpName("g"), {f[1]});
Output h = ops::Sqrt(s.WithOpName("h"), {f[2]});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"g", "h"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
auto e = ops::IdentityN(s.WithOpName("e"), {a, b});
auto f = ops::IdentityN(s.WithOpName("f"), {e[1], e[0]});
Output g = ops::Sqrt(s.WithOpName("g"), {f[0]});
Output h = ops::Sqrt(s.WithOpName("h"), {f[1]});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 2);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 2);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorEqual<float>(actual_tensors[i], expected_tensors[i]);
}
}
TEST_F(ModelPrunerTest, IdentityNInputPruningWithIdentityNInFetch) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Const(s.WithOpName("c"), 3.0f, {10, 10});
Output d = ops::Const(s.WithOpName("d"), 4.0f, {10, 10});
auto e =
ops::IdentityN(s.WithOpName("e").WithControlDependencies(d), {a, b, c});
auto f = ops::IdentityN(s.WithOpName("f"), {e[0], e[1], e[2]});
auto g = ops::IdentityN(s.WithOpName("g"), {f[1]});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"g"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 2.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
auto e = ops::IdentityN(s.WithOpName("e"), {b});
auto g = ops::IdentityN(s.WithOpName("g"), {e[0]});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, NoOpPruning) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::AddN(s.WithOpName("c"), {b});
Output d = ops::AddN(s.WithOpName("d").WithControlDependencies(b), {c});
Output e = ops::AddN(s.WithOpName("e"), {d});
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::AddN(s.WithOpName("c"), {a});
Output d = ops::AddN(s.WithOpName("d"), {a});
Output e = ops::AddN(s.WithOpName("e"), {a});
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto actual_tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PreserveIdentities) {
GrapplerItem item;
{
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
ops::Variable v_in(scope.WithOpName("v_in"), {3}, DT_FLOAT);
ops::Variable v_ctrl(scope.WithOpName("v_ctrl"), {}, DT_BOOL);
ops::Switch s(scope.WithOpName("switch"), v_in, v_ctrl);
Output id0 = ops::Identity(scope.WithOpName("id0"), s.output_true);
Output id1 =
ops::Identity(scope.WithOpName("id1").WithControlDependencies(v_ctrl),
s.output_false);
Output id2 = ops::Identity(scope.WithOpName("id2"), id0);
Output id3 = ops::Identity(
scope.WithOpName("id3").WithControlDependencies(id0), id1);
auto merge = ops::Merge(scope.WithOpName("merge"), {id0, id1});
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
}
item.fetch = {"id2", "id3", "merge"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
auto v_in_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3}));
Tensor v_ctrl_t(DT_BOOL, TensorShape({}));
v_ctrl_t.flat<bool>()(0) = true;
auto actual_tensors = EvaluateNodes(output, {"merge", "id2"},
{{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
ASSERT_EQ(actual_tensors.size(), 2);
auto expected_tensors = EvaluateNodes(
item.graph, {"merge", "id2"}, {{"v_in", v_in_t}, {"v_ctrl", v_ctrl_t}});
ASSERT_EQ(expected_tensors.size(), 2);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorEqual<float>(actual_tensors[i], expected_tensors[i]);
}
}
TEST_F(ModelPrunerTest, PruningSkipsRefOutputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Variable(s.WithOpName("a"), {}, DT_INT64);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::Identity(s.WithOpName("e"), d);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output a = ops::Variable(s.WithOpName("a"), {}, DT_INT64);
Output b = ops::Identity(s.WithOpName("b"), a);
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), b);
Output e = ops::Identity(s.WithOpName("e"), b);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
std::vector<string> fetch = {"e"};
auto a_t = GenerateRandomTensor<DT_INT64>(TensorShape({}));
auto actual_tensors = EvaluateNodes(output, fetch, {{"a", a_t}});
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, fetch, {{"a", a_t}});
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<int64_t>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PruningPreservesFetch) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"c"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Sqrt(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 1);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 1);
test::ExpectTensorEqual<float>(actual_tensors[0], expected_tensors[0]);
}
TEST_F(ModelPrunerTest, PruningPreservesCrossDeviceIdentity) {
GrapplerItem item;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c =
ops::Const(s.WithOpName("c").WithDevice(kDeviceCPU0), 0.0f, {10, 10});
Output i1 = ops::Identity(s.WithOpName("i1").WithDevice(kDeviceGPU0), c);
Output a1 = ops::Identity(s.WithOpName("a1").WithDevice(kDeviceGPU0), i1);
Output a2 = ops::Identity(s.WithOpName("a2").WithDevice(kDeviceGPU0), i1);
Output i2 = ops::Identity(s.WithOpName("i2").WithDevice(kDeviceCPU0), c);
Output a3 = ops::Identity(s.WithOpName("a3").WithDevice(kDeviceGPU0), i2);
Output a4 = ops::Identity(s.WithOpName("a4").WithDevice(kDeviceGPU0), i2);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"a1", "a2", "a3", "a4"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c =
ops::Const(s.WithOpName("c").WithDevice(kDeviceCPU0), 0.0f, {10, 10});
Output i1 = ops::Identity(s.WithOpName("i1").WithDevice(kDeviceGPU0), c);
Output a1 = ops::Identity(s.WithOpName("a1").WithDevice(kDeviceGPU0), i1);
Output a2 = ops::Identity(s.WithOpName("a2").WithDevice(kDeviceGPU0), i1);
Output a3 = ops::Identity(s.WithOpName("a3").WithDevice(kDeviceGPU0), c);
Output a4 = ops::Identity(s.WithOpName("a4").WithDevice(kDeviceGPU0), c);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
if (GetNumAvailableGPUs() > 0) {
auto actual_tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(actual_tensors.size(), 4);
auto expected_tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(expected_tensors.size(), 4);
for (int i = 0; i < actual_tensors.size(); i++) {
test::ExpectTensorNear<float>(actual_tensors[i], expected_tensors[i],
1e-6);
}
}
}
TEST_F(ModelPrunerTest, PruneNoOpsWithoutInputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
auto n1 = ops::NoOp(s.WithOpName("no_op1"));
Output c1 = ops::Const(s.WithOpName("c1"), 0.0f, {1, 1});
auto n2 = ops::NoOp(s.WithOpName("no_op2").WithControlDependencies(c1));
Output id1 = ops::Identity(
s.WithOpName("id1").WithControlDependencies({n1, n2}), c1);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"id1"};
ModelPruner pruner;
GraphDef output;
TF_ASSERT_OK(pruner.Optimize(nullptr, item, &output));
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c1 = ops::Const(s.WithOpName("c1"), 0.0f, {1, 1});
auto n2 = ops::NoOp(s.WithOpName("no_op2").WithControlDependencies(c1));
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies({n2}), c1);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
}
TEST_F(ModelPrunerTest, PruneConstantsWithoutInputsAndOutputs) {
GrapplerItem item;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c0 = ops::Const(s.WithOpName("c0"), 0.0f, {1, 1});
Output c1 = ops::Const(s.WithOpName("c1"), 1.0f, {1, 1});
Output c2 = ops::Const(s.WithOpName("c2").WithControlDependencies({c0}),
2.0f, {1, 1});
Output c3 = ops::Const(s.WithOpName("c3"), 3.0f, {1, 1});
Output id1 = ops::Identity(s.WithOpName("id1")
.WithControlDependencies({c2})
.WithControlDependencies({c3}),
c0);
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
}
item.fetch = {"id1"};
ModelPruner pruner;
GraphDef output;
Status status = pruner.Optimize(nullptr, item, &output);
TF_ASSERT_OK(status);
GraphDef expected;
{
tensorflow::Scope s = CreateScopeWithDevice(kDeviceCPU0);
Output c0 = ops::Const(s.WithOpName("c0"), 0.0f, {1, 1});
Output c2 = ops::Const(s.WithOpName("c2").WithControlDependencies({c0}),
2.0f, {1, 1});
Output id1 =
ops::Identity(s.WithOpName("id1").WithControlDependencies({c2}), c0);
TF_ASSERT_OK(s.ToGraphDef(&expected));
}
CompareGraphs(expected, output);
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/model_pruner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/model_pruner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
397218b9-d790-4fc3-b0cd-29f95b4e35f6 | cpp | tensorflow/tensorflow | custom_graph_optimizer_registry | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry_test.cc | #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <string>
#include <unordered_map>
#include "absl/base/call_once.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace grappler {
namespace {
typedef std::unordered_map<string, CustomGraphOptimizerRegistry::Creator>
RegistrationMap;
RegistrationMap* registered_optimizers = nullptr;
RegistrationMap* GetRegistrationMap() {
if (registered_optimizers == nullptr)
registered_optimizers = new RegistrationMap;
return registered_optimizers;
}
typedef std::unordered_map<string, PluginGraphOptimizerRegistry::Creator>
PluginRegistrationMap;
PluginRegistrationMap* GetPluginRegistrationMap() {
static PluginRegistrationMap* registered_plugin_optimizers =
new PluginRegistrationMap;
return registered_plugin_optimizers;
}
typedef std::unordered_map<string, ConfigList> PluginConfigMap;
PluginConfigMap* GetPluginConfigMap() {
static PluginConfigMap* plugin_config_map = new PluginConfigMap;
return plugin_config_map;
}
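// Baseline plugin configuration: model pruning enabled and every optimizer
// toggle set to ON. Used whenever plugin configs do not apply.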
const ConfigList& DefaultPluginConfigs() {
static ConfigList* default_plugin_configs = new ConfigList(
      /*disable_model_pruning=*/false,
{{"implementation_selector", RewriterConfig::ON},
{"function_optimization", RewriterConfig::ON},
{"common_subgraph_elimination", RewriterConfig::ON},
{"arithmetic_optimization", RewriterConfig::ON},
{"debug_stripper", RewriterConfig::ON},
{"constant_folding", RewriterConfig::ON},
{"shape_optimization", RewriterConfig::ON},
{"auto_mixed_precision", RewriterConfig::ON},
{"auto_mixed_precision_onednn_bfloat16", RewriterConfig::ON},
{"auto_mixed_precision_mkl", RewriterConfig::ON},
{"auto_mixed_precision_cpu", RewriterConfig::ON},
{"pin_to_host_optimization", RewriterConfig::ON},
{"layout_optimizer", RewriterConfig::ON},
{"remapping", RewriterConfig::ON},
{"loop_optimization", RewriterConfig::ON},
{"dependency_optimization", RewriterConfig::ON},
{"auto_parallel", RewriterConfig::ON},
{"memory_optimization", RewriterConfig::ON},
{"scoped_allocator_optimization", RewriterConfig::ON}});
return *default_plugin_configs;
}
}  // namespace
std::unique_ptr<CustomGraphOptimizer>
CustomGraphOptimizerRegistry::CreateByNameOrNull(const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it == GetRegistrationMap()->end()) return nullptr;
return std::unique_ptr<CustomGraphOptimizer>(it->second());
}
std::vector<string> CustomGraphOptimizerRegistry::GetRegisteredOptimizers() {
std::vector<string> optimizer_names;
optimizer_names.reserve(GetRegistrationMap()->size());
for (const auto& opt : *GetRegistrationMap())
optimizer_names.emplace_back(opt.first);
return optimizer_names;
}
void CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
const Creator& optimizer_creator, const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it != GetRegistrationMap()->end()) {
LOG(FATAL) << "CustomGraphOptimizer is registered twice: " << name;
}
GetRegistrationMap()->insert({name, optimizer_creator});
}
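// Instantiates the registered plugin optimizer for each requested device
// type; the "enabled" log line is emitted only once per process.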
std::vector<std::unique_ptr<CustomGraphOptimizer>>
PluginGraphOptimizerRegistry::CreateOptimizers(
const std::set<string>& device_types) {
std::vector<std::unique_ptr<CustomGraphOptimizer>> optimizer_list;
for (auto it = GetPluginRegistrationMap()->begin();
it != GetPluginRegistrationMap()->end(); ++it) {
if (device_types.find(it->first) == device_types.end()) continue;
static absl::once_flag plugin_optimizer_flag;
absl::call_once(plugin_optimizer_flag, [&]() {
LOG(INFO) << "Plugin optimizer for device_type " << it->first
<< " is enabled.";
});
optimizer_list.emplace_back(
std::unique_ptr<CustomGraphOptimizer>(it->second()));
}
return optimizer_list;
}
void PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
const Creator& optimizer_creator, const std::string& device_type,
ConfigList& configs) {
auto ret = GetPluginConfigMap()->insert({device_type, configs});
if (!ret.second) {
LOG(FATAL) << "PluginGraphOptimizer with device_type "
<< device_type << " is registered twice.";
}
GetPluginRegistrationMap()->insert({device_type, optimizer_creator});
}
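// When plugins registered for these device types carry differing configs,
// warns and dumps each plugin's toggle table for inspection.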
void PluginGraphOptimizerRegistry::PrintPluginConfigsIfConflict(
const std::set<string>& device_types) {
bool init = false, conflict = false;
ConfigList plugin_configs;
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (!init) {
plugin_configs = cur_plugin_configs;
init = true;
} else {
if (!(plugin_configs == cur_plugin_configs)) {
conflict = true;
break;
}
}
}
if (!conflict) return;
LOG(WARNING) << "Plugins have conflicting configs. Potential performance "
"regression may happen.";
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
string logs = "";
strings::StrAppend(&logs, "disable_model_pruning\t\t",
cur_plugin_configs.disable_model_pruning, "\n");
for (auto const& pair : cur_plugin_configs.toggle_config) {
strings::StrAppend(&logs, pair.first, string(32 - pair.first.size(), ' '),
(pair.second != RewriterConfig::OFF), "\n");
}
LOG(WARNING) << "Plugin's configs for device_type " << device_type << ":\n"
<< logs;
}
}
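// Intersection semantics: an optimizer stays ON only if no plugin for the
// given device types turns it OFF, and model pruning is disabled if any
// plugin disables it.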
ConfigList PluginGraphOptimizerRegistry::GetPluginConfigs(
bool use_plugin_optimizers, const std::set<string>& device_types) {
if (!use_plugin_optimizers) return DefaultPluginConfigs();
ConfigList ret_plugin_configs = DefaultPluginConfigs();
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (cur_plugin_configs.disable_model_pruning == true)
ret_plugin_configs.disable_model_pruning = true;
for (auto& pair : cur_plugin_configs.toggle_config) {
if (cur_plugin_configs.toggle_config[pair.first] == RewriterConfig::OFF)
ret_plugin_configs.toggle_config[pair.first] = RewriterConfig::OFF;
}
}
return ret_plugin_configs;
}
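// A conflict exists when the user explicitly enables an optimizer that the
// plugin disables, or they disagree on disable_model_pruning; the default
// plugin config never conflicts.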
bool PluginGraphOptimizerRegistry::IsConfigsConflict(
ConfigList& user_config, ConfigList& plugin_config) {
if (plugin_config == DefaultPluginConfigs()) return false;
if (user_config.disable_model_pruning != plugin_config.disable_model_pruning)
return true;
for (auto& pair : user_config.toggle_config) {
if ((user_config.toggle_config[pair.first] == RewriterConfig::ON) &&
(plugin_config.toggle_config[pair.first] == RewriterConfig::OFF))
return true;
}
return false;
}
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
static const char* kTestOptimizerName = "Test";
static const char* kTestPluginOptimizerName = "TestPlugin";
class TestGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER_AS(TestGraphOptimizer, "StaticRegister");
TEST(CustomGraphOptimizerRegistryTest, DynamicRegistration) {
std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
std::unique_ptr<const CustomGraphOptimizer> test_optimizer;
ASSERT_EQ(
0, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
EXPECT_EQ(nullptr, test_optimizer);
CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
[]() { return new TestGraphOptimizer; }, "DynamicRegister");
optimizers = CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
ASSERT_EQ(
1, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(CustomGraphOptimizerRegistryTest, StaticRegistration) {
const std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
EXPECT_EQ(1,
std::count(optimizers.begin(), optimizers.end(), "StaticRegister"));
std::unique_ptr<const CustomGraphOptimizer> test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("StaticRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(GraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestGraphOptimizer; };
EXPECT_DEATH(CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
creator, "StaticRegister"),
"twice");
}
class TestPluginGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestPluginOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
TEST(PluginGraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestPluginGraphOptimizer; };
ConfigList config_list;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "CPU",
config_list);
EXPECT_DEATH(PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
creator, "GPU", config_list),
"twice");
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b429e49a-77aa-4098-91af-c00083cc61fb | cpp | tensorflow/tensorflow | debug_stripper | tensorflow/core/grappler/optimizers/debug_stripper.cc | tensorflow/core/grappler/optimizers/debug_stripper_test.cc | #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
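// Turns Assert/PrintV2 into NoOps (data inputs become control deps) and
// CheckNumerics/Print into an Identity on the first input (remaining inputs
// become control deps). Aborts with "Nothing to do." if no debug op exists.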
Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
bool can_optimize = false;
for (const NodeDef& node : item.graph.node()) {
if (IsAssert(node) || IsCheckNumerics(node) || IsPrint(node)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return errors::Aborted("Nothing to do.");
}
*output = item.graph;
for (NodeDef& node : *output->mutable_node()) {
if (IsAssert(node) || node.op() == "PrintV2") {
node.set_op("NoOp");
EraseRegularNodeAttributes(&node);
for (string& inp : *node.mutable_input()) {
if (!IsControlInput(inp)) {
inp = AsControlDependency(NodeName(inp));
}
}
} else if (IsCheckNumerics(node) || node.op() == "Print") {
node.set_op("Identity");
protobuf::Map<string, AttrValue> new_attr;
if (node.attr().find("T") != node.attr().end()) {
new_attr.insert({"T", node.attr().at("T")});
}
node.mutable_attr()->swap(new_attr);
for (int i = 1, end = node.input_size(); i < end; ++i) {
if (!IsControlInput(node.input(i))) {
*node.mutable_input(i) = AsControlDependency(NodeName(node.input(i)));
}
}
}
}
return absl::OkStatus();
}
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DebugStripperTest : public GrapplerTest {};
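// A graph without Assert/CheckNumerics/Print is left alone: Optimize returns
// the Aborted("Nothing to do.") status.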
TEST_F(DebugStripperTest, OutputEqualToInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output add = ops::Add(s, x, y);
Output result = ops::Identity(s, add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
EXPECT_EQ(optimizer.Optimize(nullptr, item, &output),
errors::Aborted("Nothing to do."));
}
TEST_F(DebugStripperTest, StripAssertOnTwoOutputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({6}));
  auto split =
      ops::Split(s.WithOpName("split"), /*axis=*/0, input, /*num_split=*/2);
Output x = split[0];
Output y = split[1];
Output ge = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), ge, {x, y});
Output add = ops::Add(
s.WithOpName("add").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
for (const string& input : node.input()) {
if (IsControlInput(input)) {
        EXPECT_EQ(input.find(':'), string::npos);
}
}
}
}
TEST_F(DebugStripperTest, StripAssertFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto greaterequal = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), greaterequal, {x, y});
Output add = ops::Add(
s.WithOpName("z").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "GreaterEqual") {
count++;
EXPECT_EQ("GreaterEqual", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
} else if (node.name() == "Assert") {
count++;
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("^GreaterEqual", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^Assert", node.input(2));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripCheckNumericsFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto check1 = ops::CheckNumerics(s.WithOpName("CheckNumerics1"), x, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("CheckNumerics2"), y, "foo");
Output add = ops::Add(s.WithOpName("z"), check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "CheckNumerics1") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "CheckNumerics2") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("CheckNumerics1", node.input(0));
EXPECT_EQ("CheckNumerics2", node.input(1));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output print = ops::Print(s.WithOpName("Print"), x, {x});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Print") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ(1, node.attr_size());
}
}
EXPECT_EQ(2, output.node_size());
Tensor x_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"Print"}, {{"x", x_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"Print"}, {{"x", x_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintV2FromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), string("Hello"), {});
Operation print = ops::PrintV2(s.WithOpName("PrintV2"), x);
Output y =
ops::Identity(s.WithOpName("y").WithControlDependencies({print}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "PrintV2") {
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^x", node.input(0));
EXPECT_EQ(0, node.attr_size());
} else if (node.name() == "y") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^PrintV2", node.input(1));
}
}
EXPECT_EQ(3, output.node_size());
Tensor expected = EvaluateNodes(item.graph, {"y"}, {})[0];
Tensor optimized = EvaluateNodes(output, {"y"}, {})[0];
EXPECT_EQ(expected.scalar<tstring>()(), optimized.scalar<tstring>()());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/debug_stripper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/debug_stripper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e6415567-b938-42d5-9086-3264067878f8 | cpp | tensorflow/tensorflow | implementation_selector | tensorflow/core/grappler/optimizers/implementation_selector.cc | tensorflow/core/grappler/optimizers/implementation_selector_test.cc | #include "tensorflow/core/grappler/optimizers/implementation_selector.h"
#include <string>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
constexpr char kConstOp[] = "Const";
constexpr char kCaseOp[] = "Case";
constexpr char kStatelessCaseOp[] = "StatelessCase";
constexpr char kDeviceIndexOp[] = "DeviceIndex";
string FindForwardNode(utils::MutableNodeView* backward_node) {
const int last_input_index = backward_node->NumRegularFanins() - 1;
const utils::MutableFanoutView& input =
backward_node->GetRegularFanin(last_input_index);
if (IsIdentity(*input.node_view()->node())) {
return input.node_view()->node()->input(0);
} else if (IsPartitionedCall(*input.node_view()->node()) ||
IsStatefulPartitionedCall(*input.node_view()->node())) {
return backward_node->node()->input(last_input_index);
} else {
return "";
}
}
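// Note on FindForwardNode (above): the last regular input of a generated
// backward function call is expected to come from the matching forward call,
// either through an Identity node or from a (Stateful)PartitionedCall
// directly. The returned string has the shape (hypothetical names):
//   "lstm/StatefulPartitionedCall:1"   // {forward_node_name}:{output_index}
// An empty result signals that neither pattern matched; the caller then fails
// with an invalid-argument error when it tries to split the name.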
void UpdateForwardIdentityNodeDtype(utils::MutableNodeView* forward_node,
const DataTypeVector& dtypes) {
const auto& fanouts_vector = forward_node->GetRegularFanouts();
for (int pos = 0, pos_limit = fanouts_vector.size(); pos < pos_limit; ++pos) {
const auto& fanouts_at_pos = fanouts_vector[pos];
for (const auto& fanout : fanouts_at_pos) {
if ("Identity" == fanout.node_view()->GetOp()) {
(*fanout.node_view()->node()->mutable_attr())["T"].set_type(
dtypes[pos]);
VLOG(3) << "Updated DTYPE for Identity node: "
<< fanout.node_view()->node()->DebugString();
}
}
}
}
Status UpdateNodeDef(utils::MutableNodeView* node_view, const string& funcName,
const FunctionApiInfo& apiInfo) {
NodeDef* node_def = node_view->node();
VLOG(3) << "Node def before swap is: " << node_def->DebugString();
node_def->mutable_attr()->find("f")->second.mutable_func()->set_name(
funcName);
auto tin = node_def->mutable_attr()->find("Tin");
tin->second.mutable_list()->clear_type();
for (const auto& tin_dtype : apiInfo.input_arg_dtypes()) {
tin->second.mutable_list()->add_type(tin_dtype);
}
auto tout = node_def->mutable_attr()->find("Tout");
tout->second.mutable_list()->clear_type();
for (const auto& tout_dtype : apiInfo.output_arg_dtypes()) {
tout->second.mutable_list()->add_type(tout_dtype);
}
if (apiInfo.function_type() == FunctionApiInfo::BACKWARD) {
std::vector<std::string> control_deps;
for (int i = node_def->input_size() - 1; i >= 0; --i) {
if (!IsControlInput(node_def->input(i))) break;
control_deps.push_back(node_def->input(i));
node_def->mutable_input()->RemoveLast();
}
const int prev_input_size = node_def->input_size();
const int diff = prev_input_size - apiInfo.input_arg_dtypes().size();
if (diff >= 0) {
for (int i = 0; i < diff; ++i) node_def->mutable_input()->RemoveLast();
} else {
const string last_input = FindForwardNode(node_view);
const std::vector<string> name_index = ::absl::StrSplit(last_input, ':');
if (name_index.size() != 2) {
return errors::InvalidArgument(
"Invalid format of input node name: ", last_input,
" Expected: {forward_node_name}:{index}");
}
const absl::string_view node_name = name_index[0];
int last_index;
if (!::absl::SimpleAtoi(name_index[1], &last_index)) {
return errors::InvalidArgument(
"The index of input node is expected to be number, got: ",
name_index[1]);
}
for (int i = 1; i <= -diff; ++i)
node_def->add_input(strings::StrCat(node_name, ":", i + last_index));
}
for (std::string& control : control_deps)
node_def->add_input(std::move(control));
} else if (apiInfo.function_type() == FunctionApiInfo::FORWARD) {
UpdateForwardIdentityNodeDtype(node_view, apiInfo.output_arg_dtypes());
}
VLOG(3) << "Node def after swap is: " << node_def->DebugString();
return absl::OkStatus();
}
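// Note on UpdateNodeDef (above): besides renaming the "f" attr and rewriting
// Tin/Tout, backward calls have their regular input list resized to match the
// new signature. Trailing control inputs ("^node") are popped first and
// re-appended afterwards so they always stay behind the regular inputs. When
// the new implementation needs extra side outputs from the forward pass, they
// are synthesized by incrementing the output index of the last forward input,
// e.g. (illustrative) a last input of "fwd:2" is followed by "fwd:3".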
Status ImplementationSelector::LoadFunctions(const GraphDef& graph) {
lib_info_ = std::make_unique<FunctionLibraryApiInfo>();
TF_RETURN_IF_ERROR(lib_info_->Init(graph.library()));
return absl::OkStatus();
}
Status ImplementationSelector::MaybeOptimizeFunctionCall(
utils::MutableNodeView* node_view) const {
NodeDef* node_def = node_view->node();
std::vector<string> function_attribute_names;
for (const auto& attr : node_def->attr()) {
if (attr.second.has_func() &&
lib_info_->GetApiInfo(attr.second.func().name()) != nullptr) {
function_attribute_names.emplace_back(attr.first);
}
}
if (function_attribute_names.empty() &&
lib_info_->GetApiInfo(node_def->op()) == nullptr) {
return absl::OkStatus();
}
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(node_def->device(), &parsed_name) ||
!parsed_name.has_type) {
    return errors::Internal("Could not parse device name: ",
                            node_def->device());
}
VLOG(2) << "Op " << node_def->name() << " runs on " << node_def->device()
<< " = (" << parsed_name.type << ")";
for (const auto& attr_name : function_attribute_names) {
string function_name = node_def->attr().at(attr_name).func().name();
if (::absl::StrContains(function_name, "_specialized_for_")) continue;
std::vector<string> equiv_func_names;
TF_RETURN_IF_ERROR(lib_info_->GetEquivalentImplementations(
function_name, &equiv_func_names));
for (const auto& func_name : equiv_func_names) {
const auto& func_api_info = lib_info_->GetApiInfo(func_name);
if (func_api_info->preferred_device() == parsed_name.type) {
VLOG(2) << "Swapping: " << function_name << " TO: " << func_name;
TF_RETURN_IF_ERROR(UpdateNodeDef(node_view, func_name, *func_api_info));
break;
}
}
}
if (lib_info_->GetApiInfo(node_def->op()) != nullptr &&
!::absl::StrContains(node_def->op(), "_specialized_for_")) {
std::vector<string> equiv_func_names;
TF_RETURN_IF_ERROR(lib_info_->GetEquivalentImplementations(
node_def->op(), &equiv_func_names));
for (const string& func_name : equiv_func_names) {
const auto func_api_info = lib_info_->GetApiInfo(func_name);
if (func_api_info->preferred_device() == parsed_name.type) {
node_def->set_op(func_name);
break;
}
}
}
return absl::OkStatus();
}
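// Note on MaybeOptimizeFunctionCall (above): a node can invoke a function
// either through a function-valued attr (e.g. the "f" attr of a
// PartitionedCall) or directly by op name; both paths are handled. A swap
// only happens when an equivalent implementation's preferred device matches
// the device type the node is placed on, and functions already specialized by
// Grappler ("_specialized_for_") are deliberately skipped.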
Status FindDeviceIndex(const utils::MutableNodeView* device_index_node,
const string& device, int* index) {
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(device, &parsed_name) ||
!parsed_name.has_type) {
    return errors::Internal("Could not parse device name: ", device);
}
const auto& device_list =
device_index_node->GetAttr("device_names")->list().s();
auto it = absl::c_find(device_list, parsed_name.type);
if (it != device_list.end()) {
*index = it - device_list.begin();
} else {
*index = device_list.size();
}
return absl::OkStatus();
}
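// Illustrative behavior of FindDeviceIndex (above), assuming a DeviceIndex
// node with device_names = ["CPU", "GPU"]:
//   consumer on "/device:GPU:0"                 -> *index = 1
//   consumer on "/device:TPU_REPLICATED_CORE:0" -> *index = 2
// i.e. an unlisted device maps to the list size, selecting the fallthrough
// branch of the downstream Case.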
void RewriteDeviceIndexOp(utils::MutableNodeView* device_index_node,
int index) {
auto node = device_index_node->node();
node->set_op(kConstOp);
EraseRegularNodeAttributes(node);
(*node->mutable_attr())["dtype"].set_type(DT_INT32);
auto* tensor = (*node->mutable_attr())["value"].mutable_tensor();
tensor->set_dtype(DT_INT32);
tensor->add_int_val(index);
VLOG(2) << "Node after rewriting:" << node->DebugString();
}
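// Note on RewriteDeviceIndexOp (above): the DeviceIndex node is rewritten in
// place into a scalar int32 Const holding the chosen branch, roughly (sketch):
//   node { name: "x" op: "Const"
//          attr { key: "dtype" value { type: DT_INT32 } }
//          attr { key: "value"
//                 value { tensor { dtype: DT_INT32 int_val: 1 } } } }
// which lets later passes fold the downstream Case to a single branch.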
Status ImplementationSelector::SelectDeviceIndex(GraphDef* graph) const {
Status status;
VLOG(2) << "graph before rewriting device index:" << graph->DebugString();
utils::MutableGraphView graph_view(graph, &status);
TF_RETURN_IF_ERROR(status);
const int num_nodes = graph_view.NumNodes();
for (int k = 0; k < num_nodes; ++k) {
auto* node_view = graph_view.GetNode(k);
if (node_view->GetOp() != kDeviceIndexOp) {
continue;
}
VLOG(2) << "Found a node to rewrite the device index";
for (const auto& fanouts : node_view->GetRegularFanouts()) {
for (const auto& fanout : fanouts) {
if (fanout.node_view()->GetOp() != kCaseOp &&
fanout.node_view()->GetOp() != kStatelessCaseOp)
continue;
int index;
Status status =
FindDeviceIndex(node_view, fanout.node_view()->GetDevice(), &index);
if (status.ok()) {
RewriteDeviceIndexOp(node_view, index);
}
}
}
}
return absl::OkStatus();
}
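// Note on SelectDeviceIndex (above): only DeviceIndex nodes feeding a Case or
// StatelessCase are rewritten, and the index is derived from the *consumer's*
// device rather than the DeviceIndex node's own placement. If the consumer's
// device cannot be parsed, the node is left untouched (see the
// SelectDeviceIndexError test in the companion test file).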
Status ImplementationSelector::SelectImplementation(GraphDef* graph) const {
if (!graph->has_library()) {
VLOG(2) << "Skipping graph since it does not have function def";
return absl::OkStatus();
}
if (lib_info_->empty()) {
VLOG(2) << "Skipping optimization since lib_info is empty";
return absl::OkStatus();
}
Status status;
utils::MutableGraphView graph_view(graph, &status);
TF_RETURN_IF_ERROR(status);
const int num_nodes = graph_view.NumNodes();
for (int k = 0; k < num_nodes; ++k) {
TF_RETURN_IF_ERROR(MaybeOptimizeFunctionCall(graph_view.GetNode(k)));
}
return absl::OkStatus();
}
Status ImplementationSelector::Optimize(Cluster* cluster,
const GrapplerItem& item,
GraphDef* optimized_graph) {
auto status = LoadFunctions(item.graph);
if (!status.ok()) {
VLOG(2) << "Skipping optimization due to error while loading function "
<< "libraries: " << status;
return errors::Aborted("Skipped Optimization");
}
*optimized_graph = item.graph;
status = SelectDeviceIndex(optimized_graph);
if (!status.ok()) {
*optimized_graph = item.graph;
VLOG(2) << "Could not rewrite device index due to error:" << status;
}
return SelectImplementation(optimized_graph);
}
}
} | #include "tensorflow/core/grappler/optimizers/implementation_selector.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char CpuDevice[] = "/device:CPU:0";
constexpr char GpuDevice[] = "/device:GPU:0";
constexpr char TpuDevice[] = "/device:TPU_REPLICATED_CORE";
class ImplementationSelectorTest : public GrapplerTest {};
TEST_F(ImplementationSelectorTest, NoUpdate) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {CpuDevice});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
  std::unique_ptr<CustomGraphOptimizer> optimizer =
      std::make_unique<ImplementationSelector>();
ASSERT_NE(nullptr, optimizer);
TF_ASSERT_OK(optimizer->Init());
GraphDef output;
const Status status = optimizer->Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), output.node_size());
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndex) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexStatelessCase) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "StatelessCase", {"x"}, {{"T", DT_FLOAT}}, GpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexMultiOps) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexNotFound) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, TpuDevice)});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
}
}
TEST_F(ImplementationSelectorTest, SelectDeviceIndexError) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, "")});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("DeviceIndex", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, TwoTypesOfSwapImplementation) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("GPU");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, NoSwapWithImplementsOnly) {
using test::function::NDef;
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
AttrValue device_names;
device_names.mutable_list()->add_s("CPU");
device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
device_names.mutable_list()->add_s("GPU");
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
item.graph = test::function::GDef(
{NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
CpuDevice),
NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
GpuDevice),
NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
}
if (node.name() == "y1") {
EXPECT_EQ("XTimesTwo", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementation) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XAddX();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("GPU");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, GpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 5);
for (const NodeDef& node : output.node()) {
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementationTpu) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("times_two");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto tpu_def = test::function::XAddX();
auto* func2_attr = tpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("times_two");
(*func2_attr)["api_preferred_device"].set_s("TPU_REPLICATED_CORE");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, TpuDevice),
NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, TpuDevice),
NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, tpu_def});
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 5);
for (const NodeDef& node : output.node()) {
if (node.name() == "y1") {
EXPECT_EQ("XAddX", node.op());
} else if (node.name() == "y2") {
EXPECT_EQ("XTimesTwo", node.op());
}
}
}
TEST_F(ImplementationSelectorTest, SwapImplementationEval) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();
auto* func_attr = cpu_def.mutable_attr();
(*func_attr)["api_implements"].set_s("random_boost");
(*func_attr)["api_preferred_device"].set_s("CPU");
auto gpu_def = test::function::XTimesFour();
auto* func2_attr = gpu_def.mutable_attr();
(*func2_attr)["api_implements"].set_s("random_boost");
(*func2_attr)["api_preferred_device"].set_s("GPU");
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, CpuDevice),
NDef("y", "XTimesFour", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, CpuDevice)},
{cpu_def, gpu_def});
const Tensor input = test::AsScalar<float>(1.0f);
item.fetch = {"z"};
item.feed.emplace_back("x", input);
const auto four_times_boosted_tensor = EvaluateFetchNodes(item);
test::ExpectTensorEqual<float>(four_times_boosted_tensor[0],
test::AsScalar<float>(4.0f));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
GrapplerItem optimized = item.WithGraph(std::move(output));
const auto twice_boosted_tensor = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(twice_boosted_tensor[0],
test::AsScalar<float>(2.0f));
}
TEST_F(ImplementationSelectorTest, SwapImplementationWithGradient) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionDef boost_1 = FDH::Create(
"Boost1", {"x:float"}, {"z:float", "s:float"}, {},
{{{"boost"}, "Add", {"x", "x"}, {{"T", DT_FLOAT}}},
FDH::Const("one", 1.0f)},
{{"z", "boost:z:0"}, {"s", "one:output:0"}});
auto* boost_1_attr = boost_1.mutable_attr();
(*boost_1_attr)["api_implements"].set_s("random_boost");
(*boost_1_attr)["api_preferred_device"].set_s("CPU");
(*boost_1_attr)["backward_function_name"].set_s("BoostCpuGradient");
FunctionDef boost_1_gradient = FDH::Create(
"Boost1Gradient", {"x:float", "s:float"}, {"dx:float"}, {},
{FDH::Const("two", 2.0f),
{{"grad"}, "Mul", {"x", "two:output:0"}, {{"T", DT_FLOAT}}}},
{{"dx", "grad:z:0"}});
auto* boost_1_grad_attr = boost_1_gradient.mutable_attr();
(*boost_1_grad_attr)["api_implements"].set_s("random_boost");
(*boost_1_grad_attr)["api_preferred_device"].set_s("CPU");
(*boost_1_grad_attr)["forward_function_name"].set_s("BoostCpu");
FunctionDef boost_2_func = FDH::Create(
"Boost2", {"x:float"}, {"z:float", "s1:float", "s2:float"}, {},
{FDH::Const("four", 4.0f),
{{"boost"}, "Mul", {"x", "four:output:0"}, {{"T", DT_FLOAT}}},
FDH::Const("one", 1.0f),
FDH::Const("two", 2.0f)},
{{"z", "boost:z:0"}, {"s1", "one:output:0"}, {"s2", "two:output:0"}});
auto* boost_2_attr = boost_2_func.mutable_attr();
(*boost_2_attr)["api_implements"].set_s("random_boost");
(*boost_2_attr)["api_preferred_device"].set_s("GPU");
(*boost_2_attr)["backward_function_name"].set_s("BoostGpuGradient");
FunctionDef boost_2_gradient = FDH::Create(
"Boost2Gradient", {"x:float", "s1:float", "s2:float"}, {"dx:float"}, {},
{FDH::Const("four", 4.0f),
{{"grad"}, "Mul", {"x", "four:output:0"}, {{"T", DT_FLOAT}}}},
{{"dx", "grad:z:0"}});
auto* boost_2_grad_attr = boost_2_gradient.mutable_attr();
(*boost_2_grad_attr)["api_implements"].set_s("random_boost");
(*boost_2_grad_attr)["api_preferred_device"].set_s("GPU");
(*boost_2_grad_attr)["forward_function_name"].set_s("BoostGpu");
const auto forward =
NDef("lstm/StatefulPartitionedCall", "StatefulPartitionedCall", {"input"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("Boost2")}},
CpuDevice);
const auto backward =
NDef("gradient/lstm/StatefulPartitionedCall", "StatefulPartitionedCall",
{"input", "lstm/StatefulPartitionedCall:1",
"lstm/StatefulPartitionedCall:2"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("Boost2Gradient")}},
CpuDevice);
ImplementationSelector optimizer;
GraphDef output;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("input", "Placeholder", {}, {{"dtype", DT_FLOAT}}, CpuDevice),
forward, backward,
NDef("output", "Identity", {"lstm/StatefulPartitionedCall:0"},
{{"T", DT_FLOAT}}, CpuDevice)},
{boost_1, boost_1_gradient, boost_2_func, boost_2_gradient});
const Tensor input = test::AsScalar<float>(1.0f);
item.fetch = {"output"};
item.feed.emplace_back("input", input);
const auto four_times_boosted_tensor = EvaluateFetchNodes(item);
test::ExpectTensorEqual<float>(four_times_boosted_tensor[0],
test::AsScalar<float>(4.0f));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
GrapplerItem optimized = item.WithGraph(std::move(output));
const auto twice_boosted_tensor = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(twice_boosted_tensor[0],
test::AsScalar<float>(2.0f));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/implementation_selector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/implementation_selector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de5fe46c-970f-4768-becc-1f57168e0534 | cpp | tensorflow/tensorflow | auto_mixed_precision | tensorflow/core/grappler/optimizers/auto_mixed_precision.cc | tensorflow/core/grappler/optimizers/auto_mixed_precision_test.cc | #include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision_lists.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
bool ShouldSimulateGpu() {
bool is_enabled = [] {
bool ret = false;
string var;
TF_CHECK_OK(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "", &var));
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU",
false, &ret));
return ret;
}();
return is_enabled;
}
#if GOOGLE_CUDA
const std::pair<int, int> kMinGPUArch = {7, 0};
#else
const std::pair<int, int> kMinGPUArch = {0, 0};
#endif
const char kSuffix[] = "AutoMixedPrecision";
const char kCastToFp16[] = "CastToFp16";
const char kCastToBf16[] = "CastToBf16";
const char kCastToFp32[] = "CastToFp32";
#if GOOGLE_CUDA
std::pair<int, int> GetDeviceGPUArch(
const DeviceProperties& device_properties) {
if (device_properties.type() != "GPU") return {0, 0};
string arch_str = device_properties.environment().at("architecture");
std::vector<string> split_arch_str = str_util::Split(arch_str, '.');
if (split_arch_str.empty()) {
return {0, 0};
}
int major, minor;
if (!strings::safe_strto32(split_arch_str[0], &major)) {
return {0, 0};
}
if (split_arch_str.size() > 1) {
if (strings::safe_strto32(split_arch_str[1], &minor)) {
return {major, minor};
} else {
return {0, 0};
}
} else {
return {major, 0};
}
}
#endif
bool HasFastFP16Support(const DeviceProperties& props) {
#if GOOGLE_CUDA
return GetDeviceGPUArch(props) >= kMinGPUArch;
#elif TENSORFLOW_USE_ROCM
absl::flat_hash_set<std::string> FP16SupportedDevices = {{"gfx906"},
{"gfx908"}};
std::string gcnArchName = props.environment().at("architecture");
std::vector<std::string> gpu_arch = absl::StrSplit(gcnArchName, ":");
return !gpu_arch.empty() && FP16SupportedDevices.contains(gpu_arch[0]);
#endif
return ShouldSimulateGpu();
}
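// Note on HasFastFP16Support (above): CUDA builds gate fast fp16 on compute
// capability >= 7.0 (Volta and newer), ROCm builds on a small allowlist of
// gfx architectures (gfx906, gfx908). On builds with neither backend, the
// environment-driven GPU simulation is the only way this returns true.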
struct TypeAttrId {
static constexpr int kSingleType = -1;
explicit TypeAttrId(const string& _attr_name, int _type_index = kSingleType)
: attr_name(_attr_name),
type_index(_type_index),
fixed_type(DT_INVALID) {}
explicit TypeAttrId(DataType _fixed_type)
: attr_name(), type_index(kSingleType), fixed_type(_fixed_type) {}
bool operator==(const TypeAttrId& other) const {
return attr_name == other.attr_name && type_index == other.type_index &&
fixed_type == other.fixed_type;
}
bool operator<(const TypeAttrId& other) const {
return std::make_tuple(attr_name, type_index, fixed_type) <
std::make_tuple(other.attr_name, other.type_index, other.fixed_type);
}
template <typename H>
friend H AbslHashValue(H h, const TypeAttrId& ta) {
return H::combine(std::move(h), ta.attr_name, ta.type_index, ta.fixed_type);
}
string DebugString() const {
if (!attr_name.empty()) {
if (type_index == kSingleType) {
return attr_name;
} else {
return strings::StrCat(attr_name, "[", type_index, "]");
}
} else {
return tensorflow::DataTypeString(fixed_type);
}
}
string attr_name;
int type_index;
DataType fixed_type;
};
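// Illustrative TypeAttrId values (hypothetical nodes):
//   TypeAttrId("T")        // a single "type" attr, e.g. Add's "T"
//   TypeAttrId("Tout", 1)  // entry 1 of a "list(type)" attr
//   TypeAttrId(DT_INT32)   // an arg whose dtype is fixed by the OpDef
// DebugString() renders these as "T", "Tout[1]" and "int32" respectively.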
DataType GetDataType(const NodeDef& node, const TypeAttrId& type_attr) {
if (type_attr.attr_name.empty()) {
return type_attr.fixed_type;
}
if (!node.attr().count(type_attr.attr_name)) {
return DT_INVALID;
}
const AttrValue& attr_value = node.attr().at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
return attr_value.type();
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return DT_INVALID;
}
return attr_value.list().type(type_attr.type_index);
}
}
bool SetDataType(NodeDef* node, const TypeAttrId& type_attr, DataType type) {
if (type_attr.attr_name.empty() || !node->attr().count(type_attr.attr_name)) {
return false;
}
AttrValue& attr_value = node->mutable_attr()->at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
attr_value.set_type(type);
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return false;
}
attr_value.mutable_list()->set_type(type_attr.type_index, type);
}
return true;
}
std::vector<std::pair<int, int>> ArgDefIndexes(const NodeDef& node, int arg_idx,
const OpDef::ArgDef& arg_def) {
std::vector<std::pair<int, int>> argdef_inds;
if (!arg_def.type_list_attr().empty()) {
int num_types = node.attr().at(arg_def.type_list_attr()).list().type_size();
for (int type_idx = 0; type_idx < num_types; ++type_idx) {
argdef_inds.push_back({arg_idx, type_idx});
}
} else {
int num_repeat = 1;
if (node.attr().count(arg_def.number_attr())) {
num_repeat = node.attr().at(arg_def.number_attr()).i();
}
argdef_inds.insert(argdef_inds.end(), num_repeat, {arg_idx, -1});
}
return argdef_inds;
}
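// Note on ArgDefIndexes (above): each OpDef arg expands to one (arg_idx,
// type_idx) pair per tensor it covers. A type_list_attr arg yields one pair
// per list entry, a number_attr arg repeats (arg_idx, -1) N times, and a
// plain arg yields a single (arg_idx, -1). For example (illustrative), an
// AddN node with N = 3 expands its one "inputs" arg to
//   {(0, -1), (0, -1), (0, -1)}.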
std::vector<std::pair<int, int>> InputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.input_arg_size());
for (int arg_idx = 0; arg_idx < op_def.input_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
std::vector<std::pair<int, int>> OutputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.output_arg_size());
for (int arg_idx = 0; arg_idx < op_def.output_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
TypeAttrId GetTypeAttrId(const OpDef::ArgDef& arg_def, int arg_type_index) {
if (!arg_def.type_list_attr().empty()) {
return TypeAttrId(arg_def.type_list_attr(), arg_type_index);
} else if (!arg_def.type_attr().empty()) {
return TypeAttrId(arg_def.type_attr());
} else {
return TypeAttrId(arg_def.type());
}
}
std::vector<int> NonControlInputs(const NodeDef& node) {
std::vector<int> pos;
for (int i = 0; i < node.input_size(); i++) {
if (!IsControlInput(node.input(i))) {
pos.push_back(i);
}
}
return pos;
}
class NodeTypeAttrMap {
public:
NodeTypeAttrMap() {}
explicit NodeTypeAttrMap(const GraphDef& graph) { TF_CHECK_OK(Init(graph)); }
Status Init(const GraphDef& graph) {
if (graph_ != nullptr) {
return errors::InvalidArgument("NodeTypeAttrMap is already initialized.");
}
graph_ = &graph;
    function_library_ = std::make_unique<FunctionLibraryDefinition>(
        OpRegistry::Global(), graph.library());
for (const NodeDef& node : graph.node()) {
TF_RETURN_IF_ERROR(AddNode(node));
}
return absl::OkStatus();
}
bool is_initialized() const { return graph_ != nullptr; }
absl::flat_hash_set<TypeAttrId> GetTypeAttrs(const NodeDef& node) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
absl::flat_hash_set<TypeAttrId> type_attrs;
const auto iter = type2io_.find(&node);
CHECK(iter != type2io_.end());
for (const auto& key_value : iter->second) {
type_attrs.insert(key_value.first);
}
return type_attrs;
}
const absl::flat_hash_set<int>& GetInputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).first;
}
const absl::flat_hash_set<int>& GetOutputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).second;
}
TypeAttrId GetInputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
const auto iter = io2type_.find(&node);
DCHECK(iter != io2type_.end())
<< "Node " << node.name() << " doesn't exist in a graph";
auto type_vec = io2type_.at(&node).first;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
TypeAttrId GetOutputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
auto type_vec = io2type_.at(&node).second;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
private:
Status AddNode(const NodeDef& node) {
const OpDef* op_def_ptr = nullptr;
TF_RETURN_IF_ERROR(function_library_->LookUpOpDef(node.op(), &op_def_ptr));
const OpDef& op_def = *op_def_ptr;
auto& type2io_entry = type2io_[&node];
auto& io2type_entry = io2type_[&node];
auto input_arg_inds = InputPortArgDefIndexes(node, op_def);
    const int num_non_control_inputs =
        static_cast<int>(NonControlInputs(node).size());
    if (num_non_control_inputs != static_cast<int>(input_arg_inds.size())) {
      return errors::InvalidArgument(
          "Expected ", node.op(), " node ", node.name(), " to have ",
          input_arg_inds.size(), " non-control input(s), but got ",
          num_non_control_inputs);
    }
io2type_entry.first.reserve(input_arg_inds.size());
for (int i = 0; i < static_cast<int>(input_arg_inds.size()); ++i) {
const auto& arg_inds = input_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].first.insert(i);
io2type_entry.first.push_back(type_attr);
}
auto output_arg_inds = OutputPortArgDefIndexes(node, op_def);
io2type_entry.second.reserve(output_arg_inds.size());
for (int i = 0; i < static_cast<int>(output_arg_inds.size()); ++i) {
const auto& arg_inds = output_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].second.insert(i);
io2type_entry.second.push_back(type_attr);
}
for (const auto& attr : node.attr()) {
const string& attr_name = attr.first;
if (!attr_name.empty() && attr_name[0] == '_') continue;
const AttrValue& attr_value = attr.second;
const OpDef::AttrDef* attr_def = FindAttr(attr_name, op_def);
if (!attr_def) {
return errors::InvalidArgument("AttrDef not found for attribute ",
attr_name, " of node ", node.name());
}
if (attr_def->type() == "type") {
type2io_entry[TypeAttrId(attr_name)];
} else if (attr_def->type() == "list(type)") {
for (int i = 0; i < attr_value.list().type_size(); ++i) {
type2io_entry[TypeAttrId(attr_name, i)];
}
}
}
return absl::OkStatus();
}
const GraphDef* graph_ = nullptr;
std::unique_ptr<FunctionLibraryDefinition> function_library_;
typedef absl::flat_hash_set<int> IntSet;
typedef absl::flat_hash_map<TypeAttrId, std::pair<IntSet, IntSet>> Type2IOMap;
absl::flat_hash_map<const NodeDef*, Type2IOMap> type2io_;
typedef std::vector<TypeAttrId> TypeAttrIdVec;
absl::flat_hash_map<const NodeDef*, std::pair<TypeAttrIdVec, TypeAttrIdVec>>
io2type_;
};
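// Note on NodeTypeAttrMap (above): for every node it records, per type attr,
// which input and output ports that attr types (type2io_), plus the inverse
// port -> type attr lookup (io2type_). Type attrs that type no ports at all
// still get an empty type2io_ entry, so later passes can enumerate and retype
// them like any other type attr.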
struct NodeTypeId {
NodeTypeId(const NodeDef* _node, const TypeAttrId& _type_attr)
: node(_node), type_attr(_type_attr) {}
const NodeDef* node;
TypeAttrId type_attr;
bool operator==(const NodeTypeId& other) const {
return node == other.node && type_attr == other.type_attr;
}
template <typename H>
friend H AbslHashValue(H h, const NodeTypeId& nt) {
return H::combine(std::move(h), nt.node, nt.type_attr);
}
};
struct NodeTypeIdEdge {
NodeTypeIdEdge(const NodeTypeId& _src, const NodeTypeId& _dst)
: src(_src), dst(_dst) {}
NodeTypeId src;
NodeTypeId dst;
};
class GraphTypeTopologyView {
public:
GraphTypeTopologyView() = default;
explicit GraphTypeTopologyView(bool skip_invalid_edges)
: skip_invalid_edges_(skip_invalid_edges) {}
Status InitializeFromGraph(const GraphDef& graph,
const NodeTypeAttrMap& node_type_map);
Status AddEphemeralEdges(absl::Span<const NodeTypeIdEdge> ephemeral_edges);
bool is_initialized() const { return graph_ != nullptr; }
int num_nodes() const { return num_nodes_; }
const GraphDef* graph() const { return graph_; }
bool HasNode(absl::string_view node_name, const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(int node_idx) const;
const absl::optional<int> GetNodeIndex(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const absl::optional<int> GetNodeIndex(const NodeTypeId& node) const;
const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;
private:
struct NodeTypeKey : public std::pair<absl::string_view, TypeAttrId> {
typedef std::pair<absl::string_view, TypeAttrId> Base;
using Base::pair;
template <typename H>
friend H AbslHashValue(H h, const NodeTypeKey& nt) {
return H::combine(std::move(h), nt.first, nt.second);
}
};
bool skip_invalid_edges_ = false;
const GraphDef* graph_ = nullptr;
int num_nodes_ = 0;
std::vector<NodeTypeId> node_type_attrs_;
absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
absl::flat_hash_map<NodeTypeKey, int> node_type_name_to_index_;
std::vector<absl::InlinedVector<int, 4>> fanins_;
std::vector<absl::InlinedVector<int, 2>> fanouts_;
absl::InlinedVector<int, 4> empty_fanin_;
absl::InlinedVector<int, 2> empty_fanout_;
};
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
std::sort(v->begin(), v->end());
v->erase(std::unique(v->begin(), v->end()), v->end());
}
Status GraphTypeTopologyView::InitializeFromGraph(
const GraphDef& graph, const NodeTypeAttrMap& node_type_map) {
if (graph_ != nullptr) {
return errors::InvalidArgument(
"GraphTypeTopologyView is already initialized.");
}
graph_ = &graph;
int num_nodedefs = graph.node_size();
node_name_to_index_.rehash(num_nodedefs);
node_type_attrs_.reserve(num_nodedefs);
node_type_name_to_index_.rehash(num_nodedefs);
for (int node_idx = 0; node_idx < num_nodedefs; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
node_name_to_index_.emplace(node.name(), node_idx);
for (const TypeAttrId& type_attr : node_type_map.GetTypeAttrs(node)) {
int node_type_idx = node_type_attrs_.size();
node_type_name_to_index_.emplace(NodeTypeKey(node.name(), type_attr),
node_type_idx);
node_type_attrs_.emplace_back(&node, type_attr);
}
}
num_nodes_ = node_type_attrs_.size();
fanins_.resize(num_nodes_);
fanouts_.resize(num_nodes_);
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
const NodeTypeId& node_type = node_type_attrs_.at(node_type_idx);
auto input_ports =
node_type_map.GetInputPorts(*node_type.node, node_type.type_attr);
fanins_[node_type_idx].reserve(input_ports.size());
for (int port : input_ports) {
const string& input = node_type.node->input(port);
TensorId tensor = ParseTensorName(input);
const auto it = node_name_to_index_.find(tensor.node());
const bool valid_input = it != node_name_to_index_.end();
if (!valid_input) {
const string error_message = absl::StrCat(
"Non-existent input ", input, " in node ", node_type.node->name());
if (skip_invalid_edges_) {
VLOG(3) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_input) {
const int input_idx = it->second;
const NodeDef& input_node = graph_->node(input_idx);
TypeAttrId input_type_attr =
node_type_map.GetOutputTypeAttr(input_node, tensor.index());
const auto it2 = node_type_name_to_index_.find(
NodeTypeKey(input_node.name(), input_type_attr));
if (it2 == node_type_name_to_index_.end()) {
if (!skip_invalid_edges_) {
return errors::InvalidArgument("Did not find type attr ",
input_type_attr.DebugString(),
" in node ", input_node.name());
}
continue;
}
int input_node_type_idx = it2->second;
fanins_[node_type_idx].push_back(input_node_type_idx);
fanouts_[input_node_type_idx].push_back(node_type_idx);
}
}
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
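// Note on GraphTypeTopologyView::InitializeFromGraph (above): the vertices of
// this view are (node, type_attr) pairs, so a single NodeDef can contribute
// several vertices. An edge connects two vertices only when they share a
// tensor whose dtype is governed by both endpoints' type attrs, which keeps
// type propagation from leaking across unrelated attrs of the same node.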
Status GraphTypeTopologyView::AddEphemeralEdges(
absl::Span<const NodeTypeIdEdge> ephemeral_edges) {
for (const NodeTypeIdEdge& edge : ephemeral_edges) {
const auto src = node_name_to_index_.find(edge.src.node->name());
const bool valid_src = src != node_name_to_index_.end();
if (!valid_src) {
const string error_message =
absl::StrCat("Non-existent src node: ", edge.src.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
const auto dst = node_name_to_index_.find(edge.dst.node->name());
const bool valid_dst = dst != node_name_to_index_.end();
if (!valid_dst) {
const string error_message =
absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_dst && valid_src) {
int src_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.src.node->name(), edge.src.type_attr));
int dst_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.dst.node->name(), edge.dst.type_attr));
fanins_[dst_node_type_idx].push_back(src_node_type_idx);
fanouts_[src_node_type_idx].push_back(dst_node_type_idx);
}
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
bool GraphTypeTopologyView::HasNode(absl::string_view node_name,
const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it != node_type_name_to_index_.end();
}
const NodeTypeId* GraphTypeTopologyView::GetNode(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it == node_type_name_to_index_.end()
? nullptr
: &node_type_attrs_.at(it->second);
}
const NodeTypeId* GraphTypeTopologyView::GetNode(int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
return &node_type_attrs_.at(node_idx);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
DCHECK(it != node_type_name_to_index_.end())
<< "Node doesn't exist in a graph";
return it == node_type_name_to_index_.end() ? absl::nullopt
: absl::make_optional(it->second);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
const NodeTypeId& node) const {
return GetNodeIndex(node.node->name(), node.type_attr);
}
const absl::InlinedVector<int, 4>& GraphTypeTopologyView::GetFanin(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
const absl::InlinedVector<int, 2>& GraphTypeTopologyView::GetFanout(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
enum class TypeTraversalDirection {
kFollowInputs,
kFollowOutputs,
kFollowInputsAndOutputs,
};
struct DfsTypeCallbacks {
DfsTypeCallbacks() = default;
DfsTypeCallbacks(std::function<void(int)> pre, std::function<void(int)> post,
std::function<void(int, int)> back_edge)
: pre_order(std::move(pre)),
post_order(std::move(post)),
on_back_edge(std::move(back_edge)) {}
static DfsTypeCallbacks PreOrder(std::function<void(int)> pre) {
return DfsTypeCallbacks(std::move(pre), nullptr, nullptr);
}
static DfsTypeCallbacks PostOrder(std::function<void(int)> post) {
return DfsTypeCallbacks(nullptr, std::move(post), nullptr);
}
std::function<void(int)> pre_order;
std::function<void(int)> post_order;
std::function<void(int, int)> on_back_edge;
};
struct DfsTypePredicates {
DfsTypePredicates() = default;
DfsTypePredicates(std::function<bool(int)> enter,
std::function<bool(int)> advance)
: enter(std::move(enter)), advance(std::move(advance)) {}
static DfsTypePredicates Enter(std::function<bool(int)> enter) {
return DfsTypePredicates(std::move(enter), nullptr);
}
static DfsTypePredicates Advance(std::function<bool(int)> advance) {
return DfsTypePredicates(nullptr, std::move(advance));
}
std::function<bool(int)> enter;
std::function<bool(int)> advance;
};
struct DfsStackElem {
DfsStackElem(int node, bool children_visited, int src)
: node(node), children_visited(children_visited), src(src) {}
explicit DfsStackElem(int node) : DfsStackElem(node, false, -1) {}
int node;
bool children_visited;
int src;
};
enum class NodeState { kNotVisited, kVisiting, kDone };
void DfsTypeTraversal(const GraphTypeTopologyView& graph_type_view,
const absl::Span<const NodeTypeId* const> from,
const TypeTraversalDirection direction,
const DfsTypePredicates& predicates,
const DfsTypeCallbacks& callbacks) {
std::vector<DfsStackElem> stack;
stack.reserve(from.size());
for (const NodeTypeId* node : from) {
const absl::optional<int> node_idx = graph_type_view.GetNodeIndex(*node);
DCHECK(node_idx.has_value())
<< "Illegal start node: " << node->node->name();
if (node_idx.has_value()) {
stack.emplace_back(node_idx.value());
}
}
absl::flat_hash_map<int, NodeState> node_state;
while (!stack.empty()) {
DfsStackElem w = stack.back();
stack.pop_back();
NodeState& state = node_state[w.node];
if (state == NodeState::kDone) continue;
if (predicates.enter && !predicates.enter(w.node)) {
state = NodeState::kDone;
continue;
}
if (w.children_visited) {
state = NodeState::kDone;
if (callbacks.post_order) {
callbacks.post_order(w.node);
}
continue;
}
if (state == NodeState::kVisiting) {
if (callbacks.on_back_edge) {
callbacks.on_back_edge(w.src, w.node);
}
continue;
}
state = NodeState::kVisiting;
if (callbacks.pre_order) {
callbacks.pre_order(w.node);
}
stack.emplace_back(w.node, true, w.src);
if (predicates.advance && !predicates.advance(w.node)) {
continue;
}
if (direction == TypeTraversalDirection::kFollowInputs ||
direction == TypeTraversalDirection::kFollowInputsAndOutputs) {
for (const int fanin : graph_type_view.GetFanin(w.node)) {
stack.emplace_back(fanin, false, w.node);
}
}
if (direction == TypeTraversalDirection::kFollowOutputs ||
direction == TypeTraversalDirection::kFollowInputsAndOutputs) {
for (const int fanout : graph_type_view.GetFanout(w.node)) {
stack.emplace_back(fanout, false, w.node);
}
}
}
}
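// Illustrative use of DfsTypeTraversal (above); `roots` is a hypothetical
// absl::Span<const NodeTypeId* const> of start vertices. This sketch collects
// every vertex reachable by following outputs:
//
//   std::vector<int> visited;
//   DfsTypeTraversal(graph_type_view, roots,
//                    TypeTraversalDirection::kFollowOutputs,
//                    DfsTypePredicates::Enter([](int) { return true; }),
//                    DfsTypeCallbacks::PreOrder(
//                        [&visited](int idx) { visited.push_back(idx); }));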
DataTypeSet AllowedDataTypes(const OpDef::AttrDef& attr_def) {
const auto& allowed_types = attr_def.allowed_values().list().type();
if (allowed_types.empty()) {
return AllTypes();
}
uint32 dtype_mask = 0;
for (int dtype : allowed_types) {
dtype_mask |= 1u << dtype;
}
return DataTypeSet(dtype_mask);
}
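// Note on the AllowedDataTypes overload above: DataTypeSet is backed by a
// uint32 bitmask, so each allowed enum value contributes one bit via
// `1u << dtype`. An empty allowed_values list means the attr is
// unconstrained, in which case AllTypes() is returned.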
DataTypeSet AllowedDataTypes(const OpDef& op_def, const TypeAttrId& t_attr_id) {
if (t_attr_id.attr_name.empty()) {
return ToSet(t_attr_id.fixed_type);
}
const OpDef::AttrDef* attr_def = FindAttr(t_attr_id.attr_name, op_def);
CHECK(attr_def);
return AllowedDataTypes(*attr_def);
}
Status ValidateLists(const gtl::FlatSet<string>& allow_list,
const gtl::FlatSet<string>& deny_list,
const gtl::FlatSet<string>& infer_list,
const gtl::FlatSet<string>& clear_list) {
std::vector<gtl::FlatSet<string>> lists{allow_list, deny_list, infer_list,
clear_list};
std::multiset<string> counts;
for (const auto& list : lists) {
counts.insert(list.begin(), list.end());
}
bool duplicates = false;
for (const auto& s : counts) {
if (counts.count(s) > 1) {
duplicates = true;
LOG(ERROR) << "Op present in multiple lists: " << s;
}
}
if (duplicates) {
return errors::InvalidArgument("Op lists have conflicting entries");
} else {
return absl::OkStatus();
}
}
bool HasInputOrOutputRefs(const NodeDef& node) {
const OpDef* op_def;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok()) {
return true;
}
for (const auto& input : op_def->input_arg()) {
if (input.is_ref()) {
return true;
}
}
for (const auto& output : op_def->output_arg()) {
if (output.is_ref()) {
return true;
}
}
return false;
}
bool CanForceFP16(const NodeDef& node) {
return node.op() != "Const" && node.op() != "SoftmaxCrossEntropyWithLogits" &&
!IsStateful(node) && !HasInputOrOutputRefs(node);
}
int GetCudaVersion(
const std::unordered_map<string, DeviceProperties>& devices) {
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cuda");
if (it != device_env.end()) {
string cuda_version_str = it->second;
return std::stoi(cuda_version_str);
}
}
}
return 0;
}
int GetCudnnVersion(
const std::unordered_map<string, DeviceProperties>& devices) {
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cudnn");
if (it != device_env.end()) {
string cudnn_version_str = it->second;
return std::stoi(cudnn_version_str);
}
}
}
return 0;
}
std::unordered_map<string, DeviceProperties> GetDevices(Cluster* cluster) {
if (!ShouldSimulateGpu()) {
return cluster->GetDevices();
}
bool has_gpu = false;
for (const auto& device : cluster->GetDevices()) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
has_gpu = true;
break;
}
}
if (has_gpu) {
return cluster->GetDevices();
}
std::unordered_map<string, DeviceProperties> devices(cluster->GetDevices());
  DeviceProperties gpu_device_properties;
  gpu_device_properties.set_type("GPU");
#if GOOGLE_CUDA
  gpu_device_properties.set_vendor("NVIDIA");
  gpu_device_properties.mutable_environment()->insert({"architecture", "8.0"});
  gpu_device_properties.mutable_environment()->insert({"cuda", "11050"});
  gpu_device_properties.mutable_environment()->insert({"cudnn", "8302"});
#elif TENSORFLOW_USE_ROCM
  gpu_device_properties.set_vendor("Advanced Micro Devices, Inc");
  gpu_device_properties.mutable_environment()->insert(
      {"architecture", "gfx908"});
#endif
  devices.emplace(std::make_pair("/job:localhost/replica:0/task:0/device:GPU:0",
                                 gpu_device_properties));
return devices;
}
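// Note on GetDevices (above): when GPU simulation is enabled and the cluster
// has no real GPU, a synthetic GPU entry is appended so the rest of the pass
// sees an fp16-capable device. The simulated properties (architecture "8.0"
// with CUDA/cuDNN versions, or "gfx908" under ROCm) are chosen to satisfy the
// HasFastFP16Support checks above.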
class AutoMixedPrecisionImpl {
public:
enum class CastType { FP16, FP32, AUTO };
AutoMixedPrecisionImpl(Cluster* cluster,
const std::unordered_set<string>& nodes_to_preserve,
GraphDef* graph, string id,
AutoMixedPrecisionMode mode)
: devices_(GetDevices(cluster)),
virtual_placer_(devices_),
nodes_to_preserve_(nodes_to_preserve),
graph_(graph),
function_library_(OpRegistry::Global(), graph->library()),
id_(id),
graph_view_(graph),
cuda_version_(GetCudaVersion(devices_)),
cudnn_version_(GetCudnnVersion(devices_)),
num_nonvar_casts_to_f16_(0),
mode_(mode),
target_dtype_((mode_ == AutoMixedPrecisionMode::CUDA ||
mode_ == AutoMixedPrecisionMode::CPU ||
mode_ == AutoMixedPrecisionMode::FP16_CPU)
? DT_HALF
: DT_BFLOAT16) {}
Status Optimize();
private:
typedef absl::flat_hash_set<NodeTypeId> NodeTypeIdSet;
std::unique_ptr<AutoMixedPrecisionLists> get_mixed_precision_lists() const {
switch (mode_) {
case AutoMixedPrecisionMode::CUDA:
return std::make_unique<AutoMixedPrecisionListsFp16>(
cuda_version_, cudnn_version_, AutoMixedPrecisionMode::CUDA);
case AutoMixedPrecisionMode::BF16:
return std::make_unique<AutoMixedPrecisionListsMkl>();
case AutoMixedPrecisionMode::CPU:
        return std::make_unique<AutoMixedPrecisionListsFp16>(
            /*cuda_version=*/10000, /*cudnn_version=*/8000,
            AutoMixedPrecisionMode::CPU);
case AutoMixedPrecisionMode::FP16_CPU:
return std::make_unique<AutoMixedPrecisionListsFp16>(
0, 0, AutoMixedPrecisionMode::FP16_CPU);
}
}
Status PrintDebugLogs(bool preop, size_t timestamp);
void LogSkippedNode(const NodeDef& node, const string& device_type) const;
bool MustPreserve(const NodeDef& node) const;
bool IsOnDevice(const NodeDef& node, const string& device_type) const;
bool IsOnSuitableGPUArch(const NodeDef& node) const;
bool ShouldProcess(const NodeDef& node) const;
bool NodeHasF16KernelForTypeAttr(const NodeDef& node, TypeAttrId taid) const;
bool NodeImplicitlyReadsNonResourceVariable(const NodeDef& node) const;
void ConvertBatchNormOpsToV2();
bool SupportsF16(const NodeTypeId& node_type) const;
bool SupportsF16DataType(const NodeTypeId& node_type) const;
bool IsQuantized(const NodeTypeId& node_type) const;
const NodeTypeId* GetTensorListFloat32NodeTypeId(const NodeDef& node) const;
bool IsSourceOrSinkOp(const string& op) const;
void FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* clusters,
absl::flat_hash_set<int>* deny_set) const;
void FindTensorListImplicitFloat32Edges(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
std::vector<NodeTypeIdEdge>* implicit_fp32_edges) const;
void AddAllowlistOps(absl::flat_hash_set<int>* allow_set) const;
void RemoveAllowsetWithFp32(absl::flat_hash_set<int>* allow_set) const;
void PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const;
void ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* deny_set) const;
void AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
void AddInferToAllowIfFollowAllow(const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
void PropagateAllowThroughClear(const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
Status ForceColorMatchOnRecurrentEdges(
absl::flat_hash_set<int>* allow_set) const;
void MakeCastsAllowIfAllOutputsAllow(
absl::flat_hash_set<int>* allow_set) const;
NodeDef BuildCastNode(const MutableGraphView::OutputPort& src, bool to_f16,
const string& device) const;
absl::StatusOr<NodeDef*> InsertCastNodeAtFanout(
const absl::flat_hash_set<int>& allow_set, const bool src_is_allow,
const CastType& cast_type, MutableGraphView::OutputPort& src);
absl::StatusOr<DataType> GetCastToType(const NodeDef* node) const;
void CollectOutputPorts(
const TypeAttrId& type_attr, NodeDef* node,
std::vector<MutableGraphView::OutputPort>& output_ports) const;
Status ChangeTypeAttrsAndAddCasts(const absl::flat_hash_set<int>& allow_set);
std::unordered_map<string, DeviceProperties> devices_;
VirtualPlacer virtual_placer_;
std::unordered_set<string> nodes_to_preserve_;
GraphDef* graph_;
FunctionLibraryDefinition function_library_;
string id_;
MutableGraphView graph_view_;
int cuda_version_;
int cudnn_version_;
int num_nonvar_casts_to_f16_;
NodeTypeAttrMap node_type_map_;
GraphTypeTopologyView graph_type_view_;
bool force_all_fp16_;
bool treat_infer_as_deny_;
AutoMixedPrecisionMode mode_;
gtl::FlatSet<string> f16_allowlist_;
gtl::FlatSet<string> f16_denylist_;
gtl::FlatSet<string> f16_inferlist_;
gtl::FlatSet<string> f16_clearlist_;
absl::flat_hash_set<const NodeDef*> should_process_nodes_;
DataType target_dtype_;
};
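// Builds a Cast node converting output `src` between float32 and the f16
// target type, choosing a unique name that does not collide with any
// existing node in the graph.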
NodeDef AutoMixedPrecisionImpl::BuildCastNode(
const MutableGraphView::OutputPort& src, bool to_f16,
const string& device) const {
DataType src_type = to_f16 ? DT_FLOAT : target_dtype_;
DataType dst_type = to_f16 ? target_dtype_ : DT_FLOAT;
const char* cast_string = !to_f16 ? kCastToFp32
: target_dtype_ == DT_HALF ? kCastToFp16
: kCastToBf16;
int id = 0;
std::string name;
do {
name = absl::StrCat(src.node->name(), "-", src.port_id, "-", cast_string,
"-", id, "-", kSuffix);
++id;
} while (graph_view_.GetNode(name));
NodeDef node;
node.set_name(name);
node.set_op("Cast");
node.set_device(device);
node.add_input(strings::StrCat(src.node->name(), ":", src.port_id));
(*node.mutable_attr())["SrcT"].set_type(src_type);
(*node.mutable_attr())["DstT"].set_type(dst_type);
(*node.mutable_attr())["Truncate"].set_b(false);
return node;
}
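// Returns true if a kernel is registered for this node's op with type
// attribute `taid` set to the f16 target type, using the node's device (or
// its canonical placement if no device is set).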
bool AutoMixedPrecisionImpl::NodeHasF16KernelForTypeAttr(
const NodeDef& node, TypeAttrId taid) const {
NodeDef node_copy(node);
if (node.device().empty()) {
string device_name = virtual_placer_.get_canonical_device_name(node);
node_copy.set_device(device_name);
}
if (!SetDataType(&node_copy, taid, target_dtype_)) {
return false;
}
return IsKernelRegisteredForNode(node_copy).ok();
}
Status AutoMixedPrecisionImpl::PrintDebugLogs(bool preop, size_t timestamp) {
string prepend_path;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LOG_PATH", "", &prepend_path));
if (prepend_path.empty()) return absl::OkStatus();
string suffix =
strings::StrCat("_", preop ? "preop" : kSuffix, "_", id_, "_", timestamp);
string fname =
io::JoinPath(prepend_path, strings::StrCat("graphdef", suffix, ".pb"));
std::fstream f;
f.open(fname.c_str(), std::fstream::out | std::fstream::binary);
f << graph_->SerializeAsString();
f.close();
LOG(INFO) << "Saved " << (preop ? "pre-optimization" : "post-optimization")
<< " graph as binary to " << fname;
fname = io::JoinPath(prepend_path,
strings::StrCat("graphdef", suffix, ".pb.txt"));
f.open(fname.c_str(), std::fstream::out);
f << graph_->DebugString();
f.close();
LOG(INFO) << "Saved " << (preop ? "pre-optimization" : "post-optimization")
<< " graph as text to " << fname;
if (!preop) {
fname = io::JoinPath(prepend_path,
strings::StrCat("paintbuckets", suffix, ".txt"));
f.open(fname.c_str(), std::fstream::out);
std::unique_ptr<AutoMixedPrecisionLists> mp_lists =
get_mixed_precision_lists();
f << "AllowList:\n";
for (const auto& x : mp_lists->AllowList()) {
f << x << "\n";
}
f << "\nDenyList:\n";
for (const auto& x : mp_lists->DenyList()) {
f << x << "\n";
}
f << "\nInferList:\n";
for (const auto& x : mp_lists->InferList()) {
f << x << "\n";
}
f << "\nClearList:\n";
for (const auto& x : mp_lists->ClearList()) {
f << x << "\n";
}
f.close();
LOG(INFO) << "Saved paint bucket info to " << fname;
}
return absl::OkStatus();
}
void AutoMixedPrecisionImpl::LogSkippedNode(const NodeDef& node,
const string& device_type) const {
VLOG(2) << "Skipping " << node.op() << " node " << node.name()
<< " because it "
<< (MustPreserve(node)
? "must be preserved"
: absl::StrFormat(
"is not on the %s, or the %s arch is not suitable",
device_type, device_type));
}
bool AutoMixedPrecisionImpl::MustPreserve(const NodeDef& node) const {
return nodes_to_preserve_.count(node.name());
}
bool AutoMixedPrecisionImpl::IsOnDevice(const NodeDef& node,
const string& device_type) const {
string device_name;
if (node.device().empty()) {
device_name = virtual_placer_.get_canonical_device_name(node);
} else {
device_name = node.device();
}
string device;
string not_used;
if (DeviceNameUtils::SplitDeviceName(device_name, ¬_used, &device) &&
absl::StrContains(absl::AsciiStrToLower(device),
absl::AsciiStrToLower(device_type))) {
return true;
}
return false;
}
bool AutoMixedPrecisionImpl::IsOnSuitableGPUArch(const NodeDef& node) const {
return HasFastFP16Support(virtual_placer_.get_device(node));
}
bool AutoMixedPrecisionImpl::ShouldProcess(const NodeDef& node) const {
return should_process_nodes_.count(&node);
}
bool IsFloat32(const NodeTypeId& node_type) {
return GetDataType(*node_type.node, node_type.type_attr) ==
DataType::DT_FLOAT;
}
bool IsTensorListOp(const string& op) {
return absl::StrContains(op, "TensorList");
}
bool IsTensorListReaderOp(const string& op) {
static const gtl::FlatSet<string> tensor_list_reader_ops = {
"TensorListConcat", "TensorListConcatV2", "TensorListGather",
"TensorListGetItem", "TensorListPopBack", "TensorListStack"};
return tensor_list_reader_ops.count(op);
}
bool IsTensorListWriterOp(const string& op) {
static const gtl::FlatSet<string> tensor_list_writer_ops = {
"TensorListFromTensor", "TensorListPushBack",
"TensorListPushBackBatch", "TensorListScatter",
"TensorListScatterV2", "TensorListScatterIntoExistingList",
"TensorListSetItem", "TensorListSplit"};
return tensor_list_writer_ops.count(op);
}
bool AutoMixedPrecisionImpl::SupportsF16(const NodeTypeId& node_type) const {
const OpDef* op_def;
Status status =
OpRegistry::Global()->LookUpOpDef(node_type.node->op(), &op_def);
if (!status.ok()) return false;
return AllowedDataTypes(*op_def, node_type.type_attr)
.Contains(target_dtype_) &&
NodeHasF16KernelForTypeAttr(*node_type.node, node_type.type_attr);
}
bool AutoMixedPrecisionImpl::SupportsF16DataType(
const NodeTypeId& node_type) const {
const OpDef* op_def;
Status status =
OpRegistry::Global()->LookUpOpDef(node_type.node->op(), &op_def);
if (!status.ok()) return false;
return AllowedDataTypes(*op_def, node_type.type_attr).Contains(target_dtype_);
}
bool AutoMixedPrecisionImpl::IsQuantized(const NodeTypeId& node_type) const {
for (const TypeAttrId& type_attr :
node_type_map_.GetTypeAttrs(*node_type.node)) {
if (DataTypeIsQuantized(GetDataType(*node_type.node, type_attr))) {
return true;
}
}
return false;
}
void AutoMixedPrecisionImpl::ConvertBatchNormOpsToV2() {
for (int node_idx = 0; node_idx < graph_->node_size(); ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
if (!ShouldProcess(*node)) continue;
bool changed = false;
if (node->op() == "FusedBatchNorm") {
VLOG(2) << "Changing op of " << node->op() << " node " << node->name()
<< " to FusedBatchNormV2";
node->set_op("FusedBatchNormV2");
changed = true;
} else if (node->op() == "FusedBatchNormGrad") {
VLOG(2) << "Changing op of " << node->op() << " node " << node->name()
<< " to FusedBatchNormGradV2";
node->set_op("FusedBatchNormGradV2");
changed = true;
}
if (changed) {
(*node->mutable_attr())["U"].set_type(DT_FLOAT);
}
}
}
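// Returns true if TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE is
// set, in which case nodes are converted even on devices that are not
// expected to benefit (e.g. GPUs without fast fp16 support).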
bool ShouldIgnorePerformance() {
static bool is_enabled = [] {
bool ret = false;
    TF_CHECK_OK(ReadBoolFromEnvVar(
        "TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE",
        /*default_val=*/false, &ret));
return ret;
}();
return is_enabled;
}
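// Runs the rewrite end to end: reads the optimization level, builds the op
// lists and the graph type-attribute view, runs the painting passes, and
// finally changes the painted type attributes and inserts Casts.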
Status AutoMixedPrecisionImpl::Optimize() {
string optimization_level;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL", "", &optimization_level));
optimization_level = absl::AsciiStrToUpper(optimization_level);
force_all_fp16_ = optimization_level == "UNSAFE_FORCE_ALL";
if (force_all_fp16_ && (mode_ == AutoMixedPrecisionMode::BF16 ||
mode_ == AutoMixedPrecisionMode::FP16_CPU)) {
return errors::InvalidArgument(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL cannot be set to "
"UNSAFE_FORCE_ALL when oneDNN is used");
}
treat_infer_as_deny_ = optimization_level == "TREAT_INFER_AS_DENY";
VLOG(2) << "Optimization Level: " << optimization_level;
std::unique_ptr<AutoMixedPrecisionLists> mp_lists =
get_mixed_precision_lists();
f16_allowlist_ = mp_lists->AllowList();
f16_denylist_ = mp_lists->DenyList();
if (treat_infer_as_deny_) {
for (const auto& op : mp_lists->InferList()) {
f16_denylist_.insert(op);
}
} else {
f16_inferlist_ = mp_lists->InferList();
}
f16_clearlist_ = mp_lists->ClearList();
TF_RETURN_IF_ERROR(ValidateLists(f16_allowlist_, f16_denylist_,
f16_inferlist_, f16_clearlist_));
size_t timestamp = Env::Default()->NowMicros() / 1000;
  TF_RETURN_IF_ERROR(PrintDebugLogs(/*preop=*/true, timestamp));
VLOG(2) << "Identifying nodes that should be processed";
for (const NodeDef& node : graph_->node()) {
bool should_process;
string device_type;
switch (mode_) {
case AutoMixedPrecisionMode::CUDA:
device_type = DEVICE_GPU;
should_process =
!MustPreserve(node) && IsOnDevice(node, device_type) &&
(ShouldIgnorePerformance() || IsOnSuitableGPUArch(node));
break;
case AutoMixedPrecisionMode::BF16:
case AutoMixedPrecisionMode::CPU:
case AutoMixedPrecisionMode::FP16_CPU:
device_type = DEVICE_CPU;
should_process = !MustPreserve(node) && IsOnDevice(node, device_type);
break;
}
if (should_process) {
should_process_nodes_.insert(&node);
} else {
LogSkippedNode(node, device_type);
}
}
VLOG(2) << "Converting FusedBatchNorm* ops to V2";
ConvertBatchNormOpsToV2();
VLOG(2) << "Building node type map for graph";
TF_RETURN_IF_ERROR(node_type_map_.Init(*graph_));
VLOG(2) << "Constructing graph type attribute topology view";
TF_RETURN_IF_ERROR(
graph_type_view_.InitializeFromGraph(*graph_, node_type_map_));
absl::flat_hash_set<int> deny_set;
std::vector<absl::flat_hash_set<const NodeDef*>> tensor_list_clusters;
FindFloat32TensorListOpClustersAndDenylistUnsafe(&tensor_list_clusters,
&deny_set);
std::vector<NodeTypeIdEdge> ephemeral_edges;
for (const auto& cluster : tensor_list_clusters) {
VLOG(1) << "Found safe Tensor List cluster of size " << cluster.size();
for (const NodeDef* node : cluster) {
VLOG(2) << " Cluster member: " << node->op() << " node " << node->name();
}
FindTensorListImplicitFloat32Edges(cluster, &ephemeral_edges);
}
TF_RETURN_IF_ERROR(graph_type_view_.AddEphemeralEdges(ephemeral_edges));
absl::flat_hash_set<int> allow_set;
VLOG(2) << "Beginning pass 1 to add allowlist ops";
AddAllowlistOps(&allow_set);
VLOG(2) << "Finished pass 1";
if (allow_set.empty()) {
LOG(INFO) << "No allowlist ops found, nothing to do";
return absl::OkStatus();
}
VLOG(2) << "Beginning pass 2 to propagate deny forwards from denylist ops "
"through clear/inferlist ops";
PropagateDenyFwdThroughClearAndInfer(&deny_set);
VLOG(2) << "Finished pass 2";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Beginning pass 3 to set clear and infer nodes to allow if they "
"are between allow ops";
AddClearAndInferToAllowIfBetweenAllow(deny_set, &allow_set);
VLOG(2) << "Finished pass 3";
VLOG(2) << "Beginning pass 4 to add infer list ops to allow if they "
"directly follow allow nodes";
AddInferToAllowIfFollowAllow(deny_set, &allow_set);
VLOG(2) << "Finished pass 4";
VLOG(2) << "Beginning pass 5 to propagate allow from allow nodes through "
"clearlist ops";
PropagateAllowThroughClear(deny_set, &allow_set);
VLOG(2) << "Finished pass 5";
VLOG(2) << "Beginning pass 6 to remove some nodes which could not be changed "
"to F16"
"from allow set";
RemoveAllowsetWithFp32(&allow_set);
VLOG(2) << "Finished pass 6";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Forcing color match on loop edges";
TF_RETURN_IF_ERROR(ForceColorMatchOnRecurrentEdges(&allow_set));
VLOG(2) << "Finding existing casts that can be made allow";
MakeCastsAllowIfAllOutputsAllow(&allow_set);
VLOG(2) << "Beginning final pass to change type attributes and insert Cast "
"ops at paint boundaries";
TF_RETURN_IF_ERROR(ChangeTypeAttrsAndAddCasts(allow_set));
VLOG(2) << "Finished final pass";
  TF_RETURN_IF_ERROR(PrintDebugLogs(/*preop=*/false, timestamp));
return absl::OkStatus();
}
const NodeTypeId* AutoMixedPrecisionImpl::GetTensorListFloat32NodeTypeId(
const NodeDef& node) const {
if (!IsTensorListOp(node.op())) return nullptr;
for (const TypeAttrId& type_attr : node_type_map_.GetTypeAttrs(node)) {
const NodeTypeId* node_type =
graph_type_view_.GetNode(node.name(), type_attr);
if (node_type && node_type->type_attr.fixed_type == DT_INVALID &&
node_type->type_attr.type_index == TypeAttrId::kSingleType &&
IsFloat32(*node_type)) {
return node_type;
}
}
return nullptr;
}
bool AutoMixedPrecisionImpl::IsSourceOrSinkOp(const string& op) const {
const gtl::FlatSet<string> source_and_sink_ops = {
"_Arg",
"_Retval",
"OptionalFromValue",
"OptionalGetValue",
"PartitionedCall",
"Placeholder",
"StatefulPartitionedCall",
};
return source_and_sink_ops.count(op) || function_library_.Find(op);
}
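// Finds clusters of TensorList ops that exchange float32 data through
// DT_VARIANT edges. If a cluster touches a node that cannot be processed, or
// a source/sink op whose types cannot be changed, the cluster's float32 type
// attribute is added to the deny set.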
void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* tensor_list_clusters,
absl::flat_hash_set<int>* deny_set) const {
absl::flat_hash_set<const NodeDef*> tensor_list_prop_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) ||
root.type_attr.fixed_type != DataType::DT_VARIANT ||
!GetTensorListFloat32NodeTypeId(*root.node) ||
tensor_list_prop_set.count(root.node)) {
continue;
}
const NodeTypeId* root_fp32 = GetTensorListFloat32NodeTypeId(*root.node);
const absl::optional<int> maybe_root_fp32_idx =
graph_type_view_.GetNodeIndex(*root_fp32);
DCHECK(maybe_root_fp32_idx.has_value())
<< "Type attribute " << root_fp32->type_attr.DebugString()
<< " of node " << root.node->name() << " not found in graph view";
int root_fp32_idx = maybe_root_fp32_idx.value();
absl::flat_hash_set<const NodeDef*> cluster({root.node});
DfsTypeTraversal(graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputsAndOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return !tensor_list_prop_set.count(item.node);
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
const NodeDef* node = item.node;
if (GetTensorListFloat32NodeTypeId(*node)) {
cluster.insert(node);
if (!ShouldProcess(*node)) {
deny_set->insert(root_fp32_idx);
}
} else if (IsSourceOrSinkOp(node->op())) {
deny_set->insert(root_fp32_idx);
}
}));
tensor_list_clusters->push_back(cluster);
}
}
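// For each TensorList reader in the cluster, walks backwards along its inputs
// and records an ephemeral float32 edge from every writer found to the
// reader, making the implicit data flow through the list visible to the
// painting passes.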
void AutoMixedPrecisionImpl::FindTensorListImplicitFloat32Edges(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
std::vector<NodeTypeIdEdge>* implicit_fp32_edges) const {
for (const NodeDef* root_node : tensor_list_nodes) {
if (!IsTensorListReaderOp(root_node->op())) continue;
NodeTypeId root(root_node, TypeAttrId(DataType::DT_VARIANT));
const NodeTypeId* root_fp32 = GetTensorListFloat32NodeTypeId(*root.node);
CHECK(root_fp32) << "No float32 type attribute found for "
<< root.node->op() << " node " << root.node->name();
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return ShouldProcess(*item.node);
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
if (IsTensorListWriterOp(item.node->op())) {
const NodeTypeId* item_fp32 =
GetTensorListFloat32NodeTypeId(*item.node);
CHECK(item_fp32)
<< "No float32 type attribute found for " << item.node->op()
<< " node " << item.node->name();
VLOG(2) << "Adding ephemeral float32 edge from "
<< item_fp32->node->op() << " node "
<< item_fp32->node->name() << " to "
<< root_fp32->node->op() << " node "
<< root_fp32->node->name();
implicit_fp32_edges->emplace_back(*item_fp32, *root_fp32);
}
}));
}
}
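// Pass 1: paints type attributes ALLOW if the op is on the allowlist (or if
// all ops are being forced to fp16 and the node's type can be forced).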
void AutoMixedPrecisionImpl::AddAllowlistOps(
absl::flat_hash_set<int>* allow_set) const {
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node)) continue;
bool force_allow = force_all_fp16_ && CanForceFP16(*root.node);
if (f16_allowlist_.count(root.node->op()) || force_allow) {
bool inserted = allow_set->insert(root_idx).second;
if (VLOG_IS_ON(2) && inserted) {
VLOG(2) << "Painting type " << root.type_attr.DebugString()
<< " of node " << root.node->name() << " ALLOW because its op "
<< root.node->op() << " is on the allowlist";
}
}
}
}
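// Pass 2: propagates DENY forward from denylist ops, but only through
// clearlist ops that lie on a path leading to another denylist or inferlist
// op, so values feeding numerically sensitive ops stay in float32.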
void AutoMixedPrecisionImpl::PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const {
if (force_all_fp16_) return;
absl::flat_hash_set<int> upstream_of_deny_or_infer_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!(f16_denylist_.count(root.node->op()) ||
f16_inferlist_.count(root.node->op()))) {
continue;
}
DfsTypeTraversal(graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!upstream_of_deny_or_infer_set.count(idx) &&
f16_clearlist_.count(item.node->op()));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
upstream_of_deny_or_infer_set.insert(idx);
}));
}
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (deny_set->count(root_idx) || !f16_denylist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
return idx == root_idx || (!deny_set->count(idx) &&
upstream_of_deny_or_infer_set.count(idx));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
bool inserted = deny_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " DENY";
}
}));
}
}
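// Pass 3: paints clearlist and inferlist nodes ALLOW if they lie between two
// ALLOW nodes (downstream of one allowlist op and upstream of another),
// provided they are float32, support f16, and are not denied.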
void AutoMixedPrecisionImpl::AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
absl::flat_hash_set<int> downstream_of_allow_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || !f16_allowlist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!downstream_of_allow_set.count(idx) &&
!f16_allowlist_.count(item.node->op()) &&
!deny_set.count(idx) && ShouldProcess(*item.node) &&
IsFloat32(item) && SupportsF16(item) &&
(f16_clearlist_.count(item.node->op()) ||
f16_inferlist_.count(item.node->op())));
}),
DfsTypeCallbacks::PreOrder(
[&](int idx) { downstream_of_allow_set.insert(idx); }));
}
absl::flat_hash_set<int> upstream_of_allow_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || upstream_of_allow_set.count(root_idx) ||
!f16_allowlist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
return idx == root_idx || (!upstream_of_allow_set.count(idx) &&
downstream_of_allow_set.count(idx));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
upstream_of_allow_set.insert(idx);
bool inserted = allow_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " ALLOW";
}
}));
}
}
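// Pass 5: propagates ALLOW in both directions from already-allowed nodes
// through clearlist ops that are float32, support f16, and are not denied.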
void AutoMixedPrecisionImpl::PropagateAllowThroughClear(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
absl::flat_hash_set<int> clear_prop_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || clear_prop_set.count(root_idx) ||
!allow_set->count(root_idx)) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputsAndOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!allow_set->count(idx) && !deny_set.count(idx) &&
ShouldProcess(*item.node) && IsFloat32(item) &&
SupportsF16(item) &&
(f16_clearlist_.count(item.node->op())) &&
!NodeImplicitlyReadsNonResourceVariable(*item.node));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
clear_prop_set.insert(idx);
bool inserted = allow_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " ALLOW";
}
}));
}
}
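// Pass 4 (BF16 mode only): paints an inferlist node ALLOW if at least one of
// its fanins is ALLOW and none of them are DENY.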
void AutoMixedPrecisionImpl::AddInferToAllowIfFollowAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
if (mode_ != AutoMixedPrecisionMode::BF16) {
return;
}
for (int item_idx = 0; item_idx < graph_type_view_.num_nodes(); ++item_idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(item_idx);
if (!ShouldProcess(*item.node) || deny_set.count(item_idx) ||
allow_set->count(item_idx) || !f16_inferlist_.count(item.node->op()) ||
!IsFloat32(item) || !SupportsF16DataType(item)) {
continue;
}
bool has_allow_fanin = false;
for (const int fanin : graph_type_view_.GetFanin(item_idx)) {
if (deny_set.count(fanin)) {
has_allow_fanin = false;
break;
}
if (allow_set->count(fanin)) {
has_allow_fanin = true;
}
}
if (has_allow_fanin) {
bool inserted = allow_set->insert(item_idx).second;
if (VLOG_IS_ON(2) && inserted) {
VLOG(2) << "Painting type " << item.type_attr.DebugString() << " of "
<< item.node->op() << " node " << item.node->name() << " ALLOW";
}
}
}
}
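// Pass 6: removes nodes from the allow set if their op does not actually
// support the f16 target type for this attribute or the node is quantized.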
void AutoMixedPrecisionImpl::RemoveAllowsetWithFp32(
absl::flat_hash_set<int>* allow_set) const {
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (f16_allowlist_.count(root.node->op()) && allow_set->count(root_idx) &&
(!SupportsF16DataType(root) || IsQuantized(root))) {
auto erased = allow_set->erase(root_idx);
if (VLOG_IS_ON(2) && erased) {
VLOG(2) << "UnPainting type " << root.type_attr.DebugString()
<< " of node " << root.node->name() << " ALLOW because its op "
<< root.node->op() << " is not support F16 DataType";
}
}
}
}
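// Keeps both endpoints of loop back edges the same color: if any Merge fed by
// a NextIteration node is not ALLOW, all sibling Merges and the NextIteration
// node are removed from the allow set; otherwise the NextIteration node is
// painted ALLOW to match its Merges.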
Status AutoMixedPrecisionImpl::ForceColorMatchOnRecurrentEdges(
absl::flat_hash_set<int>* allow_set) const {
for (const NodeDef& node : graph_->node()) {
if (node.op() == "NextIteration") {
GraphView::OutputPort output_port(&node, 0);
const auto& fanout = graph_view_.GetFanout(output_port);
std::vector<int> merge_idxs;
merge_idxs.reserve(fanout.size());
bool any_merge_is_not_allow = false;
for (const auto& output : fanout) {
const NodeDef& merge_node = *output.node;
if (merge_node.op() != "Merge") {
return errors::FailedPrecondition(
"Expected Merge node after NextIteration, got ", merge_node.op());
}
const absl::optional<int> maybe_merge_idx =
graph_type_view_.GetNodeIndex(merge_node.name(), TypeAttrId("T"));
if (!maybe_merge_idx.has_value()) {
return errors::Internal("Type attribute T of Merge node ",
merge_node.name(),
" not found in graph view");
}
int merge_idx = maybe_merge_idx.value();
merge_idxs.push_back(merge_idx);
any_merge_is_not_allow =
any_merge_is_not_allow || !allow_set->count(merge_idx);
}
const absl::optional<int> maybe_nextiter_idx =
graph_type_view_.GetNodeIndex(node.name(), TypeAttrId("T"));
if (!maybe_nextiter_idx.has_value()) {
return errors::Internal("Type attribute T of NextIteration node ",
node.name(), " not found in graph view");
}
int nextiter_idx = maybe_nextiter_idx.value();
if (any_merge_is_not_allow) {
for (int merge_idx : merge_idxs) {
if (allow_set->erase(merge_idx)) {
VLOG(2) << "Painting type T of Merge node "
<< graph_type_view_.GetNode(merge_idx)->node->name()
<< " DENY to match the color of its sibling Merge nodes "
"with common NextIteration node "
<< node.name();
}
}
if (allow_set->erase(nextiter_idx)) {
VLOG(2) << "Painting type T of NextIteration node " << node.name()
<< " DENY to match the color of its output Merge node(s)";
}
} else {
if (allow_set->insert(nextiter_idx).second) {
VLOG(2) << "Painting type T of NextIteration node " << node.name()
<< " ALLOW to match the color of its output Merge node(s)";
}
}
}
}
return absl::OkStatus();
}
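// Forces all ops in a TensorList cluster to share one color, since they all
// read or write the same list: DENY wins if any member is denied, otherwise
// ALLOW if any member is allowed.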
void AutoMixedPrecisionImpl::ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* deny_set) const {
bool any_deny = false;
bool any_allow = false;
std::vector<int> node_type_idxs;
node_type_idxs.reserve(tensor_list_nodes.size());
for (const NodeDef* node : tensor_list_nodes) {
const NodeTypeId& node_type = *GetTensorListFloat32NodeTypeId(*node);
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node_type);
DCHECK(maybe_node_type_idx.has_value())
<< "Type attribute " << node_type.type_attr.DebugString() << " of node "
<< node->name() << " not found in graph view";
node_type_idxs.push_back(maybe_node_type_idx.value());
}
for (int node_type_idx : node_type_idxs) {
if (deny_set->count(node_type_idx)) {
any_deny = true;
break;
} else if (allow_set->count(node_type_idx)) {
any_allow = true;
}
}
if (!any_deny && !any_allow) return;
for (int node_type_idx : node_type_idxs) {
const NodeTypeId& node_type = *graph_type_view_.GetNode(node_type_idx);
VLOG(2) << "Painting type " << node_type.type_attr.DebugString() << " of "
<< node_type.node->op() << " node " << node_type.node->name() << " "
<< (any_deny ? "DENY" : "ALLOW")
<< " because at least one of its siblings is "
<< (any_deny ? "DENY" : "ALLOW");
if (any_deny) {
allow_set->erase(node_type_idx);
deny_set->insert(node_type_idx);
} else {
allow_set->insert(node_type_idx);
}
}
}
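// Returns true for Identity nodes fed directly by a non-resource Variable or
// VariableV2, and for Enter nodes that transitively read one.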
bool AutoMixedPrecisionImpl::NodeImplicitlyReadsNonResourceVariable(
const NodeDef& node) const {
if (node.op() == "Identity" || node.op() == "Enter") {
GraphView::InputPort node_input(&node, 0);
MutableGraphView::OutputPort prev_output =
graph_view_.GetRegularFanin(node_input);
const NodeDef* input = prev_output.node;
if (input && ((node.op() == "Identity" && (input->op() == "Variable" ||
input->op() == "VariableV2")) ||
(node.op() == "Enter" &&
NodeImplicitlyReadsNonResourceVariable(*input)))) {
return true;
}
}
return false;
}
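// Paints the DstT attribute of existing float32-producing Cast nodes ALLOW
// when every fanout is already ALLOW, so the existing cast is retyped instead
// of being followed by a newly inserted one.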
void AutoMixedPrecisionImpl::MakeCastsAllowIfAllOutputsAllow(
absl::flat_hash_set<int>* allow_set) const {
int num_nodes_preop = graph_->node_size();
for (int node_idx = 0; node_idx < num_nodes_preop; ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
NodeTypeId node_type(node, TypeAttrId("DstT"));
if (node->op() != "Cast" || !IsFloat32(node_type)) {
continue;
}
bool all_fanouts_allow = true;
MutableGraphView::OutputPort src(node, 0);
const auto& fanout = graph_view_.GetFanout(src);
for (const MutableGraphView::InputPort& dst : fanout) {
TypeAttrId dst_type_attr =
node_type_map_.GetInputTypeAttr(*dst.node, dst.port_id);
const absl::optional<int> maybe_dst_type_idx =
graph_type_view_.GetNodeIndex(dst.node->name(), dst_type_attr);
DCHECK(maybe_dst_type_idx.has_value())
<< "Type attribute " << dst_type_attr.DebugString() << " of node "
<< dst.node->name() << " not found in graph view";
int dst_type_idx = maybe_dst_type_idx.value();
bool dst_is_allow = allow_set->count(dst_type_idx);
if (!dst_is_allow) {
all_fanouts_allow = false;
break;
}
}
if (!fanout.empty() && all_fanouts_allow) {
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node_type);
DCHECK(maybe_node_type_idx.has_value())
<< "Type attribute " << node_type.type_attr.DebugString()
<< " of node " << node_type.node->name()
<< " not found in graph view";
int node_type_idx = maybe_node_type_idx.value();
allow_set->insert(node_type_idx);
}
}
}
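// Inserts a Cast on the fanout edges of `src` that need one and rewires those
// fanins to it; a single added Cast node is shared by all destinations. With
// CastType::AUTO a cast is inserted only where the ALLOW color changes across
// the edge.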
absl::StatusOr<NodeDef*> AutoMixedPrecisionImpl::InsertCastNodeAtFanout(
const absl::flat_hash_set<int>& allow_set, const bool src_is_allow,
const CastType& cast_type, MutableGraphView::OutputPort& src) {
NodeDef* added_cast_node = nullptr;
auto fanout = graph_view_.GetFanout(src);
for (const MutableGraphView::InputPort& dst : fanout) {
TypeAttrId dst_type_attr =
node_type_map_.GetInputTypeAttr(*dst.node, dst.port_id);
const absl::optional<int> maybe_dst_type_idx =
graph_type_view_.GetNodeIndex(dst.node->name(), dst_type_attr);
if (!maybe_dst_type_idx.has_value()) {
return errors::Internal("Type attribute ", dst_type_attr.DebugString(),
" of ", dst.node->op(), " node ",
dst.node->name(), " not found in graph view");
}
int dst_type_idx = maybe_dst_type_idx.value();
bool dst_is_allow = allow_set.count(dst_type_idx);
bool to_f16 = false;
bool should_cast = false;
switch (cast_type) {
case CastType::AUTO:
if (src_is_allow != dst_is_allow) {
to_f16 = dst_is_allow;
should_cast = true;
}
break;
case CastType::FP16:
to_f16 = true;
should_cast = true;
break;
case CastType::FP32:
to_f16 = false;
should_cast = true;
break;
default:
return errors::Internal("Invalid Cast Type: ",
static_cast<int>(cast_type));
}
if (!should_cast) continue;
if (added_cast_node == nullptr) {
VLOG(1) << "Inserting cast to "
<< (to_f16 ? DataTypeString(target_dtype_) : "DT_FLOAT") << " at "
<< src.node->op() << " " << src.node->name() << ":"
<< src.port_id;
added_cast_node =
graph_view_.AddNode(BuildCastNode(src, to_f16, src.node->device()));
if (to_f16 && !IsConstant(*src.node) && !IsVariable(*src.node) &&
!NodeImplicitlyReadsNonResourceVariable(*src.node)) {
++num_nonvar_casts_to_f16_;
}
}
TF_RETURN_IF_ERROR(graph_view_.UpdateRegularFaninByPort(
dst.node->name(), dst.port_id, {added_cast_node->name(), 0}));
}
return added_cast_node;
}
absl::StatusOr<DataType> AutoMixedPrecisionImpl::GetCastToType(
const NodeDef* node) const {
CHECK_EQ(node->op(), "Cast")
<< "Node " << node->name() << " is not a Cast op";
return node->attr().at("DstT").type();
}
void AutoMixedPrecisionImpl::CollectOutputPorts(
const TypeAttrId& type_attr, NodeDef* node,
std::vector<MutableGraphView::OutputPort>& output_ports) const {
for (int port_id : node_type_map_.GetOutputPorts(*node, type_attr)) {
output_ports.emplace_back(node, port_id);
}
}
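// Final pass: for each float32 type attribute, either changes it to the f16
// target type (if painted ALLOW) or, when emulating f16 on CPU, leaves the op
// in float32 and brackets it with casts (to float32 at fanins, to f16 at
// fanouts); Casts are then inserted at fanouts wherever the color changes.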
Status AutoMixedPrecisionImpl::ChangeTypeAttrsAndAddCasts(
const absl::flat_hash_set<int>& allow_set) {
int num_nodes_changed = 0;
const int num_nodes_preop = graph_->node_size();
bool emulate_f16 = false;
if (mode_ == AutoMixedPrecisionMode::CPU) {
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_EMULATE_FP16",
true, &emulate_f16));
}
VLOG(1) << "Setting emulate_f16 = " << emulate_f16;
for (int node_idx = 0; node_idx < num_nodes_preop; ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
for (const TypeAttrId& type_attr : node_type_map_.GetTypeAttrs(*node)) {
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node->name(), type_attr);
if (!maybe_node_type_idx.has_value()) {
return errors::Internal("Type attribute ", type_attr.DebugString(),
" of ", node->op(), " node ", node->name(),
" not found in graph view");
}
int node_type_idx = maybe_node_type_idx.value();
if (!IsFloat32(*graph_type_view_.GetNode(node_type_idx))) continue;
bool src_is_allow = allow_set.count(node_type_idx);
std::vector<MutableGraphView::OutputPort> output_ports;
if (src_is_allow) {
if (emulate_f16) {
for (int port_id : node_type_map_.GetInputPorts(*node, type_attr)) {
VLOG(2) << "Cast to F32 at fanin of node " << node->name() << ":"
<< port_id;
MutableGraphView::InputPort dst(node, port_id);
MutableGraphView::OutputPort src = graph_view_.GetRegularFanin(dst);
NodeDef* added_cast_node = graph_view_.AddNode(
BuildCastNode(src, false, src.node->device()));
VLOG(1) << "Inserting cast to DT_FLOAT at " << src.node->op() << " "
<< src.node->name() << ":" << src.port_id;
TF_RETURN_IF_ERROR(graph_view_.UpdateRegularFaninByPort(
dst.node->name(), dst.port_id, {added_cast_node->name(), 0}));
}
for (int port_id : node_type_map_.GetOutputPorts(*node, type_attr)) {
MutableGraphView::OutputPort src(node, port_id);
VLOG(2) << "Cast to F16 at fanout of node " << node->name() << ":"
<< port_id;
TF_ASSIGN_OR_RETURN(NodeDef * added_cast_node,
InsertCastNodeAtFanout(allow_set, src_is_allow,
CastType::FP16, src));
if (added_cast_node != nullptr) {
output_ports.emplace_back(added_cast_node, 0);
}
}
} else {
VLOG(1) << "Changing type " << type_attr.DebugString() << " of "
<< node->op() << " node " << node->name() << " to "
<< DataTypeString(target_dtype_);
if (!SetDataType(node, type_attr, target_dtype_)) {
return errors::Internal("Failed to set type attribute");
}
++num_nodes_changed;
CollectOutputPorts(type_attr, node, output_ports);
}
} else {
CollectOutputPorts(type_attr, node, output_ports);
}
for (auto output_port : output_ports) {
VLOG(2) << "Cast to required data type at fanout of node "
<< output_port.node->name() << ":" << output_port.port_id;
TF_RETURN_IF_ERROR(InsertCastNodeAtFanout(allow_set, src_is_allow,
CastType::AUTO, output_port)
.status());
}
}
}
const char* type_str = target_dtype_ == DT_HALF ? "float16" : "bfloat16";
LOG(INFO) << "Converted " << num_nodes_changed << "/" << num_nodes_preop
<< " nodes to " << type_str << " precision using "
<< num_nonvar_casts_to_f16_ << " cast(s) to " << type_str
<< " (excluding Const and Variable casts)";
return absl::OkStatus();
}
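// Counts GPUs expected to benefit from fp16 (all GPUs if performance checks
// are ignored); returns 1 when GPU simulation is enabled.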
int GetNumGPUs(const Cluster& cluster) {
if (ShouldSimulateGpu()) {
return 1;
}
auto devices = cluster.GetDevices();
int num_gpus = 0;
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU" &&
(ShouldIgnorePerformance() || HasFastFP16Support(device_properties))) {
num_gpus++;
}
}
return num_gpus;
}
}
Status AutoMixedPrecision::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
if (cluster == nullptr) {
return errors::InvalidArgument("cluster == nullptr");
}
#if !defined(INTEL_MKL)
if (mode_ == AutoMixedPrecisionMode::BF16) {
return errors::Unimplemented(
"The auto_mixed_precision_onednn_bfloat16 optimizer cannot be used "
"since this build of TensorFlow is not compiled with oneDNN support "
"for bfloat16. "
"For information on oneDNN builds, see: "
"https:
"tensorflow-installation-guide");
}
#endif
*output = item.graph;
int num_gpus = GetNumGPUs(*cluster);
if (num_gpus < 1 && mode_ == AutoMixedPrecisionMode::CUDA) {
VLOG(1) << "No (suitable) GPUs detected, skipping " << name()
<< " graph optimizer";
return absl::OkStatus();
}
if (mode_ == AutoMixedPrecisionMode::FP16_CPU &&
!IsAMXDataTypeSupportedByOneDNNOnThisCPU(DT_HALF) &&
!IsAVXConvertSupportedByOneDNNOnThisCPU()) {
VLOG(1) << "No support for " << name() << " graph optimizer on CPU";
return absl::OkStatus();
}
if (num_gpus >= 1 && mode_ == AutoMixedPrecisionMode::BF16) {
LOG(WARNING) << "Note: GPUs detected. Using " << name()
<< " graph optimizer configured for BFloat16 on CPUs";
}
AutoMixedPrecisionImpl optimizer(cluster, item.NodesToPreserve(), output,
item.id, mode_);
if (item.id == "tf_graph") {
LOG(INFO) << "Running " << name() << " graph optimizer";
} else {
VLOG(1) << "Running " << name() << " graph optimizer on " << item.id;
}
Status status = optimizer.Optimize();
if (!status.ok()) {
*output = item.graph;
LOG(WARNING) << name() << " graph optimizer FAILED: " << status.ToString();
}
return status;
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM || INTEL_MKL
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::ContainsRegex;
using ::testing::SizeIs;
template <DataType DTYPE>
Tensor GenerateIdentityMatrix(int64_t height, int64_t width) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, TensorShape{height, width});
for (int64_t i = 0; i < height; ++i) {
for (int64_t j = 0; j < width; ++j) {
tensor.matrix<T>()(i, j) = i == j;
}
}
return tensor;
}
template <DataType DTYPE>
Tensor GenerateRandomTensorInRange(const TensorShape& shape, double minval,
double maxval) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
for (auto i = 0; i < tensor.NumElements(); i++)
tensor.flat<T>()(i) =
(random::New64() % 65536 / 65536.0) * (maxval - minval) + minval;
return tensor;
}
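// Checks that the optimized graph has the same nodes, ops, and inputs as the
// original, i.e. that the optimizer made no structural changes.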
void VerifyGraphsEquivalent(const GraphDef& original_graph,
const GraphDef& optimized_graph,
const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
GraphView optimized_view(&optimized_graph);
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = *optimized_view.GetNode(original.name());
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
if (original.input_size() == optimized.input_size()) {
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
}
const std::pair<int, int> kMinGPUArch = {7, 0};
class AutoMixedPrecisionTest : public GrapplerTest {
protected:
void SetMode(AutoMixedPrecisionMode mode) { mode_ = mode; }
void SetUp() override {
if (mode_ == AutoMixedPrecisionMode::CUDA) {
int num_gpus = GetNumAvailableGPUs();
gpu_available_ = (num_gpus > 0);
#if GOOGLE_CUDA
gpu_available_ =
gpu_available_ && (num_gpus == GetNumAvailableGPUs(kMinGPUArch));
#else
gpu_available_ = false;
#endif
if (gpu_available_) {
        virtual_cluster_.reset(new SingleMachine(
            /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/1));
} else {
DeviceProperties device_properties;
device_properties.set_type("GPU");
#if GOOGLE_CUDA
device_properties.mutable_environment()->insert({"architecture", "7"});
device_properties.mutable_environment()->insert({"cuda", "9010"});
#else
device_properties.mutable_environment()->insert(
{"architecture", "gfx906"});
#endif
virtual_cluster_.reset(
new VirtualCluster({{"/GPU:1", device_properties}}));
}
} else if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
DeviceProperties device_properties;
device_properties.set_type("CPU");
      virtual_cluster_.reset(new SingleMachine(
          /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
bool is_fp16_enabled_on_cpu = false;
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
is_fp16_enabled_on_cpu =
IsAMXDataTypeSupportedByOneDNNOnThisCPU(DT_HALF) ||
IsAVXConvertSupportedByOneDNNOnThisCPU();
#endif
if (!IsMKLEnabled() || !is_fp16_enabled_on_cpu) {
GTEST_SKIP() << "This device doesn't support FP16";
}
}
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
NodeDef* AddSimpleNode(const string& name, const string& op,
const std::vector<string>& inputs,
GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
if (op == "AddN" || op == "ShapeN") {
AttrValue num_inputs;
num_inputs.set_i(inputs.size());
attributes.emplace_back("N", num_inputs);
}
if (op == "ShapeN") {
AttrValue out_type;
out_type.set_type(DT_INT32);
attributes.emplace_back("out_type", out_type);
}
AttrValue type;
type.set_type(DT_FLOAT);
if (op == "Const" || op == "Placeholder" || op == "VariableV2" ||
op == "VarHandleOp" || op == "ReadVariableOp") {
attributes.emplace_back("dtype", type);
} else if (op == "SparseMatMul") {
attributes.emplace_back("Ta", type);
attributes.emplace_back("Tb", type);
} else if (op == "IdentityN") {
AttrValue type_list;
for (int i = 0; i < static_cast<int>(inputs.size()); ++i) {
type_list.mutable_list()->add_type(DT_FLOAT);
}
attributes.emplace_back("T", type_list);
} else if (op == "StackV2" || op == "StackPopV2") {
attributes.emplace_back("elem_type", type);
} else if (op == "Cast") {
attributes.emplace_back("SrcT", type);
attributes.emplace_back("DstT", type);
} else {
attributes.emplace_back("T", type);
}
return AddNode(name, op, inputs, attributes, graph);
}
void TestSimpleUnaryInferOp(
double input_min, double input_max, double atol, double rtol,
const std::function<Output(const tensorflow::Scope&, Output)>&
test_op_factory) {
int size = 128;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output eye = ops::Const(s.WithOpName("eye"),
GenerateIdentityMatrix<DT_FLOAT>(size, size));
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, eye);
Output infer1 = test_op_factory(s.WithOpName("infer1"), allow1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, eye);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_tensor = GenerateRandomTensorInRange<DT_FLOAT>(
TensorShape({size, size}), input_min, input_max);
std::vector<std::pair<string, Tensor>> feed = {{"input", input_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], atol, rtol);
}
}
std::unique_ptr<Cluster> virtual_cluster_;
bool gpu_available_;
AutoMixedPrecisionMode mode_;
};
class AutoMixedPrecisionParamTest
: public AutoMixedPrecisionTest,
public ::testing::WithParamInterface<AutoMixedPrecisionMode> {
protected:
void SetUp() override {
mode_ = GetParam();
AutoMixedPrecisionTest::SetMode(mode_);
AutoMixedPrecisionTest::SetUp();
}
AutoMixedPrecisionMode mode_;
};
TEST_P(AutoMixedPrecisionParamTest, NoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.234f, {32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, AlreadyFp16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_HALF);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output cst2 = ops::Cast(s.WithOpName("cst2"), clr1, DT_FLOAT);
Output clr2 = ops::Relu(s.WithOpName("clr2"), cst2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("DstT").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
TEST_P(AutoMixedPrecisionParamTest, NoInferOp) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL", "TREAT_INFER_AS_DENY",
1 );
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr4, clr4);
Output infer3 = ops::Log(s.WithOpName("infer3"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), infer3);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer3")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
unsetenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL");
}
TEST_P(AutoMixedPrecisionParamTest, BidirectionalClearChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output clr2 = ops::Relu(s.WithOpName("clr2"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
auto clr3 = ops::ShapeN(s.WithOpName("clr3"), {clr1, clr2});
Output clr4 = ops::Relu(s.WithOpName("clr4"), clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), clr4);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, PreserveFetches) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), deny1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow2);
Output deny2 = ops::Exp(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
GrapplerItem item;
item.fetch = {"allow1", "clr2", "clr3"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
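// Tests that an op explicitly pinned to the CPU (allow2) stays in fp32 while
// the unpinned MatMul (allow1) is converted. Skipped in FP16_CPU mode, where
// running on the CPU is the expected case.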
TEST_P(AutoMixedPrecisionParamTest, PreserveCPUNodes) {
if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
GTEST_SKIP() << "This test is not required on CPU";
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
Output allow2 =
ops::MatMul(s.WithOpName("allow2").WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
infer1, infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
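// Tests that an Identity fed directly by a Variable (clr1) is kept in fp32,
// while an Identity fed by a Const (clr2) may be converted to fp16.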
TEST_P(AutoMixedPrecisionParamTest, PreserveIdentityAfterVariable) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output var1 = ops::Variable(s.WithOpName("var1"), {32, 32}, DT_FLOAT);
Output clr1 = ops::Identity(s.WithOpName("clr1"), var1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, clr1);
Output input2 = ops::Const(s.WithOpName("input2"), 1.f / 32, {32, 32});
Output clr2 = ops::Identity(s.WithOpName("clr2"), input2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), input, clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), allow2);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto var1_tensor =
GenerateConstantTensor<DT_FLOAT>(TensorShape({32, 32}), 3.141593f);
std::vector<std::pair<string, Tensor>> feed = {{"var1", var1_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 5);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("var1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
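// Tests that FusedBatchNorm/FusedBatchNormGrad between fp16 Conv2Ds are
// upgraded to their V2 variants, which take fp16 data (T) while keeping the
// scale/offset statistics in fp32 (U).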
TEST_P(AutoMixedPrecisionParamTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {8, 56, 56, 16});
Output weight = ops::Const(s.WithOpName("weight"), 2.f, {3, 3, 16, 16});
Output scale = ops::Const(s.WithOpName("scale"), 3.f, {16});
Output offset = ops::Const(s.WithOpName("offset"), 4.f, {16});
Output mean = ops::Const(s.WithOpName("mean"), 5.f, {0});
Output variance = ops::Const(s.WithOpName("variance"), 6.f, {0});
Output allow1 =
ops::Conv2D(s.WithOpName("allow1"), input, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
auto fbn1_op =
ops::FusedBatchNorm(s.WithOpName("fbn1"), allow1, scale, offset, mean,
variance, ops::FusedBatchNorm::DataFormat("NHWC"));
Output fbn1 = fbn1_op.y;
Output fbn1_rs1 = fbn1_op.reserve_space_1;
Output fbn1_rs2 = fbn1_op.reserve_space_2;
Output bng1 = ops::FusedBatchNormGrad(
s.WithOpName("bng1"), fbn1, allow1, scale, fbn1_rs1,
fbn1_rs2, ops::FusedBatchNormGrad::DataFormat("NHWC"))
.x_backprop;
Output infer1 = ops::Add(s.WithOpName("infer1"), fbn1, bng1);
Output allow2 =
ops::Conv2D(s.WithOpName("allow2"), infer1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->op(), "FusedBatchNormV2");
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("bng1")->op(), "FusedBatchNormGradV2");
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
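// Tests that ops whose type attribute is a list attr (IdentityN) have every
// entry of the list converted to fp16.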
TEST_P(AutoMixedPrecisionParamTest, RepeatedAndListTypeAttrs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto clr1_op = ops::IdentityN(s.WithOpName("clr1"), {allow1, allow1, allow1});
Output infer1 =
ops::AddN(s.WithOpName("infer1"),
{clr1_op.output[0], clr1_op.output[1], clr1_op.output[2]});
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
for (auto type : output_view.GetNode("clr1")->attr().at("T").list().type()) {
EXPECT_EQ(type, DT_HALF);
}
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
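// Tests that a pre-existing Cast is retargeted (bool -> fp16 instead of
// bool -> fp32) rather than surrounded by extra Cast nodes; only one node is
// added to the graph.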
TEST_P(AutoMixedPrecisionParamTest, ExistingCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), true, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow1);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 1);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("SrcT").type(), DT_BOOL);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
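// Builds a while-loop skeleton (Enter/Merge/Switch/NextIteration/Exit) whose
// back edge is rewired after graph construction. The loop-carried values must
// stay fp32 so both Merge inputs keep matching types; only the MatMul body op
// becomes fp16.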
TEST_P(AutoMixedPrecisionParamTest, RecurrentEdgeColorMismatch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output ent1 =
ops::internal::Enter(s.WithOpName("ent1"), deny1, "loop1").output;
Output mrg1 = ops::Merge(s.WithOpName("mrg1"), {ent1, ent1}).output;
Output con1 = ops::Const(s.WithOpName("con1"), false, {});
Output lpc1 = ops::LoopCond(s.WithOpName("lpc1"), con1).output;
auto swt1 = ops::Switch(s.WithOpName("swt1"), mrg1, lpc1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), swt1.output_true);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), infer1, infer1);
Output nxt1 = ops::NextIteration(s.WithOpName("nxt1"), allow1);
Output ext1 = ops::internal::Exit(s.WithOpName("ext1"), swt1.output_false);
Output fetch = ops::Identity(s.WithOpName("fetch"), ext1);
auto mrg2 = ops::Merge(s.WithOpName("mrg2"), {ent1, nxt1});
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
NodeMap node_map_original(&item.graph);
auto merge_node = node_map_original.GetNode("mrg1");
merge_node->set_input(1, "nxt1");
auto const_node = node_map_original.GetNode("con1");
const_node->add_input("^mrg1");
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ent1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("mrg1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("swt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("nxt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ext1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("mrg2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
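// Tests that TensorList set/get/resize ops sharing a list with fp16 allow ops
// get their element_dtype converted, while an independent list (tl2) that
// never touches an allow op stays fp32.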
TEST_P(AutoMixedPrecisionParamTest, TensorListSetGet) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::TensorListReserve(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output idx1 = ops::Const(s.WithOpName("idx1"), 1);
Output idx2 = ops::Const(s.WithOpName("idx2"), 2);
Output idx3 = ops::Const(s.WithOpName("idx3"), 3);
auto tl1w1 =
ops::TensorListSetItem(s.WithOpName("tl1w1"), tl1.handle, idx1, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 =
ops::TensorListSetItem(s.WithOpName("tl1w2"), tl1.handle, idx2, allow1);
Output tl1rs =
ops::TensorListResize(s.WithOpName("tl1rs"), tl1w2.output_handle, 6);
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
ops::TensorListGetItem(s.WithOpName("tl1r2"), tl1w3.output_handle, idx3,
shape, DT_FLOAT)
.item;
auto tl2 = ops::TensorListReserve(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListSetItem(s.WithOpName("tl2w1"), tl2.handle, idx1, input);
Output tl2r1 =
ops::TensorListGetItem(s.WithOpName("tl2r1"), tl2w1.output_handle, idx1,
shape, DT_FLOAT)
.item;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
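// Same idea for TensorListPushBack/TensorListPopBack: the list used by the
// MatMuls is converted, the standalone list (tl2) is not.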
TEST_P(AutoMixedPrecisionParamTest, TensorListPushPop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
auto tl1w1 =
ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 = ops::TensorListPushBack(s.WithOpName("tl1w2"),
tl1w1.output_handle, allow1);
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"),
tl1w2.output_handle, shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListPushBack(s.WithOpName("tl1w3"), tl1.handle, allow2);
Output tl1r2 = ops::TensorListPopBack(s.WithOpName("tl1r2"),
tl1w3.output_handle, shape, DT_FLOAT)
.tensor;
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, input);
Output tl2r1 = ops::TensorListPopBack(s.WithOpName("tl2r1"),
tl2w1.output_handle, shape, DT_FLOAT)
.tensor;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
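// Tests that TensorListFromTensor/TensorListStack take part in conversion,
// including a second list (tl2) whose contents are written but never read.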
TEST_P(AutoMixedPrecisionParamTest, TensorListFromTensor) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32};
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1 = ops::TensorListFromTensor(s.WithOpName("tl1"), allow1, shape);
Output tl1r1 = ops::TensorListStack(s.WithOpName("tl1r1"), tl1.output_handle,
shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
auto tl2 = ops::TensorListFromTensor(s.WithOpName("tl2"), allow1, shape);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.output_handle, input);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 4e-4);
}
}
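// Tests that list handles stay traceable through Stack, Split, Reshape,
// TensorListPushBackBatch and TensorListConcatLists, so the element_dtype of
// all three lists is converted consistently.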
TEST_P(AutoMixedPrecisionParamTest, TensorListPushBackBatchAndConcatLists) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output tl1_tl2 =
ops::Stack(s.WithOpName("tl1_tl2"), {tl1.handle, tl2.handle});
Output allow1_allow1 =
ops::Stack(s.WithOpName("allow1_allow1"), {allow1, allow1});
auto tl12w1 = ops::TensorListPushBackBatch(s.WithOpName("tl12w1"), tl1_tl2,
allow1_allow1);
OutputList tl12w1_outputs =
ops::Split(s.WithOpName("tl12w1_outputs"), 0, tl12w1.output_handles, 2)
.output;
Output scalar_shape = ops::Const(s.WithOpName("scalar_shape"), 0, {0});
Output tl12w1_output0 = ops::Reshape(s.WithOpName("tl12w1_output0"),
tl12w1_outputs[0], scalar_shape);
Output tl12w1_output1 = ops::Reshape(s.WithOpName("tl12w1_output1"),
tl12w1_outputs[1], scalar_shape);
Output tl3 = ops::TensorListConcatLists(s.WithOpName("tl3"), tl12w1_output0,
tl12w1_output1, DT_FLOAT);
Output tl3r1 =
ops::TensorListPopBack(s.WithOpName("tl3r1"), tl3, shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl3r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl3r1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
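// Tests that a list handle threaded through a function call (Func1) does not
// break conversion of the surrounding graph; assertions only cover the list
// that stays outside the function (tl2).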
TEST_P(AutoMixedPrecisionParamTest, TensorListThroughFunction) {
FunctionDefLibrary function_lib;
const Tensor kShape = test::AsTensor<int32>({32, 32});
FunctionDef func1 = FunctionDefHelper::Define(
"Func1", {"ihandle: variant", "x: float"},
{"ohandle: variant", "y: float"}, {},
{
{{"tl1w1_handle"},
"TensorListPushBack",
{"ihandle", "x"},
{{"element_dtype", DT_FLOAT}}},
{{"shape"}, "Const", {}, {{"value", kShape}, {"dtype", DT_INT32}}},
{{"tl1r1_handle", "tl1r1_data"},
"TensorListPopBack",
{"tl1w1_handle", "shape"},
{{"element_dtype", DT_FLOAT}}},
{{"ohandle"}, "Identity", {"tl1r1_handle"}, {{"T", DT_VARIANT}}},
{{"y"}, "Identity", {"tl1r1_data"}, {{"T", DT_FLOAT}}},
});
function_lib.add_function()->Swap(&func1);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_CHECK_OK(s.graph()->AddFunctionLibrary(function_lib));
tensorflow::Input shape = {32, 32};
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
auto tl1w1 =
ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, infer1);
auto _infer1 = tensorflow::ops::AsNodeOut(s, infer1);
auto _tl1w1_handle = tensorflow::ops::AsNodeOut(s, tl1w1.output_handle);
auto builder =
tensorflow::NodeBuilder("Func1", "Func1", s.graph()->op_registry());
tensorflow::Node* func1_op;
TF_CHECK_OK(builder.Input(_tl1w1_handle)
.Input(_infer1)
.Finalize(s.graph(), &func1_op));
Output func1_handle(func1_op, 0);
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"), func1_handle,
shape, DT_FLOAT)
.tensor;
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), {32, 32}, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, infer1);
Output tl2r1 = ops::TensorListPopBack(s.WithOpName("tl2r1"),
tl2w1.output_handle, shape, DT_FLOAT)
.tensor;
Output allow2 = ops::MatMul(s.WithOpName("allow2"), tl1r1, tl2r1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
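// Returns the CUDA version reported in the cluster's GPU device environment,
// or 0 if no GPU with a "cuda" entry is found.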
int GetCudaVersion(const Cluster& cluster) {
auto devices = cluster.GetDevices();
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cuda");
if (it != device_env.end()) {
string cuda_version_str = it->second;
return std::stoi(cuda_version_str);
}
}
}
return 0;
}
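// On CUDA builds, fp16 BatchMatMul conversion is only expected from CUDA 9.1
// (version code 9010) onward; other builds are treated as supported.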
bool IsSupportedGPU(const Cluster& cluster) {
#ifdef GOOGLE_CUDA
return GetCudaVersion(cluster) >= 9010;
#else
return true;
#endif
}
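// Tests that BatchMatMul is converted only when the cluster's GPU is new
// enough (see IsSupportedGPU above); otherwise the graph must be unchanged.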
TEST_P(AutoMixedPrecisionParamTest, BatchMatMul) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 33, {64, 32, 32});
Output allow1 = ops::BatchMatMul(s.WithOpName("allow1"), input, input);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
if (IsSupportedGPU(*virtual_cluster_.get())) {
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
} else {
EXPECT_EQ(output.node_size(), item.graph.node_size());
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
}
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 3.0e-3);
}
}
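// The tests below run TestSimpleUnaryInferOp for individual unary infer ops.
// The numeric arguments give the input range followed by the absolute and
// relative tolerances used to compare fp32 and mixed-precision results
// (-1 appears to leave that tolerance at its default).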
TEST_P(AutoMixedPrecisionParamTest, EluOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Elu(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ErfOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erf(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ErfcOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erfc(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, InvOp) {
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Inv(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, LogOp) {
TestSimpleUnaryInferOp(
0.01, 10, 1.0e-3, 2.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, Log1pOp) {
TestSimpleUnaryInferOp(
-0.99, 9, 1.0e-3, 5.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log1p(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, LogSoftmaxOp) {
TestSimpleUnaryInferOp(
-8, 8, -1, 1.0e-2,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::LogSoftmax(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ReciprocalOp) {
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Reciprocal(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SigmoidOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sigmoid(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SoftmaxOp) {
TestSimpleUnaryInferOp(
-8, 8, 2.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softmax(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SoftplusOp) {
TestSimpleUnaryInferOp(
-5, 5, 2.0e-3, 2.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softplus(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SqrtOp) {
TestSimpleUnaryInferOp(
0, 10, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sqrt(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, TanhOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Tanh(scope, input);
});
}
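// Instantiates the parameterized tests for every mode available in this
// build: CUDA mode on GPU builds and FP16_CPU on oneDNN (MKL) builds.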
constexpr AutoMixedPrecisionMode kTestValues[] = {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
AutoMixedPrecisionMode::CUDA,
#endif
#if INTEL_MKL
AutoMixedPrecisionMode::FP16_CPU,
#endif
};
INSTANTIATE_TEST_SUITE_P(AutoMixedPrecisionTest, AutoMixedPrecisionParamTest,
::testing::ValuesIn(kTestValues));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
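// Tests for AutoMixedPrecisionMode::CPU: the rewrite keeps each allow op
// registered in fp32 but inserts Cast pairs so that tensors crossing between
// ops travel in fp16.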
class AutoMixedPrecisionCpuTest : public GrapplerTest {
protected:
void SetUp() override {
    virtual_cluster_.reset(
        new SingleMachine(/*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
std::unique_ptr<Cluster> virtual_cluster_;
};
TEST_F(AutoMixedPrecisionCpuTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::CPU};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
const int expected_cast_ops = 9;
EXPECT_EQ(output.node_size(), item.graph.node_size() + expected_cast_ops);
GraphView output_view(&output);
auto matmul_op = output_view.GetNode("allow1");
EXPECT_EQ(matmul_op->attr().at("T").type(), DT_FLOAT);
for (auto edge : output_view.GetFaninEdges(*matmul_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
auto cast_input_edges = output_view.GetFaninEdges(
*output_view.GetNode(edge.src.node->name()), false);
EXPECT_THAT(cast_input_edges, SizeIs(1));
EXPECT_THAT(edge.src.node->name(),
ContainsRegex("^" + cast_input_edges.begin()->src.node->name() +
"-0-CastToFp32-[0-9]-AutoMixedPrecision$"));
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*matmul_op, false)) {
EXPECT_EQ(edge.dst.node->op(), "Cast");
EXPECT_THAT(edge.dst.node->name(),
ContainsRegex("^" + matmul_op->name() +
"-0-CastToFp16-[0-9]-AutoMixedPrecision$"));
EXPECT_EQ(edge.dst.node->attr().at("SrcT").type(), DT_FLOAT);
EXPECT_EQ(edge.dst.node->attr().at("DstT").type(), DT_HALF);
}
}
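// Tests a node (allow1) whose fanout mixes allow and deny consumers: allow1's
// output is cast to fp16, the deny op's input is cast back to fp32, and no
// cast is inserted after the deny op.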
TEST_F(AutoMixedPrecisionCpuTest, MixedFanout) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {32, 32});
Output input2 = ops::Const(s.WithOpName("input2"), 2.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input1, input2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), allow1, input2);
Output deny = ops::Exp(s.WithOpName("deny"), allow1);
Output infer = ops::Add(s.WithOpName("infer"), deny, allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), infer);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::CPU};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
const int expected_cast_ops = 10;
EXPECT_EQ(output.node_size(), item.graph.node_size() + expected_cast_ops);
GraphView output_view(&output);
auto allow1_op = output_view.GetNode("allow1");
for (auto edge : output_view.GetFaninEdges(*allow1_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*allow1_op, false)) {
EXPECT_EQ(edge.dst.node->op(), "Cast");
EXPECT_EQ(edge.dst.node->attr().at("SrcT").type(), DT_FLOAT);
EXPECT_EQ(edge.dst.node->attr().at("DstT").type(), DT_HALF);
}
auto deny_op = output_view.GetNode("deny");
for (auto edge : output_view.GetFaninEdges(*deny_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*deny_op, false)) {
EXPECT_NE(edge.dst.node->op(), "Cast");
}
}
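// Tests the TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU environment
// variable, which lets the CUDA-mode rewrite run against a CPU-only cluster.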
class AutoMixedPrecisionSimulateGpuTest : public GrapplerTest {
protected:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_memory_size(1024 * 1024);
devices["/job:localhost/replica:0/task:0/device:CPU:0"] = cpu_device;
virtual_cluster_.reset(new VirtualCluster(devices));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override {
unsetenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU");
TF_CHECK_OK(virtual_cluster_->Shutdown());
}
std::unique_ptr<Cluster> virtual_cluster_;
void TestSimple(tensorflow::Scope s, bool is_optimized) {
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
AutoMixedPrecision optimizer;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
DataType expected_data_type = is_optimized ? DT_HALF : DT_FLOAT;
int expected_graph_size =
is_optimized ? item.graph.node_size() + 2 : item.graph.node_size();
EXPECT_EQ(output.node_size(), expected_graph_size);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
};
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_NoGpu) {
TestSimple(tensorflow::Scope::NewRootScope(), false);
}
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_SimulatedGpu) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "true",
         /*overwrite=*/1);
TestSimple(tensorflow::Scope::NewRootScope(), true);
}
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_SimulatedGpu_CpuScope) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "true",
         /*overwrite=*/1);
TestSimple(tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
false);
}
#endif
#if INTEL_MKL
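// bfloat16 tests for the oneDNN (MKL) build, exercising
// AutoMixedPrecisionMode::BF16 on the CPU.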
class AutoMixedPrecisionMklTest : public GrapplerTest {
protected:
void SetUp() override {
    virtual_cluster_.reset(
        new SingleMachine(/*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
std::unique_ptr<Cluster> virtual_cluster_;
};
TEST_F(AutoMixedPrecisionMklTest, AlreadyBf16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_BFLOAT16);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output cst2 = ops::Cast(s.WithOpName("cst2"), clr1, DT_FLOAT);
Output clr2 = ops::Relu(s.WithOpName("clr2"), cst2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("SrcT").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("DstT").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_F(AutoMixedPrecisionMklTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output deny2 = ops::Log(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
Output deny3 = ops::SparseMatMul(s.WithOpName("deny3"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny3);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
TEST_F(AutoMixedPrecisionMklTest, TensorListSetGet) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
tensorflow::Input shape = {32, 32};
auto tl1 = ops::TensorListReserve(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output idx1 = ops::Const(s.WithOpName("idx1"), 1);
Output idx2 = ops::Const(s.WithOpName("idx2"), 2);
Output idx3 = ops::Const(s.WithOpName("idx3"), 3);
auto tl1w1 =
ops::TensorListSetItem(s.WithOpName("tl1w1"), tl1.handle, idx1, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 =
ops::TensorListSetItem(s.WithOpName("tl1w2"), tl1.handle, idx2, allow1);
Output tl1rs =
ops::TensorListResize(s.WithOpName("tl1rs"), tl1w2.output_handle, 6);
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output infer1 = ops::Mul(s.WithOpName("infer1"), tl1r1, tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
ops::TensorListGetItem(s.WithOpName("tl1r2"), tl1w3.output_handle, idx3,
shape, DT_FLOAT)
.item;
auto tl2 = ops::TensorListReserve(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListSetItem(s.WithOpName("tl2w1"), tl2.handle, idx1, input);
Output tl2r1 =
ops::TensorListGetItem(s.WithOpName("tl2r1"), tl2w1.output_handle, idx1,
shape, DT_FLOAT)
.item;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
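// Tests that an infer op (BiasAdd) directly downstream of an allow op
// (Conv2D) is converted to bfloat16 together with its trailing Relu.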
TEST_F(AutoMixedPrecisionMklTest, InferFollowUpStreamAllow) {
if (!IsMKLEnabled())
GTEST_SKIP() << "Test only applicable to MKL auto-mixed precision.";
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {8, 56, 56, 16});
Output weight = ops::Const(s.WithOpName("weight"), 2.f, {3, 3, 16, 16});
Output allow =
ops::Conv2D(s.WithOpName("allow"), input1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output input2 = ops::Const(s.WithOpName("input2"), 1.f / 32, {16});
Output infer = ops::BiasAdd(s.WithOpName("infer"), allow, input2);
Output clr = ops::Relu(s.WithOpName("clr"), infer);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
EXPECT_EQ(output_view.GetNode("input1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("weight")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("infer")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr")->attr().at("T").type(), DT_BFLOAT16);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
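// Tests that the same BiasAdd pattern stays fp32 when its upstream producer
// is a deny op (Pow): no casts are added and all node types are unchanged.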
TEST_F(AutoMixedPrecisionMklTest, InferFollowUpStreamDeny) {
if (!IsMKLEnabled())
GTEST_SKIP() << "Test only applicable to MKL auto-mixed precision.";
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {8, 56, 56, 16});
Output input2 = ops::Const(s.WithOpName("input2"), 1.f, {16});
Output input3 = ops::Const(s.WithOpName("input3"), 1.f / 32, {16});
Output deny = ops::Pow(s.WithOpName("deny"), input1, input2);
Output infer = ops::BiasAdd(s.WithOpName("infer"), deny, input3);
Output clr = ops::Relu(s.WithOpName("clr"), infer);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size());
EXPECT_EQ(output_view.GetNode("input1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input3")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i]);
}
}
#endif
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_mixed_precision.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_mixed_precision_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecb8d878-03fe-44f2-9ce6-ad2285013004 | cpp | tensorflow/tensorflow | graph_optimizer_stage | tensorflow/core/grappler/optimizers/graph_optimizer_stage.cc | tensorflow/core/grappler/optimizers/graph_optimizer_stage_test.cc | #include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/core/graph/tensor_id.h"
namespace tensorflow {
namespace grappler {
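// Splits a full node name of the form "scope/sub/name" into scope and name;
// a name with no '/' yields an empty scope.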
const NodeScopeAndName ParseNodeScopeAndName(const string& node_name) {
auto pos = node_name.find_last_of('/');
if (pos == string::npos) {
return {"", node_name};
} else {
return {node_name.substr(0, pos), node_name.substr(pos + 1)};
}
}
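// Resolves the node that produces 'input' via the context's node map; fails
// if no node with that name exists.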
Status GetInputNode(const GraphOptimizerContext& ctx, const string& input,
NodeDef** node) {
string node_name = NodeName(input);
NodeDef* node_by_name = ctx.node_map->GetNode(node_name);
if (node_by_name == nullptr) {
return errors::FailedPrecondition("Node ", node_name,
" doesn't exists in a node map");
}
*node = node_by_name;
return absl::OkStatus();
}
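// Looks up the inferred output properties for a tensor name such as "node:2".
// Control inputs ("^node") and out-of-range output positions are errors.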
Status GetTensorProperties(const GraphOptimizerContext& ctx,
const string& tensor,
const OpInfo::TensorProperties** properties) {
if (ctx.graph_properties == nullptr) {
return errors::InvalidArgument("Graph properties are unknown.");
}
SafeTensorId tensor_id = ParseTensorName(tensor);
if (tensor_id.index() < 0) {
return errors::InvalidArgument(
"Can't get tensor properties of control dependency ", tensor);
}
const auto& output_properties =
ctx.graph_properties->GetOutputProperties(tensor_id.node());
int num_outputs = output_properties.size();
if (num_outputs == 0 || tensor_id.index() > num_outputs - 1) {
return errors::InvalidArgument(
"Node ", tensor_id.node(),
" is missing output properties at position :", tensor_id.index(),
" (num_outputs=", num_outputs, ")");
}
*properties = &output_properties[tensor_id.index()];
return absl::OkStatus();
}
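// Adds a copy of 'node_to_copy' to the optimized graph under 'name', which
// must not already exist, and registers it in the node map.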
NodeDef* AddCopyNode(const GraphOptimizerContext& ctx, const string& name,
const NodeDef* node_to_copy) {
CHECK(node_to_copy != nullptr);
CHECK(!ctx.node_map->NodeExists(name))
<< "Node " << name << " already exists in a graph";
NodeDef* new_node = ctx.optimized_graph->add_node();
*new_node = *node_to_copy;
new_node->set_name(name);
ctx.node_map->AddNode(name, new_node);
return new_node;
}
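// Adds a new empty node named 'name', appending a "_<count>" suffix until the
// name is unique in the node map.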
NodeDef* AddEmptyNode(const GraphOptimizerContext& ctx, const string& name) {
std::string new_name = name;
for (int count = 0; ctx.node_map->NodeExists(new_name); ++count) {
LOG(WARNING) << name << " already exists in the graph.";
new_name = absl::StrCat(name, "_", count);
}
NodeDef* new_node = ctx.optimized_graph->add_node();
new_node->set_name(new_name);
ctx.node_map->AddNode(new_name, new_node);
return new_node;
}
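// Builds an optimized node name of the form "scope/sub_scope/prefix_name";
// at least one of sub_scope and prefix must be non-empty.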
const string MakeOptimizedNodeName(const NodeScopeAndName& node,
const string& sub_scope,
const string& prefix) {
CHECK(!sub_scope.empty() || !prefix.empty())
<< "Either optimized node name prefix or sub-scope must be non-empty";
string optimized_node_name;
if (!node.scope.empty()) {
strings::StrAppend(&optimized_node_name, node.scope, "/");
}
if (!sub_scope.empty()) {
strings::StrAppend(&optimized_node_name, sub_scope, "/");
}
if (!prefix.empty()) {
strings::StrAppend(&optimized_node_name, prefix, "_");
}
strings::StrAppend(&optimized_node_name, node.name);
return optimized_node_name;
}
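// As above, but also appends the base name of every entry in 'node_names' to
// keep names derived from multiple inputs distinct.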
const string MakeOptimizedNodeName(const NodeScopeAndName& root,
const std::vector<string> node_names,
const string& sub_scope,
const string& prefix) {
string optimized_node_name = MakeOptimizedNodeName(root, sub_scope, prefix);
for (const string& node_name : node_names) {
auto name_and_scope = ParseNodeScopeAndName(node_name);
strings::StrAppend(&optimized_node_name, "_", name_and_scope.name);
}
return optimized_node_name;
}
}
} | #include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
class GraphOptimizerStageTest : public ::testing::Test {};
struct FakeResult {};
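// A minimal stage used to exercise the name-mangling and lookup helpers of
// GraphOptimizerStage; it accepts every node and simplifies nothing.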
class FakeOptimizerStage : public GraphOptimizerStage<FakeResult> {
public:
explicit FakeOptimizerStage(const string& optimizer_name,
const string& stage_name,
const GraphOptimizerContext& ctx)
: GraphOptimizerStage(optimizer_name, stage_name, ctx) {}
~FakeOptimizerStage() override = default;
bool IsSupported(const NodeDef* node) const override { return true; }
Status TrySimplify(NodeDef* node, FakeResult* result) override {
return absl::OkStatus();
}
};
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInRoot) {
const auto scope_and_name = ParseNodeScopeAndName("Add");
EXPECT_EQ(scope_and_name.scope, "");
EXPECT_EQ(scope_and_name.name, "Add");
}
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInScope) {
const auto scope_and_name = ParseNodeScopeAndName("a/b/c/Add");
EXPECT_EQ(scope_and_name.scope, "a/b/c");
EXPECT_EQ(scope_and_name.name, "Add");
}
TEST_F(GraphOptimizerStageTest, OptimizedNodeName) {
  GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
                            /*optimized_graph=*/nullptr,
                            /*graph_properties=*/nullptr,
                            /*node_map=*/nullptr,
                            /*feed_nodes=*/nullptr,
                            /*opt_level=*/RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/Add");
EXPECT_EQ(stage.OptimizedNodeName(node), "a/b/c/my_opt/my_stg_Add");
EXPECT_EQ(stage.OptimizedNodeName(node, std::vector<string>({"Mul", "Sqrt"})),
"a/b/c/my_opt/my_stg_Add_Mul_Sqrt");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.OptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_Add");
}
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeName) {
GraphDef graph =
GDef({NDef("a/b/c/A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {})},
{});
NodeMap node_map(&graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/nullptr,
/*graph_properties=*/nullptr,
&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/A");
EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
"a/b/c/my_opt/my_stg_A_unique0");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_A_unique1");
}
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeNameWithUsedNodeNames) {
GraphDef graph = GDef(
{NDef("a/b/c/A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_A_unique0", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {}),
NDef("a/b/c/my_opt/my_stg_my_rewrite_A_unique1", "NotImportant", {})},
{});
NodeMap node_map(&graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
/*optimized_graph=*/nullptr,
/*graph_properties=*/nullptr,
&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
const auto node = ParseNodeScopeAndName("a/b/c/A");
EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
"a/b/c/my_opt/my_stg_A_unique1");
const string rewrite = "my_rewrite";
EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
"a/b/c/my_opt/my_stg_my_rewrite_A_unique2");
}
TEST_F(GraphOptimizerStageTest, GetInputNodeAndProperties) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add = ops::Add(s.WithOpName("Add"), a, b);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_CHECK_OK(properties.InferStatically(/*assume_valid_feeds=*/false));
NodeMap node_map(&item.graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
&item.graph,
&properties,
&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
NodeDef* add_node;
TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
ASSERT_EQ(add_node->input_size(), 2);
EXPECT_EQ(add_node->input(0), "a");
EXPECT_EQ(add_node->input(1), "b");
const OpInfo::TensorProperties* add_properties;
TF_CHECK_OK(stage.GetTensorProperties("Add", &add_properties));
EXPECT_EQ(add_properties->dtype(), DT_FLOAT);
const OpInfo::TensorProperties* a_properties;
TF_CHECK_OK(stage.GetTensorProperties("a:0", &a_properties));
EXPECT_EQ(a_properties->dtype(), DT_FLOAT_REF);
const OpInfo::TensorProperties* b_properties;
TF_CHECK_OK(stage.GetTensorProperties("b:0", &b_properties));
EXPECT_EQ(b_properties->dtype(), DT_FLOAT_REF);
}
TEST_F(GraphOptimizerStageTest, AddNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add = ops::Add(s.WithOpName("Add"), a, b);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphProperties properties(item);
TF_CHECK_OK(properties.InferStatically(/*assume_valid_feeds=*/false));
NodeMap node_map(&item.graph);
GraphOptimizerContext ctx(/*nodes_to_preserve=*/nullptr,
&item.graph,
&properties,
&node_map,
/*feed_nodes=*/nullptr,
RewriterConfig::ON);
FakeOptimizerStage stage("my_opt", "my_stg", ctx);
NodeDef* add_node;
TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
NodeDef* add_node_copy = stage.AddCopyNode("Add_1", add_node);
EXPECT_EQ(add_node_copy->name(), "Add_1");
EXPECT_EQ(add_node_copy->op(), "Add");
ASSERT_EQ(add_node->input_size(), 2);
EXPECT_EQ(add_node_copy->input(0), "a");
EXPECT_EQ(add_node_copy->input(1), "b");
NodeDef* add_node_copy_by_name;
TF_CHECK_OK(stage.GetInputNode("Add_1", &add_node_copy_by_name));
EXPECT_EQ(add_node_copy, add_node_copy_by_name);
NodeDef* empty_node = stage.AddEmptyNode("Add_2");
EXPECT_EQ(empty_node->name(), "Add_2");
EXPECT_EQ(empty_node->input_size(), 0);
NodeDef* empty_node_by_name;
TF_CHECK_OK(stage.GetInputNode("Add_2", &empty_node_by_name));
EXPECT_EQ(empty_node, empty_node_by_name);
NodeDef* unique_empty_node = stage.AddEmptyNode("Add_2");
EXPECT_EQ(unique_empty_node->name(), "Add_2_0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/graph_optimizer_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/graph_optimizer_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cc773d3-be6d-4d81-99e4-e20270add2d0 | cpp | tensorflow/tensorflow | common_subgraph_elimination | tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc | tensorflow/core/grappler/optimizers/common_subgraph_elimination_test.cc | #include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
class Cluster;
}
}
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
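// Tracks a canonical "representative" for every node signature seen so far,
// allowing structurally identical nodes to be collapsed into one.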
class UniqueNodes {
public:
NodeDef* FindOrAddRepresentative(NodeDef* node) {
uint64 sig = ComputeSignature(*node);
std::vector<NodeDef*>& candidates = rep_[sig];
for (auto& candidate : candidates) {
if ((candidate == node) || SameNode(*candidate, *node)) {
return candidate;
}
}
candidates.push_back(node);
return node;
}
void RemoveRepresentative(NodeDef* node) {
auto it = memoized_signatures_.find(node);
if (it == memoized_signatures_.end()) return;
std::vector<NodeDef*>& candidates = rep_[it->second];
for (int i = 0, end = candidates.size(); i < end; ++i) {
if (candidates[i] == node) {
std::swap(candidates[i], candidates[candidates.size() - 1]);
candidates.resize(candidates.size() - 1);
break;
}
}
memoized_signatures_.erase(node);
}
private:
uint64 ComputeSignature(const NodeDef& node);
bool SameNode(const NodeDef& node1, const NodeDef& node2) const;
absl::flat_hash_map<uint64, std::vector<NodeDef*>> rep_;
absl::flat_hash_map<const NodeDef*, uint64> memoized_signatures_;
};
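// Hashes op, device, inputs and attributes into a signature. Inputs and
// attributes are combined with an order-independent hash, and results are
// memoized per node.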
uint64 UniqueNodes::ComputeSignature(const NodeDef& node) {
auto it = memoized_signatures_.find(&node);
if (it != memoized_signatures_.end()) return it->second;
uint64 h = Hash64(node.op());
h = Hash64Combine(Hash64(node.device()), h);
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
uint64 input_hash = Hash64Combine(
Hash64(input_tensor.node().data(), input_tensor.node().size()),
std::hash<int>()(input_tensor.index()));
h = Hash64CombineUnordered(input_hash, h);
}
for (const auto& attr : node.attr()) {
uint64 attr_hash =
Hash64Combine(Hash64(attr.first), FastAttrValueHash(attr.second));
h = Hash64CombineUnordered(attr_hash, h);
}
memoized_signatures_.emplace(&node, h);
return h;
}
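// Exact comparison used to resolve hash collisions: two nodes are the same
// only if op, device, the full ordered input list and every attribute value
// match.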
bool UniqueNodes::SameNode(const NodeDef& node1, const NodeDef& node2) const {
if (node1.op() != node2.op()) {
return false;
}
if (node1.device() != node2.device()) {
return false;
}
if (node1.input_size() != node2.input_size()) {
return false;
}
if (node1.attr_size() != node2.attr_size()) {
return false;
}
auto it1 = node1.input().begin();
auto it2 = node2.input().begin();
for (; it1 != node1.input().end(); ++it1, ++it2) {
if (*it1 != *it2) return false;
}
for (const auto& attr1 : node1.attr()) {
auto it = node2.attr().find(attr1.first);
if (it == node2.attr().end()) return false;
if (!AreAttrValuesEqual(attr1.second, it->second,
/*allow_false_negatives=*/true)) {
return false;
}
}
return true;
}
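// A node may be deduplicated unless it must be preserved, modifies frame
// state (Enter/Exit), is placed on an "SPU" device, or has side effects.
// Assert and Print are explicitly allowed even though they are stateful.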
bool CommonSubgraphElimination::CanDedup(const NodeDef& node) const {
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (IsEnter(node) || IsExit(node)) {
return false;
}
if (node.device().find("SPU") != string::npos) {
return false;
}
if (IsAssert(node) || IsPrint(node)) {
return true;
}
return IsFreeOfSideEffect(node);
}
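// Canonicalizes the graph, marks everything feeding an in-place op as
// non-dedupable, then repeatedly folds each remaining node into its
// representative and rewires fanouts until a fixed point is reached.
// Duplicates are physically erased only when the fetch nodes are known.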
Status CommonSubgraphElimination::DedupComputations(GraphDef* optimized_graph) {
CanonicalizeGraph(optimized_graph);
GraphTopologyView graph_view;
if (!graph_view.InitializeFromGraph(*optimized_graph).ok()) {
LOG(WARNING) << "Failed to initialize GraphTopologyView.";
return absl::OkStatus();
}
absl::flat_hash_set<const NodeDef*> feeds_inplace_op;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& root = optimized_graph->node(i);
if (feeds_inplace_op.find(&root) != feeds_inplace_op.end()) continue;
if (ModifiesInputsInPlace(root)) {
const auto is_continue_traversal = [&](const NodeDef* node) -> bool {
return node->op() == root.op() || !NeverForwardsInputs(*node);
};
DfsTraversal(graph_view, {&root}, TraversalDirection::kFollowInputs,
DfsPredicates::Advance(is_continue_traversal),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
feeds_inplace_op.insert(node);
}));
}
}
std::vector<bool> can_dedup(optimized_graph->node_size());
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& node = optimized_graph->node(i);
can_dedup[i] = (feeds_inplace_op.find(&node) == feeds_inplace_op.end()) &&
CanDedup(node);
}
bool stop = true;
std::set<int> duplicates;
UniqueNodes nodes;
NodeMap node_map(optimized_graph);
do {
stop = true;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
if (!can_dedup[i] || duplicates.find(i) != duplicates.end()) {
continue;
}
NodeDef* node = optimized_graph->mutable_node(i);
NodeDef* rep = nodes.FindOrAddRepresentative(node);
if (rep == node) {
continue;
}
const auto fanouts = node_map.GetOutputs(node->name());
for (NodeDef* fanout : fanouts) {
bool updated_fanout = false;
for (int i = 0; i < fanout->input_size(); ++i) {
string* fanout_input = fanout->mutable_input(i);
const int position =
NodePositionIfSameNode(*fanout_input, node->name());
if (position < -1) {
continue;
} else {
if (!updated_fanout) {
nodes.RemoveRepresentative(fanout);
}
updated_fanout = true;
if (position > 0) {
*fanout_input = StrCat(rep->name(), ":", position);
} else if (position == 0) {
*fanout_input = rep->name();
} else {
*fanout_input = StrCat("^", rep->name());
}
}
}
if (updated_fanout) {
node_map.UpdateInput(fanout->name(), node->name(), rep->name());
CanonicalizeNode(fanout);
}
}
if (fetch_nodes_known_) {
node->Clear();
}
duplicates.insert(i);
stop = false;
}
} while (!stop);
if (fetch_nodes_known_ && !duplicates.empty()) {
EraseNodesFromGraph(duplicates, optimized_graph);
}
return absl::OkStatus();
}
Status CommonSubgraphElimination::Optimize(Cluster* /*cluster*/,
const GrapplerItem& item,
GraphDef* optimized_graph) {
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
*optimized_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
return DedupComputations(optimized_graph);
}
}
} | #include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer_test_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void VerifyGraphsMatch(const GraphDef& original_graph,
const GraphDef& optimized_graph, int line) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << line;
EXPECT_EQ(original.op(), optimized.op()) << line;
EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << line;
}
}
}
}
class CommonSubgraphEliminationTest : public ArithmeticOptimizerTest {};
TEST_F(CommonSubgraphEliminationTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
CommonSubgraphElimination optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupping) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {3.14, 2.7}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.14, 2.7}, {1, 2});
Output div = ops::Div(s.WithOpName("div"), c1, c2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 2);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 2);
EXPECT_EQ(new_div->input(0), "c1");
EXPECT_EQ(new_div->input(1), "c1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDeduppingAssertAndCheckNumerics) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output p = ops::Placeholder(s, DT_BOOL, ops::Placeholder::Shape({}));
Output c = ops::Const(s.WithOpName("c"), {3.14, 2.7}, {1, 2});
auto check1 = ops::CheckNumerics(s.WithOpName("check1"), c, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("check2"), c, "foo");
auto assert1 = ops::Assert(s.WithOpName("assert1"), p, {c});
auto assert2 = ops::Assert(s.WithOpName("assert2"), p, {c});
Output div = ops::Div(s.WithOpName("div").WithControlDependencies(
{assert1.operation, assert2.operation}),
check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
Tensor bool_t(DT_BOOL, TensorShape({}));
bool_t.scalar<bool>().setConstant(true);
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}});
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 6);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 3);
EXPECT_EQ(new_div->input(0), "check1");
EXPECT_EQ(new_div->input(1), "check2");
EXPECT_EQ(new_div->input(2), "^assert1");
auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}});
EXPECT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupCommutative) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {1.0f, 2.0f}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.0f, 4.0f}, {1, 2});
Output mul1 = ops::Mul(s.WithOpName("mul1"), c1, c2);
Output mul2 = ops::Mul(s.WithOpName("mul2"), c2, c1);
Output div1 = ops::Div(s.WithOpName("div1"), mul1, mul2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div1"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 4);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_c2 = node_map.GetNode("c2");
ASSERT_NE(new_c2, nullptr);
const NodeDef* new_mul1 = node_map.GetNode("mul1");
ASSERT_NE(new_mul1, nullptr);
ASSERT_EQ(new_mul1->input_size(), 2);
EXPECT_EQ(new_mul1->input(0), "c1");
EXPECT_EQ(new_mul1->input(1), "c2");
const NodeDef* new_div1 = node_map.GetNode("div1");
ASSERT_NE(new_div1, nullptr);
ASSERT_EQ(new_div1->input_size(), 2);
EXPECT_EQ(new_div1->input(0), "mul1");
EXPECT_EQ(new_div1->input(1), "mul1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/common_subgraph_elimination_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e7bdc26-f49d-4dd4-b20e-93050f04e4da | cpp | tensorflow/tensorflow | static_schedule | tensorflow/core/grappler/optimizers/static_schedule.cc | tensorflow/core/grappler/optimizers/static_schedule_test.cc | #include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include <deque>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace grappler {
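// Predicts the run time of one node by packaging its op, attributes,
// inferred input/output properties and device placement into an OpContext
// and querying the analytical cost estimator; the result is clamped to at
// least 1ns.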
static Costs::NanoSeconds PredictExecutionTime(
const GraphProperties& properties, const OpLevelCostEstimator& estimator,
const VirtualPlacer& placer, const NodeDef& node) {
OpContext op_context;
op_context.op_info.set_op(node.op());
*op_context.op_info.mutable_attr() = node.attr();
std::vector<OpInfo::TensorProperties> inputs =
properties.GetInputProperties(node.name());
for (auto& input : inputs) {
op_context.op_info.add_inputs()->Swap(&input);
}
std::vector<OpInfo::TensorProperties> outputs =
properties.GetOutputProperties(node.name());
for (auto& output : outputs) {
op_context.op_info.add_outputs()->Swap(&output);
}
DeviceProperties device = placer.get_device(node);
op_context.op_info.mutable_device()->Swap(&device);
Costs::NanoSeconds estimate =
estimator.PredictCosts(op_context).execution_time;
return std::max(estimate, Costs::NanoSeconds(1));
}
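// Forward sweep over the graph computing the earliest completion time of
// every node. A Merge becomes ready as soon as one input is available;
// every other node waits for all of its inputs.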
Status EstimateEarliestExecutionTimes(
const GrapplerItem& item, const Cluster* cluster,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* completion_times) {
std::unordered_map<string, const NodeDef*> name_map;
std::unordered_map<const NodeDef*, int> pending_inputs;
std::deque<const NodeDef*> ready_nodes;
for (const NodeDef& node : item.graph.node()) {
name_map[node.name()] = &node;
if (node.input_size() == 0) {
ready_nodes.push_back(&node);
(*completion_times)[&node] = 0;
} else if (IsMerge(node)) {
pending_inputs[&node] = 1;
} else {
pending_inputs[&node] = node.input_size();
}
}
std::unordered_map<const NodeDef*, std::vector<const NodeDef*>> fanouts;
for (const NodeDef& node : item.graph.node()) {
for (const string& input : node.input()) {
string node_name = NodeName(input);
auto it = name_map.find(node_name);
if (it == name_map.end()) {
return errors::InvalidArgument(
strings::StrCat("Unknown input node ", input));
}
const NodeDef* fanin = it->second;
fanouts[fanin].push_back(&node);
}
}
name_map.clear();
GraphProperties properties(item);
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/true,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
OpLevelCostEstimator estimator;
VirtualPlacer placer(cluster->GetDevices());
while (!ready_nodes.empty()) {
const NodeDef* node = ready_nodes.front();
ready_nodes.pop_front();
Costs::NanoSeconds execution_time =
PredictExecutionTime(properties, estimator, placer, *node);
Costs::NanoSeconds completion_time =
execution_time + (*completion_times)[node];
(*completion_times)[node] = completion_time;
for (const NodeDef* fanout : fanouts[node]) {
int pending = pending_inputs[fanout];
if (pending == 0) {
continue;
} else if (pending == 1) {
ready_nodes.push_back(fanout);
}
pending_inputs[fanout]--;
Costs::NanoSeconds ready_time =
std::max(completion_time, (*completion_times)[fanout]);
(*completion_times)[fanout] = ready_time;
}
}
return absl::OkStatus();
}
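// Backward counterpart of the estimate above: starting from the given
// completion deadlines at the graph sinks, each fanin's deadline is the
// minimum over its consumers of (consumer deadline - consumer run time).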
Status EstimateRequiredTimes(
const GrapplerItem& item, const Cluster* cluster,
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>&
execution_times,
std::unordered_map<const NodeDef*, Costs::NanoSeconds>* required_times) {
std::unordered_map<string, const NodeDef*> name_map;
for (const NodeDef& node : item.graph.node()) {
name_map[node.name()] = &node;
(*required_times)[&node] = Costs::NanoSeconds::max();
}
std::unordered_map<const NodeDef*, int> pending_fanouts;
for (const NodeDef& node : item.graph.node()) {
for (const string& input : node.input()) {
string node_name = NodeName(input);
auto it = name_map.find(node_name);
if (it == name_map.end()) {
return errors::InvalidArgument(
strings::StrCat("Unknown input node ", input));
}
const NodeDef* fanin = it->second;
pending_fanouts[fanin] += 1;
}
}
std::deque<const NodeDef*> ready_nodes;
for (const NodeDef& node : item.graph.node()) {
if (pending_fanouts[&node] == 0) {
auto it = execution_times.find(&node);
if (it != execution_times.end()) {
(*required_times)[&node] = it->second;
}
ready_nodes.push_back(&node);
}
}
GraphProperties properties(item);
TF_RETURN_IF_ERROR(
properties.InferStatically(/*assume_valid_feeds=*/true,
/*aggressive_shape_inference=*/false,
/*include_tensor_values=*/false));
OpLevelCostEstimator estimator;
VirtualPlacer placer(cluster->GetDevices());
while (!ready_nodes.empty()) {
const NodeDef* node = ready_nodes.front();
ready_nodes.pop_front();
Costs::NanoSeconds execution_time =
PredictExecutionTime(properties, estimator, placer, *node);
Costs::NanoSeconds required_time = (*required_times)[node] - execution_time;
for (const string& fanin_name : node->input()) {
const NodeDef* fanin = name_map[NodeName(fanin_name)];
(*required_times)[fanin] =
std::min((*required_times)[fanin], required_time);
int pending = pending_fanouts[fanin];
if (pending == 0) {
continue;
} else if (pending == 1) {
ready_nodes.push_back(fanin);
}
pending_fanouts[fanin]--;
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/static_schedule.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class StaticScheduleTest : public ::testing::Test {
public:
std::unique_ptr<VirtualCluster> CreateVirtualCluster() const {
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_bandwidth(32);
cpu_device.set_l1_cache_size(32 * 1024);
cpu_device.set_l2_cache_size(256 * 1024);
cpu_device.set_l3_cache_size(4 * 1024 * 1024);
std::unordered_map<string, DeviceProperties> devices;
devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
return std::unique_ptr<VirtualCluster>(new VirtualCluster(devices));
}
};
std::vector<Costs::NanoSeconds> GetOrderedTimes(
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>
completion_times) {
std::map<Costs::NanoSeconds, std::string> ordered_completion_times;
for (const auto& node_def_time : completion_times) {
ordered_completion_times[node_def_time.second] =
node_def_time.first->name();
}
std::vector<Costs::NanoSeconds> ordered_times;
for (const auto& time_node_name : ordered_completion_times) {
ordered_times.push_back(time_node_name.first);
}
return ordered_times;
}
std::vector<std::string> GetOrderedNodeNames(
const std::unordered_map<const NodeDef*, Costs::NanoSeconds>
completion_times) {
std::map<Costs::NanoSeconds, std::string> ordered_completion_times;
for (const auto& node_def_time : completion_times) {
ordered_completion_times[node_def_time.second] =
node_def_time.first->name();
}
std::vector<std::string> ordered_node_names;
for (const auto& time_node_name : ordered_completion_times) {
ordered_node_names.push_back(time_node_name.second);
}
return ordered_node_names;
}
TEST_F(StaticScheduleTest, BasicGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> completion_times;
Status status =
EstimateEarliestExecutionTimes(item, cluster.get(), &completion_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), completion_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(completion_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[0], Costs::NanoSeconds(1));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(completion_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"Const/Const", "x", "Sign", "Sign_1",
"Sign_2", "Sign_3", "y"}));
}
TEST_F(StaticScheduleTest, BasicGraphWithCtrlDependencies) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::AddN(s.WithOpName("b"), {a});
Output c = ops::Identity(s.WithOpName("c"), b);
Output d = ops::Identity(s.WithOpName("d"), c);
Output e = ops::AddN(s.WithOpName("e"), {d});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_EQ("c", item.graph.node(2).name());
EXPECT_EQ("e", item.graph.node(4).name());
*item.graph.mutable_node(4)->add_input() = "^c";
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> completion_times;
Status status =
EstimateEarliestExecutionTimes(item, cluster.get(), &completion_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), completion_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(completion_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[0], Costs::NanoSeconds(1));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(completion_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"a", "b", "c", "d", "e"}));
}
TEST_F(StaticScheduleTest, RequiredTimes) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
std::unique_ptr<VirtualCluster> cluster(CreateVirtualCluster());
std::unordered_map<const NodeDef*, Costs::NanoSeconds> execution_times;
for (const NodeDef& node : item.graph.node()) {
execution_times[&node] = 0;
}
std::unordered_map<const NodeDef*, Costs::NanoSeconds> required_times;
Status status = EstimateRequiredTimes(item, cluster.get(), execution_times,
&required_times);
TF_EXPECT_OK(status);
EXPECT_EQ(item.graph.node_size(), required_times.size());
std::vector<Costs::NanoSeconds> ordered_times =
GetOrderedTimes(required_times);
for (int i = 0; i < ordered_times.size(); ++i) {
if (i > 0) {
EXPECT_GT(ordered_times[i], ordered_times[i - 1]);
}
}
EXPECT_EQ(ordered_times[ordered_times.size() - 1], Costs::NanoSeconds(0));
std::vector<std::string> ordered_node_names =
GetOrderedNodeNames(required_times);
EXPECT_EQ(ordered_node_names,
(std::vector<std::string>{"Const/Const", "x", "Sign", "Sign_1",
"Sign_2", "Sign_3", "y"}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/static_schedule.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/static_schedule_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36acac8a-49c4-4e97-8cea-2d5a67db0444 | cpp | tensorflow/tensorflow | loop_optimizer | tensorflow/core/grappler/optimizers/loop_optimizer.cc | tensorflow/core/grappler/optimizers/loop_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
namespace {
using TensorVector = absl::InlinedVector<TensorValue, 4UL>;
class LoopInvariantNodeMotionOptimizer {
public:
explicit LoopInvariantNodeMotionOptimizer(GraphDef* optimized_graph)
: optimized_graph_(optimized_graph) {}
virtual ~LoopInvariantNodeMotionOptimizer() = default;
Status Optimize();
private:
Status FindInvariantNodes(NodeDef* node);
Status RevertInvariantNodes();
Status MoveInvariantNodes(const int frame_id);
Status HandleInvariantNode(NodeDef* node, const int num_outputs,
const int frame_id);
Status HandleConst(NodeDef* node, const int num_outputs, const int frame_id);
Status HandleInvariantEnter(NodeDef* node, const int num_outputs);
GraphDef* optimized_graph_;
std::unique_ptr<NodeMap> node_map_;
std::map<NodeDef*, int> invariant_nodes_;
std::set<int> empty_set_;
std::vector<std::set<int>> frame_children_;
std::vector<int> frame_parent_;
std::map<int, const NodeDef*> loop_cond_;
std::map<int, std::vector<NodeDef*>> invariant_enters_;
int new_enter_id_;
};
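// Bypasses an invariant Enter: invariant consumers are rewired to the
// Enter's data input, and the Enter's control inputs are forwarded to them.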
Status LoopInvariantNodeMotionOptimizer::HandleInvariantEnter(
NodeDef* node, const int num_outputs) {
auto consumers = node_map_->GetOutputs(node->name());
std::vector<string> enter_control_inputs;
string enter_input;
for (auto& input : node->input()) {
if (IsControlInput(input)) {
enter_control_inputs.push_back(input);
} else {
enter_input = input;
}
}
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
if (NodeName(consumer->input(i)) == node->name()) {
consumer->set_input(i, enter_input);
node_map_->AddOutput(NodeName(enter_input), consumer->name());
node_map_->RemoveOutput(node->name(), consumer->name());
}
}
for (auto& control_input : enter_control_inputs) {
consumer->add_input(control_input);
node_map_->AddOutput(NodeName(control_input), consumer->name());
}
}
}
return absl::OkStatus();
}
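// Hoists an invariant constant. If it no longer has variant consumers its
// inputs are simply cleared; otherwise a prefixed copy is created for the
// invariant consumers while the original stays in the loop. For nested
// loops, a control dependency on the parent loop's Switch appears intended
// to keep the hoisted constant in the parent frame.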
Status LoopInvariantNodeMotionOptimizer::HandleConst(NodeDef* node,
const int num_outputs,
const int frame_id) {
NodeDef* const_node = nullptr;
if (num_outputs == 0) {
const_node = node;
node_map_->RemoveInputs(node->name());
node->clear_input();
} else {
const string const_node_name =
AddPrefixToNodeName(node->name(), kLoopOptimizer);
const_node = node_map_->GetNode(const_node_name);
if (const_node == nullptr) {
const_node = optimized_graph_->add_node();
const_node->set_name(const_node_name);
const_node->set_op("Const");
const_node->set_device(node->device());
*const_node->mutable_attr() = node->attr();
node_map_->AddNode(const_node->name(), const_node);
}
auto consumers = node_map_->GetOutputs(node->name());
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
if (NodeName(consumer->input(i)) == node->name()) {
if (IsControlInput(consumer->input(i))) {
*consumer->mutable_input(i) = AsControlDependency(*const_node);
} else {
*consumer->mutable_input(i) = const_node->name();
}
node_map_->AddOutput(const_node->name(), consumer->name());
node_map_->RemoveOutput(node->name(), consumer->name());
}
}
}
}
}
if (frame_parent_[frame_id] != -1) {
int parent_id = frame_parent_[frame_id];
auto loop_cond_it = loop_cond_.find(parent_id);
if (loop_cond_it == loop_cond_.end()) {
return errors::InvalidArgument("Frame ", frame_id,
" doesn't have a LoopCond node");
}
auto& loop_cond_name = loop_cond_it->second->name();
NodeDef* switch_node = nullptr;
for (auto* node : node_map_->GetOutputs(loop_cond_name)) {
if (node->op() == "Switch") {
switch_node = node;
break;
}
}
if (!switch_node) {
return errors::InvalidArgument("LoopCond node of Frame ", frame_id,
" doesn't connect to any Switch node");
}
string switch_output = StrCat(switch_node->name(), ":1");
const string ctrl_dep = ConstantFolding::AddControlDependency(
switch_output, optimized_graph_, node_map_.get());
const_node->add_input(ctrl_dep);
node_map_->AddOutput(NodeName(ctrl_dep), const_node->name());
}
return absl::OkStatus();
}
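// Hoists a generic invariant node: its control inputs are dropped, and each
// edge to a variant consumer is routed through a freshly created constant
// Enter so that the hoisted value re-enters the frame.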
Status LoopInvariantNodeMotionOptimizer::HandleInvariantNode(
NodeDef* node, const int num_outputs, const int frame_id) {
for (int i = 0; i < node->input_size(); ++i) {
if (IsControlInput(node->input(i))) {
node->mutable_input()->SwapElements(i, node->input_size() - 1);
node->mutable_input()->RemoveLast();
}
}
if (num_outputs == 0) {
return absl::OkStatus();
}
DataTypeVector input_types;
DataTypeVector output_types;
OpRegistryInterface* op_registry = OpRegistry::Global();
const OpRegistrationData* op_reg_data = nullptr;
TF_RETURN_IF_ERROR(op_registry->LookUp(node->op(), &op_reg_data));
TF_RETURN_IF_ERROR(InOutTypesForNode(*node, op_reg_data->op_def, &input_types,
&output_types));
auto consumers = node_map_->GetOutputs(node->name());
string fname = invariant_enters_[frame_id][0]->attr().at("frame_name").s();
int piterations =
invariant_enters_[frame_id][0]->attr().at("parallel_iterations").i();
for (auto* consumer : consumers) {
if (!invariant_nodes_.count(consumer)) {
for (int i = 0; i < consumer->input_size(); ++i) {
int port;
string node_name = ParseNodeName(consumer->input(i), &port);
if (node_name != node->name()) {
continue;
}
if (port < 0) {
return errors::InvalidArgument(
"Invariant node should not have control outputs "
"to variant node");
}
DataType output_type = output_types[port];
NodeDef* new_enter = optimized_graph_->add_node();
new_enter->set_op("Enter");
new_enter->set_device(node->device());
new_enter->set_name(AddPrefixToNodeName(
StrCat(fname, "_enter_", new_enter_id_++), kLoopOptimizer));
AttrValue data_type;
data_type.set_type(output_type);
new_enter->mutable_attr()->insert({"T", data_type});
AttrValue frame_name;
frame_name.set_s(fname);
new_enter->mutable_attr()->insert({"frame_name", frame_name});
AttrValue is_const;
is_const.set_b(true);
new_enter->mutable_attr()->insert({"is_constant", is_const});
AttrValue parallel_iterations;
parallel_iterations.set_i(piterations);
new_enter->mutable_attr()->insert(
{"parallel_iterations", parallel_iterations});
new_enter->add_input(consumer->input(i));
*consumer->mutable_input(i) = new_enter->name();
node_map_->AddNode(new_enter->name(), new_enter);
node_map_->AddOutput(node->name(), new_enter->name());
node_map_->AddOutput(new_enter->name(), consumer->name());
}
}
}
return absl::OkStatus();
}
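// Dispatches every collected invariant node to the hoisting routine that
// matches its kind (Enter, Const, or anything else).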
Status LoopInvariantNodeMotionOptimizer::MoveInvariantNodes(
const int frame_id) {
for (auto iter = invariant_nodes_.begin(); iter != invariant_nodes_.end();
++iter) {
auto* invariant_node = iter->first;
const int num_outputs = iter->second;
if (IsEnter(*invariant_node)) {
TF_RETURN_IF_ERROR(HandleInvariantEnter(invariant_node, num_outputs));
} else if (IsConstant(*invariant_node)) {
TF_RETURN_IF_ERROR(HandleConst(invariant_node, num_outputs, frame_id));
} else {
TF_RETURN_IF_ERROR(
HandleInvariantNode(invariant_node, num_outputs, frame_id));
}
}
return absl::OkStatus();
}
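// Drops from the invariant set any non-Const, non-Enter node that still has
// a control edge into a variant consumer, then transitively reverts the
// nodes whose invariance depended on the removed ones.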
Status LoopInvariantNodeMotionOptimizer::RevertInvariantNodes() {
std::deque<const NodeDef*> reverted_nodes;
for (auto iter = invariant_nodes_.begin(); iter != invariant_nodes_.end();) {
bool erased = false;
const auto* node = iter->first;
if (!IsConstant(*node) && !IsEnter(*node) && iter->second > 0) {
auto& consumers = node_map_->GetOutputs(node->name());
for (auto* consumer : consumers) {
if (!invariant_nodes_.count(consumer)) {
for (const auto& input : consumer->input()) {
if (IsControlInput(input) && NodeName(input) == node->name()) {
reverted_nodes.push_back(node);
invariant_nodes_.erase(iter++);
erased = true;
break;
}
}
if (erased) break;
}
}
}
if (!erased) ++iter;
}
while (!reverted_nodes.empty()) {
const auto* node = reverted_nodes.front();
reverted_nodes.pop_front();
std::set<NodeDef*> producers;
for (const auto& input : node->input()) {
auto* producer = node_map_->GetNode(input);
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
if (IsControlInput(input) && !IsConstant(*producer) &&
!IsEnter(*producer)) {
reverted_nodes.push_back(producer);
invariant_nodes_.erase(iter);
} else {
producers.insert(producer);
}
}
}
for (auto* producer : producers) {
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
++iter->second;
}
}
for (auto* consumer : node_map_->GetOutputs(node->name())) {
auto iter = invariant_nodes_.find(consumer);
if (iter != invariant_nodes_.end()) {
reverted_nodes.push_back(consumer);
invariant_nodes_.erase(iter);
}
}
}
return absl::OkStatus();
}
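// Walks forward from a constant Enter collecting consumers whose every data
// input is already invariant (constants are admitted on the fly). The value
// stored per node counts how many of its consumers are still variant.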
Status LoopInvariantNodeMotionOptimizer::FindInvariantNodes(
NodeDef* start_node) {
std::vector<NodeDef*> stack;
stack.reserve(32);
stack.push_back(start_node);
while (!stack.empty()) {
NodeDef* node = stack.back();
stack.pop_back();
auto consumers = node_map_->GetOutputs(node->name());
invariant_nodes_.emplace(node, consumers.size());
for (auto* consumer : consumers) {
if (invariant_nodes_.count(consumer) || ModifiesFrameInfo(*consumer)) {
continue;
}
bool is_invariant = true;
for (const auto& input : consumer->input()) {
if (!IsControlInput(input)) {
const string name = NodeName(input);
auto* producer = node_map_->GetNode(name);
if (!invariant_nodes_.count(producer)) {
if (IsConstant(*producer)) {
invariant_nodes_.insert(
std::make_pair(producer, node_map_->GetOutputs(name).size()));
} else {
is_invariant = false;
break;
}
}
}
}
if (is_invariant) {
std::set<NodeDef*> producers;
for (const auto& input : consumer->input()) {
auto* producer = node_map_->GetNode(input);
producers.insert(producer);
}
for (auto* producer : producers) {
auto iter = invariant_nodes_.find(producer);
if (iter != invariant_nodes_.end()) {
--iter->second;
}
}
stack.push_back(consumer);
}
}
}
return absl::OkStatus();
}
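// Builds the frame tree, records each frame's LoopCond and constant Enter
// nodes, and processes frames from the leaves upward, hoisting invariants
// out of every frame that has at least one constant Enter.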
Status LoopInvariantNodeMotionOptimizer::Optimize() {
node_map_.reset(new NodeMap(optimized_graph_));
FrameView frame_view;
TF_RETURN_IF_ERROR(frame_view.InferFromGraph(*optimized_graph_));
frame_parent_.resize(frame_view.num_frames(), -1);
frame_children_.resize(frame_view.num_frames());
std::deque<int> worklist;
for (const NodeDef& node : optimized_graph_->node()) {
const std::vector<int>& frame_ids = frame_view.Frames(node);
if (frame_ids.size() >= 3) {
for (unsigned int i = 1; i < frame_ids.size() - 1; ++i) {
frame_parent_[frame_ids[i]] = frame_ids[i - 1];
frame_children_[frame_ids[i]].insert(frame_ids[i + 1]);
}
}
if (frame_ids.size() >= 2) {
frame_children_[frame_ids[0]].insert(frame_ids[1]);
frame_parent_[frame_ids.back()] = frame_ids[frame_ids.size() - 2];
}
if (!frame_ids.empty()) {
frame_children_[frame_ids.back()] = empty_set_;
if (node.op() == "LoopCond") {
if (loop_cond_.count(frame_ids.back())) {
return errors::InvalidArgument(
"Loop ", frame_ids.back(),
" has more than one LoopCond node: ", node.name(), " and ",
loop_cond_[frame_ids.back()]->name());
}
loop_cond_[frame_ids.back()] = &node;
}
if (IsEnter(node) && node.attr().at("is_constant").b()) {
invariant_enters_[frame_ids.back()].push_back(
const_cast<NodeDef*>(&node));
}
}
}
for (size_t i = 0; i < frame_children_.size(); i++) {
if (frame_children_[i].empty()) {
worklist.push_back(i);
}
}
while (!worklist.empty()) {
int frame_id = worklist.front();
new_enter_id_ = 0;
worklist.pop_front();
if (frame_parent_[frame_id] != -1) {
int parent_id = frame_parent_[frame_id];
frame_children_[parent_id].erase(frame_id);
if (frame_children_[parent_id].empty()) {
worklist.push_back(parent_id);
}
}
if (invariant_enters_[frame_id].empty()) {
continue;
}
invariant_nodes_.clear();
for (auto* enter : invariant_enters_[frame_id]) {
TF_RETURN_IF_ERROR(FindInvariantNodes(enter));
}
TF_RETURN_IF_ERROR(RevertInvariantNodes());
TF_RETURN_IF_ERROR(MoveInvariantNodes(frame_id));
}
return absl::OkStatus();
}
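// Returns the StackPush nodes whose pushes can be rewritten away: the
// traversal from the Stack aborts (returns nothing) if it reaches a
// preserved node, a StackPop with fanout, or any op outside the
// pass-through set, since the pushed values might then be consumed.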
std::vector<int> GetStackPushNodesToConvert(
const GraphTopologyView& graph_view,
const std::unordered_set<string>& nodes_to_preserve, int stack_node_idx) {
VLOG(1) << "Stack node: " << graph_view.graph()->node(stack_node_idx).name();
const std::unordered_set<string> op_types_to_traverse(
{"Stack", "StackV2", "Enter", "RefEnter", "Switch", "RefSwitch",
"_SwitchN", "Identity", "RefIdentity"});
const auto is_op_to_traverse = [&](const NodeDef* node) -> bool {
return op_types_to_traverse.find(node->op()) != op_types_to_traverse.end();
};
std::vector<int> nodes_to_convert;
std::vector<int> fanouts;
DfsTraversal(graph_view, {graph_view.GetNode(stack_node_idx)},
TraversalDirection::kFollowOutputs,
DfsPredicates::Advance(is_op_to_traverse),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
const absl::optional<int> idx = graph_view.GetNodeIndex(*node);
fanouts.push_back(idx.value());
}));
for (int fanout_idx : fanouts) {
const NodeDef& fanout_node = graph_view.graph()->node(fanout_idx);
VLOG(1) << "Fanout " << fanout_idx << " : " << fanout_node.name();
if (IsStackPushOp(fanout_node)) {
if (graph_view.HasNode(fanout_node.input(0))) {
const NodeDef* stack_node = graph_view.GetNode(fanout_node.input(0));
while (stack_node->op() != "Stack" && stack_node->op() != "StackV2" &&
stack_node->input_size() > 0 &&
graph_view.HasNode(stack_node->input(0))) {
stack_node = graph_view.GetNode(stack_node->input(0));
}
if (nodes_to_preserve.find(stack_node->name()) ==
nodes_to_preserve.end()) {
nodes_to_convert.push_back(fanout_idx);
}
} else {
nodes_to_convert.push_back(fanout_idx);
}
} else if (IsStackOp(fanout_node) || IsStackCloseOp(fanout_node) ||
op_types_to_traverse.find(fanout_node.op()) !=
op_types_to_traverse.end()) {
continue;
} else if (!IsStackPopOp(fanout_node) ||
(!graph_view.GetFanout(fanout_idx).empty() ||
nodes_to_preserve.find(fanout_node.name()) !=
nodes_to_preserve.end())) {
nodes_to_convert.clear();
break;
}
}
return nodes_to_convert;
}
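// Converts every removable StackPush into an Identity: the pushed value
// becomes the data input and the stack handle is demoted to a control
// dependency so execution ordering is preserved.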
Status RemoveStackOps(const std::unordered_set<string>& nodes_to_preserve,
GraphDef* optimized_graph) {
NodeMap node_map(optimized_graph);
GraphTopologyView graph_view;
TF_RETURN_IF_ERROR(graph_view.InitializeFromGraph(*optimized_graph));
for (int node_idx = 0; node_idx < optimized_graph->node_size(); ++node_idx) {
if (IsStackOp(optimized_graph->node(node_idx))) {
for (int push_node_idx : GetStackPushNodesToConvert(
graph_view, nodes_to_preserve, node_idx)) {
NodeDef* push_node = optimized_graph->mutable_node(push_node_idx);
VLOG(1) << "Converting " << push_node_idx << " : "
<< push_node->DebugString();
if (push_node->attr().count("swap_memory") != 0) {
push_node->mutable_attr()->erase("swap_memory");
}
push_node->set_op("Identity");
push_node->mutable_input()->SwapElements(0, 1);
const string ctrl_dep = ConstantFolding::AddControlDependency(
push_node->input(1), optimized_graph, &node_map);
push_node->set_input(1, ctrl_dep);
VLOG(1) << "After converting: " << push_node->DebugString();
}
}
}
return absl::OkStatus();
}
bool IsSimpleBinaryOperator(const NodeDef& node) {
return (IsLess(node) || IsLessEqual(node) || IsGreater(node) ||
IsGreaterEqual(node) || IsEqual(node));
}
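// Evaluates a binary comparison with two constant operands on the CPU and
// returns its scalar boolean result.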
Status EvaluateBoolOpForConstantOperands(const NodeDef& op_node,
const NodeDef& constant_operand_0,
const NodeDef& constant_operand_1,
DeviceBase* cpu_device,
ResourceMgr* resource_mgr,
bool* value) {
VLOG(4) << "Evaluate bool op: op_node=" << op_node.name()
<< " input0=" << constant_operand_0.name()
<< " input1=" << constant_operand_1.name();
TensorVector inputs;
const TensorProto& raw_val_0 = constant_operand_0.attr().at("value").tensor();
Tensor value_0(raw_val_0.dtype(), raw_val_0.tensor_shape());
CHECK(value_0.FromProto(raw_val_0));
inputs.emplace_back(&value_0);
const TensorProto& raw_val_1 = constant_operand_1.attr().at("value").tensor();
Tensor value_1(raw_val_1.dtype(), raw_val_1.tensor_shape());
CHECK(value_1.FromProto(raw_val_1));
inputs.emplace_back(&value_1);
TensorVector outputs;
TF_RETURN_IF_ERROR(
EvaluateNode(op_node, inputs, cpu_device, resource_mgr, &outputs));
if (outputs.size() != 1 || outputs[0].tensor == nullptr) {
return Status(absl::StatusCode::kInvalidArgument, "Expected one output.");
}
*value = outputs[0].tensor->scalar<bool>()();
delete outputs[0].tensor;
return absl::OkStatus();
}
bool IsReallyConstant(const NodeDef& node,
const absl::flat_hash_set<string>& feed_nodes) {
if (!IsConstant(node)) {
return false;
}
return feed_nodes.find(node.name()) == feed_nodes.end();
}
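// Decides whether one output port of a Switch is provably dead: either the
// predicate is a true constant, or the Switch guards a while loop whose
// condition compares two constants and evaluates to false on the first
// iteration (a zero-trip loop), making the loop-body output dead.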
Status CheckForDeadFanout(const MutableGraphView& view,
const NodeDef& switch_node, const NodeMap& node_map,
const absl::flat_hash_set<string>& feed_nodes,
DeviceBase* cpu_device, ResourceMgr* resource_mgr,
bool* has_dead_fanout, int* dead_fanout) {
*has_dead_fanout = false;
GraphView::InputPort switch_loopcond_port(&switch_node, 1);
const NodeDef* switch_predicate =
view.GetRegularFanin(switch_loopcond_port).node;
if (IsReallyConstant(*switch_predicate, feed_nodes)) {
VLOG(3) << "Found switch node with constant predicate:"
<< " switch_node=" << switch_node.name()
<< " switch_predicate=" << switch_predicate->name();
Tensor selector;
CHECK(selector.FromProto(switch_predicate->attr().at("value").tensor()));
*has_dead_fanout = true;
*dead_fanout = selector.scalar<bool>()() ? 0 : 1;
return absl::OkStatus();
}
GraphView::InputPort switch_input_port(&switch_node, 0);
const NodeDef* switch_input = view.GetRegularFanin(switch_input_port).node;
if (!IsMerge(*switch_input) || !IsLoopCond(*switch_predicate)) {
return absl::OkStatus();
}
VLOG(4) << "Try to find a zero iteration while loop:"
<< " switch_node=" << switch_node.name();
NodeDef* switch_ctrl_node = view.GetRegularFanin({switch_predicate, 0}).node;
if (!switch_ctrl_node || !IsSimpleBinaryOperator(*switch_ctrl_node)) {
return absl::OkStatus();
}
NodeDef* merge_node = nullptr;
NodeDef* constant_ctrl_input = nullptr;
int constant_index = 0;
for (int i = 0; i < switch_ctrl_node->input().size(); ++i) {
const string& input = switch_ctrl_node->input(i);
if (IsControlInput(input)) continue;
NodeDef* node = view.GetNode(switch_ctrl_node->input(i));
if (IsMerge(*node)) {
merge_node = node;
}
if (IsReallyConstant(*node, feed_nodes)) {
constant_ctrl_input = node;
constant_index = i;
}
}
if (merge_node == nullptr || constant_ctrl_input == nullptr) {
return absl::OkStatus();
}
NodeDef* enter_node = nullptr;
NodeDef* constant_init_node = nullptr;
for (const auto& input : merge_node->input()) {
NodeDef* node = node_map.GetNode(input);
if (IsEnter(*node)) {
enter_node = node;
}
if (IsReallyConstant(*node, feed_nodes)) {
constant_init_node = node;
}
}
if (enter_node != nullptr) {
if (constant_init_node != nullptr) return absl::OkStatus();
for (const auto& input : enter_node->input()) {
NodeDef* node = node_map.GetNode(input);
if (IsReallyConstant(*node, feed_nodes)) {
constant_init_node = node;
}
}
}
if (constant_init_node == nullptr) {
return absl::OkStatus();
}
VLOG(4) << "Check if loop will be 0 iterations:"
<< "\n| switch_node : " << switch_node.name()
<< "\n| switch_ctrl_node : " << switch_ctrl_node->name()
<< "\n| merge_node : " << merge_node->name()
<< "\n| constant_ctrl_input: " << constant_ctrl_input->name()
<< "\n| enter_node : "
<< (enter_node ? enter_node->name() : "<n/a>")
<< "\n| constant_init_node : " << constant_init_node->name();
NodeDef* operand_0 =
constant_index ? constant_init_node : constant_ctrl_input;
NodeDef* operand_1 =
constant_index ? constant_ctrl_input : constant_init_node;
bool constant_switch_value;
TF_RETURN_IF_ERROR(EvaluateBoolOpForConstantOperands(
*switch_ctrl_node, *operand_0, *operand_1, cpu_device, resource_mgr,
&constant_switch_value));
if (constant_switch_value == false) {
VLOG(3) << "Remove 0 iteration while loop:"
<< " switch_node=" << switch_node.name();
*has_dead_fanout = true;
*dead_fanout = 1;
} else {
VLOG(4) << "Was not able to prove that loop has 0 iterations.";
}
return absl::OkStatus();
}
}
LoopOptimizer::LoopOptimizer()
: opt_level_(RewriterConfig::ON),
cpu_device_(nullptr),
options_(LoopOptimizerOptions::Default(RewriterConfig::ON)) {}
LoopOptimizer::LoopOptimizer(RewriterConfig::Toggle opt_level,
DeviceBase* cpu_device)
: opt_level_(opt_level),
cpu_device_(cpu_device),
options_(LoopOptimizerOptions::Default(RewriterConfig::ON)) {
resource_mgr_.reset(new ResourceMgr());
}
Status LoopOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
if (!options_.enable_loop_invariant_node_motion &&
!options_.enable_stack_push_removal &&
!options_.enable_dead_branch_removal) {
return errors::Aborted("Nothing to do.");
}
*optimized_graph = item.graph;
if (options_.enable_loop_invariant_node_motion) {
LoopInvariantNodeMotionOptimizer linm_optimizer(optimized_graph);
TF_RETURN_IF_ERROR(linm_optimizer.Optimize());
}
if (options_.enable_stack_push_removal) {
TF_RETURN_IF_ERROR(RemoveStackOps(item.NodesToPreserve(), optimized_graph));
}
if (options_.enable_dead_branch_removal) {
NodeMap node_map(optimized_graph);
absl::flat_hash_set<string> feed_nodes;
for (const auto& feed : item.feed) {
feed_nodes.insert(NodeName(feed.first));
}
TF_RETURN_IF_ERROR(RemoveDeadBranches(item.NodesToPreserve(), node_map,
feed_nodes, optimized_graph));
}
return absl::OkStatus();
}
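// When a two-output Switch or Merge is rewritten into a single-output
// Identity, any full type information must be narrowed from two outputs to
// one.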
static Status update_identity_node_type(NodeDef* sw_node) {
if (sw_node->has_experimental_type() &&
(sw_node->experimental_type().type_id() == TFT_PRODUCT)) {
FullTypeDef old_t = sw_node->experimental_type();
if (old_t.args_size() != 2) {
return errors::Internal(
"When converting Switch or Merge node '", sw_node->name(),
"' to Identity, full type of original node describes ",
old_t.args_size(), " outputs, not 2.\n", old_t.DebugString());
}
FullTypeDef new_t;
new_t.set_type_id(TFT_PRODUCT);
*(new_t.add_args()) = old_t.args()[0];
*(sw_node->mutable_experimental_type()) = new_t;
}
return absl::OkStatus();
}
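// For each Switch with a provably dead output port, flood-fills the dead
// fanout (a Merge only dies once all of its data inputs are dead) and, if
// no preserved node or ControlTrigger is reached, erases the dead nodes and
// downgrades the surviving Merge and constant-predicate Switch nodes to
// Identity.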
Status LoopOptimizer::RemoveDeadBranches(
const std::unordered_set<string>& nodes_to_preserve, NodeMap& node_map,
const absl::flat_hash_set<string>& feed_nodes, GraphDef* optimized_graph) {
std::unordered_set<const NodeDef*> dead_nodes;
std::unordered_map<NodeDef*, std::set<int>> dead_merge_inputs;
absl::flat_hash_set<GraphView::OutputPort> identity_switches;
MutableGraphView view(optimized_graph);
for (const NodeDef& node : optimized_graph->node()) {
if (!IsSwitch(node)) {
continue;
}
if (node.op() == "_SwitchN") {
continue;
}
if (nodes_to_preserve.find(node.name()) != nodes_to_preserve.end()) {
continue;
}
int dead_fanout;
bool has_dead_fanout;
TF_RETURN_IF_ERROR(CheckForDeadFanout(view, node, node_map, feed_nodes,
cpu_device_, resource_mgr_.get(),
&has_dead_fanout, &dead_fanout));
if (!has_dead_fanout) {
continue;
}
GraphView::OutputPort dead(&node, dead_fanout);
SetVector<MutableGraphView::InputPort, absl::Hash<MutableGraphView::Port>>
zombie_inputs;
for (const MutableGraphView::InputPort& port : view.GetFanout(dead)) {
if (dead_nodes.find(port.node) == dead_nodes.end()) {
zombie_inputs.PushBack(port);
}
}
std::unordered_set<const NodeDef*> local_dead_nodes = dead_nodes;
std::unordered_map<NodeDef*, std::set<int>> local_dead_merge_inputs =
dead_merge_inputs;
bool found_node_to_preserve = false;
while (!found_node_to_preserve && !zombie_inputs.Empty()) {
MutableGraphView::InputPort dead = zombie_inputs.PopBack();
if (nodes_to_preserve.find(dead.node->name()) !=
nodes_to_preserve.end()) {
found_node_to_preserve = true;
break;
}
if (local_dead_nodes.find(dead.node) != local_dead_nodes.end()) {
continue;
}
if (IsMerge(*dead.node)) {
const int num_data_inputs = dead.node->attr().at("N").i();
if (num_data_inputs > 2) {
found_node_to_preserve = true;
break;
}
MutableGraphView::OutputPort value_index(dead.node, 1);
const absl::flat_hash_set<MutableGraphView::InputPort>& index_fanout =
view.GetFanout(value_index);
if (!index_fanout.empty()) {
found_node_to_preserve = true;
break;
}
bool fully_dead = false;
if (dead.port_id >= 0) {
local_dead_merge_inputs[dead.node].insert(dead.port_id);
if (local_dead_merge_inputs[dead.node].size() == num_data_inputs) {
fully_dead = true;
}
} else {
local_dead_merge_inputs.insert({dead.node, {}});
}
if (fully_dead) {
local_dead_merge_inputs.erase(dead.node);
local_dead_nodes.insert(dead.node);
for (const MutableGraphView::InputPort& port :
view.GetFanouts(*dead.node, /*include_controlled_nodes=*/true)) {
zombie_inputs.PushBack(port);
}
}
} else if (dead.node->op() == "ControlTrigger") {
found_node_to_preserve = true;
break;
} else {
if (local_dead_nodes.insert(dead.node).second) {
for (const MutableGraphView::InputPort& dead_fanout :
view.GetFanouts(*dead.node, /*include_controlled_nodes=*/true)) {
zombie_inputs.PushBack(dead_fanout);
}
}
}
}
if (!found_node_to_preserve) {
std::swap(dead_nodes, local_dead_nodes);
std::swap(dead_merge_inputs, local_dead_merge_inputs);
identity_switches.insert(dead);
VLOG(3) << "Found no nodes to preserve in fanout of switch node: "
<< node.name() << ", fanout port: " << dead_fanout;
}
}
std::vector<int> nodes_idx_to_delete;
nodes_idx_to_delete.reserve(dead_nodes.size());
for (int i = 0; i < optimized_graph->node_size(); ++i) {
if (dead_nodes.count(&optimized_graph->node(i)))
nodes_idx_to_delete.push_back(i);
}
absl::flat_hash_set<absl::string_view> dead_node_names;
dead_node_names.reserve(dead_nodes.size());
for (const NodeDef* dead_node : dead_nodes) {
dead_node_names.insert(dead_node->name());
}
for (const auto& itr : dead_merge_inputs) {
NodeDef* merge_node = itr.first;
if (dead_nodes.find(merge_node) != dead_nodes.end()) {
continue;
}
const std::set<int>& dead_inputs = itr.second;
const int num_data_inputs = merge_node->attr().at("N").i();
if (merge_node->input_size() != num_data_inputs) {
LOG(WARNING)
<< "Skipping loop optimization for Merge node with control input: "
<< merge_node->name();
return absl::OkStatus();
} else if (dead_inputs.size() != 1 || num_data_inputs != 2) {
LOG(WARNING) << "Skipping loop optimization for Merge node ("
<< merge_node->name()
<< ") with unexpected dead_inputs.size() ("
<< dead_inputs.size() << ") or num_data_inputs ("
<< num_data_inputs << ")";
return absl::OkStatus();
}
}
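  // Second pass: every surviving Merge with exactly one dead data input (of
  // two) reduces to an Identity of the remaining live input.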
for (const auto& itr : dead_merge_inputs) {
NodeDef* merge_node = itr.first;
if (dead_nodes.find(merge_node) != dead_nodes.end()) {
continue;
}
VLOG(3) << "Merge node before cleanup: " << merge_node->DebugString();
const std::set<int>& dead_inputs = itr.second;
int index = *dead_inputs.begin();
auto* inputs = merge_node->mutable_input();
inputs->SwapElements(1, index);
inputs->SwapElements(1, merge_node->input_size() - 1);
inputs->RemoveLast();
merge_node->set_op("Identity");
merge_node->mutable_attr()->erase("N");
TF_RETURN_IF_ERROR(update_identity_node_type(merge_node));
VLOG(3) << "Merge node after cleanup: " << merge_node->DebugString();
}
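  // For each Switch whose predicate turned out to be a real constant, route
  // consumers of the live output to the Switch's port 0 output, then demote
  // the Switch to an Identity that keeps a control dependency on the
  // predicate.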
for (const auto& id_switch : identity_switches) {
    NodeDef* sw_node = const_cast<NodeDef*>(id_switch.node);
int dead_port_id = id_switch.port_id;
NodeDef* pred = node_map.GetNode(sw_node->input(1));
if (IsReallyConstant(*pred, feed_nodes) && sw_node->op() == "Switch") {
int live_port_id = (dead_port_id + 1) % 2;
string live_output_name = sw_node->name();
if (live_port_id == 1) {
live_output_name = StrCat(sw_node->name(), ":1");
}
auto consumers = node_map.GetOutputs(sw_node->name());
for (auto* consumer : consumers) {
for (int i = 0; i < consumer->input_size(); ++i) {
if (consumer->input(i) == live_output_name) {
consumer->set_input(i, sw_node->name());
node_map.UpdateInput(consumer->name(), live_output_name,
sw_node->name());
}
}
}
VLOG(3) << "Switch node before cleanup: " << sw_node->DebugString();
const string ctrl_dep = ConstantFolding::AddControlDependency(
pred->name(), optimized_graph, &node_map);
node_map.UpdateInput(sw_node->name(), pred->name(), ctrl_dep);
sw_node->set_input(1, ctrl_dep);
sw_node->set_op("Identity");
TF_RETURN_IF_ERROR(update_identity_node_type(sw_node));
VLOG(3) << "Switch node after cleanup: " << sw_node->DebugString();
}
}
EraseNodesFromGraph(std::move(nodes_idx_to_delete), optimized_graph);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/loop_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class LoopOptimizerTest : public GrapplerTest {
protected:
void AddEnterNode(const string& name, const string& frame,
const bool is_constant, const int piterations,
const std::vector<string>& inputs, GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
AttrValue type;
type.set_type(DT_FLOAT);
attributes.emplace_back("T", type);
AttrValue frame_name;
frame_name.set_s(frame);
attributes.emplace_back("frame_name", frame_name);
AttrValue is_const;
is_const.set_b(is_constant);
attributes.emplace_back("is_constant", is_const);
AttrValue parallel_iterations;
parallel_iterations.set_i(piterations);
attributes.emplace_back("parallel_iterations", parallel_iterations);
AddNode(name, "Enter", inputs, attributes, graph);
}
void AddSimpleNode(const string& name, const string& op,
const std::vector<string>& inputs, GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
AttrValue type;
type.set_type(DT_FLOAT);
attributes.emplace_back("T", type);
AddNode(name, op, inputs, attributes, graph);
}
void EnableOnlyLoopInvariantNodeMotion(LoopOptimizer* optimizer) {
DisableAllStages(optimizer);
optimizer->options_.enable_loop_invariant_node_motion = true;
}
void EnableOnlyStackPushRemoval(LoopOptimizer* optimizer) {
DisableAllStages(optimizer);
optimizer->options_.enable_stack_push_removal = true;
}
private:
void DisableAllStages(LoopOptimizer* optimizer) {
LoopOptimizer::LoopOptimizerOptions options;
options.enable_loop_invariant_node_motion = false;
options.enable_stack_push_removal = false;
optimizer->options_ = options;
}
};
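// Most tests below hand-build the classic raw while-loop skeleton:
//
//   In -> Enter -> Merge -> Switch -> Identity -> body -> NextIteration
//                    ^        |  ^                              |
//                    |        |  LoopCond                       |
//                    |        +-> Exit -> Out                   |
//                    +-------------------------------------------+
//
// with an "InvariantAdd" (loop-invariant, hoistable) and a "VariantAdd"
// (iteration-dependent, not hoistable) in the body.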
TEST_F(LoopOptimizerTest, Basic) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
const auto* variant_add_node = view.GetNode("VariantAdd");
ASSERT_NE(variant_add_node, nullptr);
const auto* variant_add_node_def = variant_add_node->node();
ASSERT_EQ(frames.Frames(*variant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*variant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
const auto* variant_add_node = view.GetNode("VariantAdd");
ASSERT_NE(variant_add_node, nullptr);
const auto* variant_add_node_def = variant_add_node->node();
ASSERT_EQ(frames.Frames(*variant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*variant_add_node_def).back(), 0);
}
}
TEST_F(LoopOptimizerTest, Const) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("Const", "Const", {"^Identity"}, &graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "Const"}, &graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
const auto* const_node = view.GetNode("Const");
ASSERT_NE(const_node, nullptr);
const auto* const_node_node_def = const_node->node();
ASSERT_EQ(frames.Frames(*const_node_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*const_node_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
const auto* const_node = view.GetNode("Const");
ASSERT_NE(const_node, nullptr);
const auto* const_node_node_def = const_node->node();
ASSERT_EQ(frames.Frames(*const_node_node_def).size(), 0);
}
}
TEST_F(LoopOptimizerTest, ControlOutput) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"VariantAdd", "Less/y", "^InvariantAdd"},
&graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"VariantAdd"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
}
TEST_F(LoopOptimizerTest, NestedLoop1) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"VariantAdd"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "InvariantEnter2"},
&graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_node_def).back(), 0);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 0);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
const auto* invariant_add_node = view.GetNode("InvariantAdd");
ASSERT_NE(invariant_add_node, nullptr);
const auto* invariant_add_node_def = invariant_add_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_node_def).size(), 0);
}
}
TEST_F(LoopOptimizerTest, NestedLoop2) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"InvariantAdd"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "InvariantEnter2"},
&graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 0);
const auto* variant_add_2_node = view.GetNode("VariantAdd2");
ASSERT_NE(variant_add_2_node, nullptr);
const auto* variant_add_2_node_def = variant_add_2_node->node();
ASSERT_EQ(frames.Frames(*variant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*variant_add_2_node_def).back(), 1);
}
}
TEST_F(LoopOptimizerTest, NestedLoopConst1) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"VariantAdd"}, &graph);
AddSimpleNode("Const2", "Const", {"^Identity2"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "Const2"}, &graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 0);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 1);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 0);
}
}
TEST_F(LoopOptimizerTest, NestedLoopConst2) {
GraphDef graph;
AddSimpleNode("In", "Identity", {}, &graph);
AddEnterNode("InvariantEnter", "while/while_context", true, 1, {"In"},
&graph);
AddSimpleNode("InvariantAdd", "Add", {"InvariantEnter", "InvariantEnter"},
&graph);
AddSimpleNode("VariantAdd", "Add", {"InvariantAdd", "Identity"}, &graph);
AddEnterNode("VariantEnter", "while/while_context", false, 1, {"In"}, &graph);
AddSimpleNode("Merge", "Merge", {"VariantEnter", "NextIteration"}, &graph);
AddSimpleNode("Less/y", "Const", {"^Identity"}, &graph);
AddSimpleNode("Less", "Less", {"Exit2", "Less/y"}, &graph);
AddSimpleNode("LoopCond", "LoopCond", {"Less"}, &graph);
AddSimpleNode("Switch", "Switch", {"Merge", "LoopCond"}, &graph);
AddSimpleNode("Identity", "Identity", {"Switch:1"}, &graph);
AddSimpleNode("NextIteration", "NextIteration", {"Exit2"}, &graph);
AddSimpleNode("Exit", "Exit", {"Switch"}, &graph);
AddSimpleNode("Out", "Identity", {"Exit"}, &graph);
AddEnterNode("InvariantEnter2", "while/while/while_context", true, 1,
{"InvariantAdd"}, &graph);
AddSimpleNode("Const2", "Const", {"^Identity2"}, &graph);
AddSimpleNode("InvariantAdd2", "Add", {"InvariantEnter2", "Const2"}, &graph);
AddSimpleNode("VariantAdd2", "Add", {"InvariantAdd2", "Identity2"}, &graph);
AddEnterNode("VariantEnter2", "while/while/while_context", false, 1,
{"VariantEnter"}, &graph);
AddSimpleNode("Merge2", "Merge", {"VariantEnter2", "NextIteration2"}, &graph);
AddSimpleNode("Less2/y", "Const", {"^Identity2"}, &graph);
AddSimpleNode("Less2", "Less", {"VariantAdd2", "Less2/y"}, &graph);
AddSimpleNode("LoopCond2", "LoopCond", {"Less2"}, &graph);
AddSimpleNode("Switch2", "Switch", {"Merge2", "LoopCond2"}, &graph);
AddSimpleNode("Identity2", "Identity", {"Switch2:1"}, &graph);
AddSimpleNode("NextIteration2", "NextIteration", {"VariantAdd2"}, &graph);
AddSimpleNode("Exit2", "Exit", {"Switch2"}, &graph);
GrapplerItem item;
item.graph = graph;
LoopOptimizer optimizer;
EnableOnlyLoopInvariantNodeMotion(&optimizer);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
{
Status status;
utils::GraphView view(&graph, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*invariant_add_2_node_def).back(), 1);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 2);
EXPECT_EQ(frames.Frames(*const_2_node_def).back(), 1);
}
{
Status status;
utils::GraphView view(&output, &status);
TF_ASSERT_OK(status);
FrameView frames;
TF_EXPECT_OK(frames.InferFromGraphView(view));
EXPECT_EQ(frames.num_frames(), 2);
const auto* invariant_add_2_node = view.GetNode("InvariantAdd2");
ASSERT_NE(invariant_add_2_node, nullptr);
const auto* invariant_add_2_node_def = invariant_add_2_node->node();
ASSERT_EQ(frames.Frames(*invariant_add_2_node_def).size(), 0);
const auto* const_2_node = view.GetNode("Const2");
ASSERT_NE(const_2_node, nullptr);
const auto* const_2_node_def = const_2_node->node();
ASSERT_EQ(frames.Frames(*const_2_node_def).size(), 0);
}
}
void VerifyGraphsEqual(const GraphDef& original_graph,
const GraphDef& optimized_graph, const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(optimized.name(), original.name()) << func;
EXPECT_EQ(optimized.op(), original.op()) << func;
ASSERT_EQ(optimized.input_size(), original.input_size()) << func;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(optimized.input(j), original.input(j)) << func;
}
}
}
TEST_F(LoopOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushNoOp) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("pop1", "StackPopV2", {"stack1"}, &graph);
AddSimpleNode("id1", "Identity", {"pop1"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter2_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter2_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter2_stack2", "enter2_c"}, &graph);
AddSimpleNode("pop2", "StackPopV2", {"enter2_stack2"}, &graph);
AddSimpleNode("id2", "Identity", {"pop2"}, &graph);
AddSimpleNode("stack3", "StackV2", {}, &graph);
AddSimpleNode("push3", "StackPushV2", {"stack3", "c"}, &graph);
AddSimpleNode("stop", "StopGradient", {"stack3"}, &graph);
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushNoPopButStackLives) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter2_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter2_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter2_stack2", "enter2_c"}, &graph);
item.keep_ops.push_back("stack1");
item.keep_ops.push_back("stack2");
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsEqual(item.graph, output, __FUNCTION__);
}
TEST_F(LoopOptimizerTest, RemovePushWithoutMatchingPop) {
GrapplerItem item;
GraphDef& graph = item.graph;
AddSimpleNode("c", "Const", {}, &graph);
AddSimpleNode("stack1", "StackV2", {}, &graph);
AddSimpleNode("push1", "StackPushV2", {"stack1", "c"}, &graph);
AddSimpleNode("stack2", "StackV2", {}, &graph);
AddEnterNode("enter_c", "frame_name", false, 1, {"c"}, &graph);
AddEnterNode("enter_stack2", "frame_name", false, 1, {"stack2"}, &graph);
AddSimpleNode("push2", "StackPushV2", {"enter_stack2", "enter_c"}, &graph);
AddSimpleNode("stack3", "StackV2", {}, &graph);
AddSimpleNode("push3", "StackPushV2", {"stack3", "c"}, &graph);
AddSimpleNode("pop3", "StackPopV2", {"stack3"}, &graph);
AddSimpleNode("stack4", "StackV2", {}, &graph);
AddSimpleNode("push4", "StackPushV2", {"stack4", "c"}, &graph);
AddSimpleNode("pop4", "StackPopV2", {"stack4"}, &graph);
item.fetch.push_back("pop4");
LoopOptimizer optimizer;
EnableOnlyStackPushRemoval(&optimizer);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(output.node_size(), 13);
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "push1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "c");
EXPECT_EQ(node.input(1), "^stack1");
} else if (node.name() == "push2") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "enter_c");
EXPECT_EQ(node.input(1), "^enter_stack2");
} else if (node.name() == "push3") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "c");
EXPECT_EQ(node.input(1), "^stack3");
} else {
const NodeDef& orig_node = item.graph.node(i);
EXPECT_EQ(node.ShortDebugString(), orig_node.ShortDebugString());
}
}
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesConstantCondition) {
Scope scope = Scope::NewRootScope();
Output v_in = ops::Const<float>(scope.WithOpName("v_in"), {123.0}, {});
Output ctrl1 = ops::Const(scope.WithOpName("ctrl1"), false, TensorShape({}));
ops::Switch s1(scope.WithOpName("switch1"), v_in, ctrl1);
Output square1 = ops::Square(scope.WithOpName("square1"), s1.output_false);
Output sqrt1 = ops::Sqrt(scope.WithOpName("sqrt1"), s1.output_true);
FullTypeDef* s1_t =
s1.operation.node()->mutable_def()->mutable_experimental_type();
s1_t->set_type_id(TFT_PRODUCT);
s1_t->add_args()->set_type_id(TFT_TENSOR);
s1_t->mutable_args(0)->add_args()->set_type_id(TFT_FLOAT);
s1_t->add_args()->set_type_id(TFT_TENSOR);
s1_t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
EXPECT_EQ(s1.operation.node()->num_outputs(),
s1.operation.node()->def().experimental_type().args_size());
Output ctrl2 = ops::Const(scope.WithOpName("ctrl2"), true, TensorShape({}));
ops::Switch s2(scope.WithOpName("switch2"), v_in, ctrl2);
Output square2 = ops::Square(scope.WithOpName("square2"), s2.output_false);
Output sqrt2 = ops::Sqrt(scope.WithOpName("sqrt2"), s2.output_true);
Output ctrl3 = ops::Const(scope.WithOpName("ctrl3"), false, TensorShape({}));
ops::Switch s3(scope.WithOpName("switch3"), v_in, ctrl3);
Output square3 = ops::Square(scope.WithOpName("square3"), s3.output_false);
Output sqrt3 = ops::Sqrt(scope.WithOpName("sqrt3"), s3.output_true);
Output ctrl4 = ops::Const(scope.WithOpName("ctrl4"), false, TensorShape({}));
ops::Switch s4(scope.WithOpName("switch4"), v_in, ctrl4);
Output square4 = ops::Square(scope.WithOpName("square4"), s4.output_false);
Output sqrt4 = ops::Sqrt(scope.WithOpName("sqrt4"), s4.output_true);
ops::Merge m1(scope.WithOpName("m1"), {square1, sqrt1});
FullTypeDef* m1_t =
m1.operation.node()->mutable_def()->mutable_experimental_type();
m1_t->set_type_id(TFT_PRODUCT);
m1_t->add_args()->set_type_id(TFT_TENSOR);
m1_t->mutable_args(0)->add_args()->set_type_id(TFT_FLOAT);
m1_t->add_args()->set_type_id(TFT_TENSOR);
m1_t->mutable_args(1)->add_args()->set_type_id(TFT_INT32);
EXPECT_EQ(m1.operation.node()->num_outputs(),
m1.operation.node()->def().experimental_type().args_size());
ops::Merge m2(scope.WithOpName("m2"), {v_in, square1});
ops::Merge m3(scope.WithOpName("m3"), {v_in, sqrt1});
ops::Merge m4(scope.WithOpName("m4"), {square1, sqrt2});
ops::Merge m5(scope.WithOpName("m5"), {square2, sqrt1});
ops::Switch s5(scope.WithOpName("switch5"), v_in, ctrl1);
Output id1 = ops::Identity(scope.WithOpName("id1"), s5.output_false);
Output id2 = ops::Identity(scope.WithOpName("id2"), s5.output_true);
ops::Merge m8(scope.WithOpName("m8"), {id1, id2});
ops::Switch s6(scope.WithOpName("switch6"), v_in, ctrl1);
Output id3 = ops::Identity(scope.WithOpName("id3"), s6.output_false);
Output id4 = ops::Identity(scope.WithOpName("id4"), s6.output_true);
ops::Merge m9(scope.WithOpName("m9"), {id3, id4});
GrapplerItem item;
item.fetch.push_back("m8");
item.fetch.push_back("id4");
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
LoopOptimizer optimizer(RewriterConfig::AGGRESSIVE, nullptr);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_CHECK_OK(status);
for (const NodeDef& node : output.node()) {
    // The dead branches must have been pruned: sqrt1 (dead side of switch1)
    // and square2 (dead side of switch2), along with m5 whose inputs were
    // both dead.
    EXPECT_NE(node.name(), "sqrt1");
    EXPECT_NE(node.name(), "square2");
EXPECT_NE(node.name(), "m5");
if (node.name() == "m1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "square1");
EXPECT_EQ(node.experimental_type().args_size(), 1);
} else if (node.name() == "m2") {
EXPECT_EQ(node.op(), "Merge");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "square1");
} else if (node.name() == "m3") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "v_in");
} else if (node.name() == "m4") {
EXPECT_EQ(node.op(), "Merge");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "square1");
EXPECT_EQ(node.input(1), "sqrt2");
} else if (node.name() == "m8") {
EXPECT_EQ(node.op(), "Merge");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "id1");
EXPECT_EQ(node.input(1), "id2");
} else if (node.name() == "m9") {
EXPECT_EQ(node.op(), "Merge");
      ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "id3");
EXPECT_EQ(node.input(1), "id4");
} else if (node.name() == "switch1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "^ctrl1");
EXPECT_EQ(node.experimental_type().args_size(), 1);
} else if (node.name() == "switch2") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "^ctrl2");
} else if (node.name() == "switch3") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "^ctrl3");
} else if (node.name() == "switch4") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "^ctrl4");
} else if (node.name() == "switch5") {
EXPECT_EQ(node.op(), "Switch");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "ctrl1");
} else if (node.name() == "switch6") {
EXPECT_EQ(node.op(), "Switch");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "ctrl1");
}
}
auto tensors_expected = EvaluateNodes(item.graph, {"m8", "m9"});
ASSERT_EQ(tensors_expected.size(), 2);
auto tensors = EvaluateNodes(output, {"m8", "m9"});
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
test::ExpectTensorNear<float>(tensors_expected[1], tensors[1], 1e-6);
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesConstantCondition2) {
Scope scope = Scope::NewRootScope();
Output v_in = ops::Const<float>(scope.WithOpName("v_in"), {123.0}, {});
Output ctrl1 = ops::Const(scope.WithOpName("ctrl1"), true, TensorShape({}));
ops::Switch s1(scope.WithOpName("switch1"), v_in, ctrl1);
Output square1 = ops::Square(scope.WithOpName("square1"), s1.output_false);
Output add1 =
ops::Add(scope.WithOpName("add1"), s1.output_true, s1.output_true);
Output const2 = ops::Const<float>(scope.WithOpName("const2"), {20.0}, {});
Output add2 = ops::Add(scope.WithOpName("add2"), s1.output_true, const2);
Output sub1 = ops::Sub(scope.WithOpName("sub1"), add1, add2);
ops::Merge m1(scope.WithOpName("m1"), {square1, sub1});
Output add3 = ops::Add(scope.WithOpName("add3"), m1.output, const2);
GrapplerItem item;
item.fetch.push_back("add3");
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
LoopOptimizer optimizer(RewriterConfig::AGGRESSIVE, nullptr);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_CHECK_OK(status);
for (const NodeDef& node : output.node()) {
    // square1 sits on the dead (false) branch of switch1 and must be gone.
    EXPECT_NE(node.name(), "square1");
if (node.name() == "m1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "sub1");
} else if (node.name() == "switch1") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "v_in");
EXPECT_EQ(node.input(1), "^ctrl1");
}
}
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesFullyRemoveDeadBranches) {
const string gdef_ascii = R"EOF(
node {
name: "episodicreplaybuffer_add_readvariableop_resource"
op: "_Arg"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_RESOURCE
}
}
attr {
key: "index"
value {
i: 0
}
}
}
node {
name: "EpisodicReplayBuffer/add/and_1/x"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
bool_val: true
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/begin_episode"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
bool_val: false
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Switch"
op: "Switch"
input: "EpisodicReplayBuffer/add/and_1/x"
input: "EpisodicReplayBuffer/add/and_1/x"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/NoOp"
op: "NoOp"
input: "^EpisodicReplayBuffer/add/and_1/x"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch"
op: "Switch"
input: "EpisodicReplayBuffer/add/and_1/x"
input: "EpisodicReplayBuffer/add/and_1/x"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
attr {
key: "_class"
value {
list {
s: "loc:@EpisodicReplayBuffer/add/assert_equal/All"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch_1"
op: "Switch"
input: "EpisodicReplayBuffer/add/begin_episode"
input: "EpisodicReplayBuffer/add/and_1/x"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
attr {
key: "_class"
value {
list {
s: "loc:@EpisodicReplayBuffer/add/begin_episode"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch_2"
op: "Switch"
input: "EpisodicReplayBuffer/add/begin_episode"
input: "EpisodicReplayBuffer/add/and_1/x"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
attr {
key: "_class"
value {
list {
s: "loc:@EpisodicReplayBuffer/add/end_episode"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/switch_f"
op: "Identity"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Switch"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/control_dependency"
op: "Const"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/NoOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
tensor_content: "\001"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert"
op: "Assert"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch_1"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert/Switch_2"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
list {
type: DT_BOOL
type: DT_BOOL
}
}
}
attr {
key: "summarize"
value {
i: 3
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/control_dependency_1"
op: "Identity"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/switch_f"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Assert"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
attr {
key: "_class"
value {
list {
s: "loc:@EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/switch_f"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge"
op: "Merge"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/control_dependency_1"
input: "EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/control_dependency"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "EpisodicReplayBuffer/add/FloorMod/y"
op: "Const"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_INT64
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {
}
int64_val: 5000
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/ReadVariableOp"
op: "ReadVariableOp"
input: "episodicreplaybuffer_add_readvariableop_resource"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_INT64
}
}
}
node {
name: "EpisodicReplayBuffer/add/Less/y"
op: "Const"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_INT64
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {
}
int64_val: 0
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/Less"
op: "Less"
input: "EpisodicReplayBuffer/add/ReadVariableOp"
input: "EpisodicReplayBuffer/add/Less/y"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_INT64
}
}
}
node {
name: "EpisodicReplayBuffer/add/or"
op: "LogicalOr"
input: "EpisodicReplayBuffer/add/begin_episode"
input: "EpisodicReplayBuffer/add/Less"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
}
node {
name: "EpisodicReplayBuffer/add/get_episode_id/pred_id"
op: "Identity"
input: "EpisodicReplayBuffer/add/or"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "EpisodicReplayBuffer/add/get_episode_id/Switch"
op: "Switch"
input: "EpisodicReplayBuffer/add/or"
input: "EpisodicReplayBuffer/add/or"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "EpisodicReplayBuffer/add/get_episode_id/critical_section_execute/AssignVariableOp/Switch"
op: "Switch"
input: "episodicreplaybuffer_add_readvariableop_resource"
input: "EpisodicReplayBuffer/add/get_episode_id/pred_id"
input: "^EpisodicReplayBuffer/add/ReadVariableOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_RESOURCE
}
}
attr {
key: "_class"
value {
list {
s: "loc:@EpisodicReplayBuffer/add/ReadVariableOp/resource"
}
}
}
}
node {
name: "EpisodicReplayBuffer/add/get_episode_id/critical_section_execute/ReadVariableOp_3"
op: "ReadVariableOp"
input: "^EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_INT64
}
}
}
library {
}
versions {
producer: 27
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
item.fetch = {
"EpisodicReplayBuffer/add/get_episode_id/critical_section_execute/"
"ReadVariableOp_3"};
LoopOptimizer optimizer(RewriterConfig::AGGRESSIVE, nullptr);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_CHECK_OK(status);
bool found_merge = false;
for (const auto& node : output.node()) {
if (node.name() ==
"EpisodicReplayBuffer/add/assert_equal/Assert/AssertGuard/Merge") {
found_merge = true;
}
}
EXPECT_TRUE(found_merge)
<< "Merge node was deleted, but it shouldn't have been.";
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesZeroIterWhile) {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 20
}
}
}
}
node {
name: "while/Enter"
op: "Enter"
input: "Const"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "frame_name"
value {
s: "while/while/"
}
}
attr {
key: "is_constant"
value {
b: false
}
}
attr {
key: "parallel_iterations"
value {
i: 1
}
}
}
node {
name: "while/Merge"
op: "Merge"
input: "while/Enter"
input: "while/NextIteration"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Less/y"
op: "Const"
input: "^while/Merge"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 10
}
}
}
}
node {
name: "while/Less"
op: "Less"
input: "while/Merge"
input: "while/Less/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/LoopCond"
op: "LoopCond"
input: "while/Less"
}
node {
name: "while/Switch"
op: "Switch"
input: "while/Merge"
input: "while/LoopCond"
attr {
key: "T"
value {
type: DT_INT32
}
}
attr {
key: "_class"
value {
list {
s: "loc:@while/Merge"
}
}
}
}
node {
name: "while/Identity"
op: "Identity"
input: "while/Switch:1"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/add/y"
op: "Const"
input: "^while/Identity"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
}
int_val: 1
}
}
}
}
node {
name: "while/add"
op: "Add"
input: "while/Identity"
input: "while/add/y"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/NextIteration"
op: "NextIteration"
input: "while/add"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
node {
name: "while/Exit"
op: "Exit"
input: "while/Switch"
attr {
key: "T"
value {
type: DT_INT32
}
}
}
versions {
producer: 21
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
item.fetch = {"while/Exit"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
LoopOptimizer optimizer(RewriterConfig::AGGRESSIVE, nullptr);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_CHECK_OK(status);
auto tensors_got = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors_got.size(), 1);
test::ExpectTensorEqual<int32>(tensors_got[0], tensors_expected[0]);
int nodes_present = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "while/add") {
LOG(ERROR) << "while/add is present after optimization";
} else if (node.name() == "while/add/y") {
LOG(ERROR) << "while/add/y is present after optimization";
} else if (node.name() == "while/NextIteration") {
LOG(ERROR) << "while/NextIteration is present after optimization";
} else if (node.name() == "while/Identity") {
LOG(ERROR) << "while/Identity is present after optimization";
}
++nodes_present;
}
EXPECT_EQ(nodes_present, 8);
}
TEST_F(LoopOptimizerTest, RemoveDeadBranchesConstantFeed) {
const string gdef_ascii = R"EOF(
node {
name: "Const"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_STRING
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {
dim {
size: 1
}
}
string_val: "I\'m a value!"
}
}
}
}
node {
name: "cond/Switch_1"
op: "Switch"
input: "Const"
input: "Const_1"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_STRING
}
}
attr {
key: "_class"
value {
list {
s: "loc:@Const"
}
}
}
}
node {
name: "Const_1"
op: "Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_BOOL
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_BOOL
tensor_shape {
}
bool_val: true
}
}
}
}
node {
name: "cond/Switch"
op: "Switch"
input: "Const_1"
input: "Const_1"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "cond/switch_t"
op: "Identity"
input: "cond/Switch:1"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_BOOL
}
}
}
node {
name: "cond/Const"
op: "Const"
input: "^cond/switch_t"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "dtype"
value {
type: DT_STRING
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {
dim {
size: 1
}
}
string_val: ""
}
}
}
}
node {
name: "cond/Merge"
op: "Merge"
input: "cond/Switch_1"
input: "cond/Const"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "N"
value {
i: 2
}
}
attr {
key: "T"
value {
type: DT_STRING
}
}
}
node {
name: "Identity"
op: "Identity"
input: "cond/Merge"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "T"
value {
type: DT_STRING
}
}
}
library {
}
versions {
producer: 27
}
)EOF";
GrapplerItem item;
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &item.graph));
item.fetch = {"Identity"};
Tensor feed_tensor(DT_BOOL, {});
feed_tensor.flat<bool>()(0) = false;
item.feed.push_back({"Const_1", feed_tensor});
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
LoopOptimizer optimizer(RewriterConfig::AGGRESSIVE, nullptr);
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_CHECK_OK(status);
auto tensors_got = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors_got.size(), 1);
test::ExpectTensorEqual<tstring>(tensors_got[0], tensors_expected[0]);
EXPECT_EQ(output.node_size(), 8);
bool found = false;
for (const NodeDef& node : output.node()) {
if (node.name() == "cond/Merge") {
EXPECT_EQ(node.op(), "Merge");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "cond/Switch_1");
EXPECT_EQ(node.input(1), "cond/Const");
found = true;
break;
}
}
EXPECT_TRUE(found);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/loop_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/loop_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9fab66cc-e819-44fc-9536-dadf7e67c235 | cpp | tensorflow/tensorflow | generic_layout_optimizer_transposer | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_test.cc | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include <algorithm>
#include <numeric>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kOptimizedSuffix[] = "LayoutOptimizer";
constexpr char kAttrKSize[] = "ksize";
constexpr char kAttrStrides[] = "strides";
constexpr char kAttrDilations[] = "dilations";
constexpr char kAttrExplicitPaddings[] = "explicit_paddings";
constexpr char kAttrDataFormat[] = "data_format";
constexpr char kAttrIsTraining[] = "is_training";
constexpr char kAttrValue[] = "value";
constexpr char kAttrN[] = "N";
constexpr char kAttrT[] = "T";
constexpr char kAttrNumSplit[] = "num_split";
constexpr char kAttrNumOuts[] = "num_outs";
constexpr char kAttrKeepDims[] = "keep_dims";
constexpr char kAttrSqueezeDims[] = "squeeze_dims";
constexpr char kOpTranspose[] = "Transpose";
constexpr char kOpDataFormatVecPermute[] = "DataFormatVecPermute";
constexpr char kOpDataFormatDimMap[] = "DataFormatDimMap";
constexpr char kOpConst[] = "Const";
constexpr char kReshape[] = "Reshape";
constexpr char kReshapeConst[] = "ReshapeConst";
constexpr int kRank = 4;
constexpr int kUnknownRank = -1;
constexpr int kInvalidRank = -2;
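// Returns true iff `node` has a "data_format" attribute equal to
// `src_data_format`; `missing` is set to true when the attribute is absent.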
inline bool AttrDataFormatMatch(const utils::MutableNodeView& node,
absl::string_view src_data_format,
bool* missing) {
const auto* attr = node.GetAttr(kAttrDataFormat);
if (attr != nullptr) {
return attr->s() == src_data_format;
}
*missing = true;
return false;
}
inline bool AttrDataFormatMatch(const utils::MutableNodeView& node,
absl::string_view src_data_format) {
bool missing = false;
return AttrDataFormatMatch(node, src_data_format, &missing);
}
bool IsNonFloatingConv2D(const utils::MutableNodeView& node) {
if (IsConv2D(*node.node()) || IsConv2DBackpropInput(*node.node())) {
const auto* attr = node.GetAttr(kAttrT);
if (attr != nullptr) {
return !kDataTypeIsFloating.Contains(attr->type());
}
}
return false;
}
bool IsNonFloatingConv3D(const utils::MutableNodeView& node) {
if (IsConv3D(*node.node())) {
const auto* attr = node.GetAttr(kAttrT);
if (attr != nullptr) {
return !kDataTypeIsFloating.Contains(attr->type());
}
}
return false;
}
bool IsComparisonOp(const NodeDef& node) {
bool is_compare = IsApproximateEqual(node) || IsEqual(node) ||
IsGreater(node) || IsGreaterEqual(node) || IsLess(node) ||
IsLessEqual(node) || IsNotEqual(node);
return is_compare;
}
std::vector<int> GetRegularFaninPorts(const utils::MutableNodeView& node) {
const int num_regular_fanins = node.NumRegularFanins();
std::vector<int> values(num_regular_fanins);
std::iota(values.begin(), values.end(), 0);
return values;
}
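// Concat carries its axis as input 0 ("Concat") or as the last input
// ("ConcatV2"), so only the N data fanin ports are listed here.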
std::vector<int> GetConcatDataFaninPorts(const utils::MutableNodeView& node) {
const auto* n_attr = node.GetAttr(kAttrN);
const int n = n_attr != nullptr ? n_attr->i() : 0;
const int start = (node.GetOp() == "Concat") ? 1 : 0;
const int end = start + n;
std::vector<int> values(end - start);
std::iota(values.begin(), values.end(), start);
return values;
}
struct ComparatorByNodeNameAndIndex {
bool operator()(const utils::MutableFaninView& node1,
const utils::MutableFaninView& node2) const {
auto* node1_view = node1.node_view();
auto* node2_view = node2.node_view();
auto name_compare = node1_view->GetName().compare(node2_view->GetName());
if (name_compare == 0) {
return node1.index() < node2.index();
}
return name_compare < 0;
}
};
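// Returns true when `output_port` of `node` lives in host memory, or when no
// kernel is registered for the node's device (in which case the output is
// conservatively treated as host memory). Nodes marked with "_xla_input" are
// exempt.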
bool IsHostMemory(const NodeDef& node, int output_port) {
if (node.attr().contains("_xla_input") && node.attr().at("_xla_input").b())
return false;
DeviceNameUtils::ParsedName parsed_name;
if (DeviceNameUtils::ParseFullName(node.device(), &parsed_name)) {
DeviceType device_type(parsed_name.type);
Status s = FindKernelDef(device_type, node, nullptr, nullptr);
if (s.ok()) {
tensorflow::MemoryTypeVector in_mtypes;
tensorflow::MemoryTypeVector out_mtypes;
s = tensorflow::MemoryTypesForNode(OpRegistry::Global(), device_type,
node, &in_mtypes, &out_mtypes);
if (s.ok()) {
if (out_mtypes[output_port] == HOST_MEMORY) {
return true;
}
}
} else {
return true;
}
}
return false;
}
std::vector<int> GetDimensionIndicesFromLabel(
const absl::flat_hash_map<char, int>& dim_indices,
absl::Span<const char> labels) {
std::vector<int> indices;
indices.reserve(labels.size());
for (const auto& label : labels) {
indices.push_back(dim_indices.at(label));
}
return indices;
}
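// RAII helper that temporarily upgrades a 4D src/dst format pair (NHWC/NCHW)
// to the matching 5D pair (NDHWC/NCDHW) while a rank-5 node is being
// processed, restoring the original formats on destruction.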
class ScopedDataFormatUpgrader {
public:
ScopedDataFormatUpgrader(TransposeContext* context, int rank)
: context_(context) {
if (rank == 5 && IsSupportedDataFormat(context_->src_format) &&
IsSupportedDataFormat(context_->dst_format)) {
old_src_format_ = context_->src_format;
old_dst_format_ = context_->dst_format;
std::string new_src_format = GetUpgradedDataFormat(context_->src_format);
std::string new_dst_format = GetUpgradedDataFormat(context_->dst_format);
context_->AssignDeviceAndDataFormats(context_->target_device,
new_src_format, new_dst_format);
upgraded_ = true;
}
}
ScopedDataFormatUpgrader(const ScopedDataFormatUpgrader&) = delete;
ScopedDataFormatUpgrader& operator=(const ScopedDataFormatUpgrader&) = delete;
~ScopedDataFormatUpgrader() {
if (upgraded_) {
context_->AssignDeviceAndDataFormats(context_->target_device,
old_src_format_, old_dst_format_);
}
}
private:
bool IsSupportedDataFormat(absl::string_view data_format) {
return data_format == "NHWC" || data_format == "NCHW";
}
std::string GetUpgradedDataFormat(absl::string_view data_format) {
if (data_format == "NHWC") {
return "NDHWC";
}
DCHECK_EQ(data_format, "NCHW");
return "NCDHW";
}
TransposeContext* context_ = nullptr;
bool upgraded_ = false;
std::string old_src_format_;
std::string old_dst_format_;
};
}  // namespace
Status TransposeContext::InitializeTransposeContext(bool assume_valid_feeds,
const GrapplerItem& item,
const Cluster* cluster,
TransposeContext* context) {
DCHECK(context != nullptr);
context->graph_properties = std::make_unique<GraphProperties>(item);
TF_RETURN_IF_ERROR(
context->graph_properties->InferStatically(assume_valid_feeds));
TF_RETURN_IF_ERROR(
context->graph_properties->AnnotateOutputShapes(&context->graph));
Status status;
context->graph_view =
std::make_unique<utils::MutableGraphView>(&context->graph, &status);
TF_RETURN_IF_ERROR(status);
context->num_nodes = context->graph.node_size();
const auto& nodes_to_preserve = item.NodesToPreserve();
context->nodes_to_preserve = absl::flat_hash_set<string>(
nodes_to_preserve.begin(), nodes_to_preserve.end());
TF_RETURN_IF_ERROR(context->frames.InferFromGraph(context->graph));
return absl::OkStatus();
}
void TransposeContext::AssignDeviceAndDataFormats(
absl::string_view target_device, absl::string_view src_format,
absl::string_view dst_format) {
this->target_device = string(target_device);
this->src_format = string(src_format);
this->dst_format = string(dst_format);
this->src_dim_indices = GetDimensionIndices(src_format);
this->dst_dim_indices = GetDimensionIndices(dst_format);
this->src_to_dst = GetPermutation(this->src_dim_indices, dst_format);
this->dst_to_src = GetPermutation(this->dst_dim_indices, src_format);
}
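// A node is processed only if it sits on the target device, its data_format
// (for layout-sensitive ops) matches the source format, it is not an integer
// Conv2D/Conv3D, it is not in the preserve set, and it has at least one
// fanout.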
bool Transposer::ShouldProcess(const TransposeContext& context,
const utils::MutableNodeView& node) const {
const auto* node_def = node.node();
const string& device_name = GetDeviceName(*node_def);
string device;
string task;
const bool is_on_target_device =
DeviceNameUtils::SplitDeviceName(device_name, &task, &device) &&
absl::StrContains(absl::AsciiStrToLower(device),
absl::AsciiStrToLower(context.target_device));
const bool data_format_match = !IsLayoutSensitiveOp(*node_def) ||
AttrDataFormatMatch(node, context.src_format);
const bool is_integer_conv2d = IsNonFloatingConv2D(node);
const bool is_integer_conv3d = IsNonFloatingConv3D(node);
return is_on_target_device && data_format_match && !is_integer_conv2d &&
!is_integer_conv3d &&
!context.nodes_to_preserve.contains(node_def->name()) &&
!(node.NumRegularFanouts() == 0 && node.NumControlledFanouts() == 0);
}
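// Adds a Const node named `node_name` holding `permutation` as an int32
// vector; `control_node_name`, when non-empty, anchors the constant inside a
// frame via a control input.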
Status Transposer::CreateConstPermNode(TransposeContext* context,
absl::string_view node_name,
absl::string_view device,
absl::Span<const int> permutation,
absl::string_view control_node_name,
utils::MutationNewNode* added_node) {
auto* graph_view = context->graph_view.get();
DCHECK(!graph_view->HasNode(node_name));
NodeDef node;
node.set_name(string(node_name));
node.set_op(kOpConst);
node.set_device(string(device));
if (!control_node_name.empty()) {
node.add_input(string(control_node_name));
}
AttrValue attr_data_type;
attr_data_type.set_type(DT_INT32);
node.mutable_attr()->insert({"dtype", attr_data_type});
AttrValue attr_tensor;
  Tensor tensor(DT_INT32,
                TensorShape({static_cast<int64_t>(permutation.size())}));
for (int i = 0, end = permutation.size(); i < end; i++) {
tensor.flat<int>()(i) = permutation[i];
}
tensor.AsProtoTensorContent(attr_tensor.mutable_tensor());
node.mutable_attr()->insert({"value", attr_tensor});
Status status;
*added_node =
graph_view->GetMutationBuilder()->AddNode(std::move(node), &status);
return status;
}
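// Adds a Transpose node (together with its const permutation input) that
// applies `permutation` to a tensor of type `data_type`. If the fanin shape
// is known, the permuted shape is recorded in the node's output-shape
// attribute.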
Status Transposer::CreateTransposeNode(
TransposeContext* context, absl::string_view name_format,
const DataType& data_type, absl::string_view device,
TensorShapeProto fanin_shape, absl::Span<const int> permutation,
absl::string_view control_node_name, utils::MutationNewNode* added_node,
string* transpose_node_name) {
const string node_name = absl::Substitute(name_format, kOpTranspose);
auto* graph_view = context->graph_view.get();
DCHECK(!graph_view->HasNode(node_name));
*transpose_node_name = node_name;
NodeDef node;
node.set_name(node_name);
node.set_op(kOpTranspose);
node.set_device(string(device));
AttrValue attr_data_type;
attr_data_type.set_type(data_type);
node.mutable_attr()->insert({"T", attr_data_type});
AttrValue attr_data_type_perm;
attr_data_type_perm.set_type(DT_INT32);
node.mutable_attr()->insert({"Tperm", attr_data_type_perm});
if (!fanin_shape.unknown_rank()) {
TF_RETURN_IF_ERROR(
PermuteSingle(absl::StrCat("fanin shape in", node.name()), permutation,
fanin_shape.mutable_dim()));
AttrValue attr_output_shape;
*attr_output_shape.mutable_list()->add_shape() = fanin_shape;
node.mutable_attr()->insert({kAttrOutputShape, attr_output_shape});
}
utils::MutationNewNode const_perm_added_node;
const string const_perm_node_name =
absl::Substitute(name_format, "PermConst");
TF_RETURN_IF_ERROR(CreateConstPermNode(context, const_perm_node_name, device,
permutation, control_node_name,
&const_perm_added_node));
node.add_input("");
node.add_input(const_perm_node_name);
Status status;
*added_node =
graph_view->GetMutationBuilder()->AddNode(std::move(node), &status);
return status;
}
Status Transposer::UpdateFaninEdgesWithOp(TransposeContext* context,
absl::Span<const int> dst_ports,
utils::MutableNodeView* dst_node,
absl::string_view op) {
const bool is_in_frame = context->frames.IsInFrame(*dst_node->node());
for (int dst_port : dst_ports) {
auto& fanin_port = dst_node->GetRegularFanin(dst_port);
auto* fanin_node_view = fanin_port.node_view();
TF_RETURN_IF_ERROR(
UpdateEdge(context,
GetFaninNameFormat(dst_node->GetName(), dst_port,
context->src_format, context->dst_format),
op, nullptr, is_in_frame,
true, fanin_port.index(),
dst_port, fanin_node_view, dst_node));
}
return absl::OkStatus();
}
Status Transposer::UpdateFanoutEdgesWithOp(TransposeContext* context,
absl::Span<const int> src_ports,
utils::MutableNodeView* src_node,
absl::string_view op) {
const auto* output_shape_attr = src_node->GetAttr(kAttrOutputShape);
AttrValue shape_attr_copy;
if (op == kOpTranspose && output_shape_attr != nullptr) {
shape_attr_copy = *output_shape_attr;
for (int port : src_ports) {
auto* shape = shape_attr_copy.mutable_list()->mutable_shape(port);
if (shape->unknown_rank()) continue;
TF_RETURN_IF_ERROR(
PermuteSingle(absl::StrCat("output shape attribute at port ", port,
" in", src_node->GetName()),
context->src_to_dst, shape->mutable_dim()));
}
context->graph_view->GetMutationBuilder()->AddOrUpdateNodeAttr(
src_node, kAttrOutputShape, shape_attr_copy);
}
const bool is_in_frame = context->frames.IsInFrame(*src_node->node());
for (int src_port : src_ports) {
const auto& fanouts_src_port = src_node->GetRegularFanout(src_port);
std::vector<utils::MutableFaninView> sorted_fanouts(
fanouts_src_port.begin(), fanouts_src_port.end());
std::sort(sorted_fanouts.begin(), sorted_fanouts.end(),
ComparatorByNodeNameAndIndex());
int num_downstream_transposers = 0;
for (const auto& fanout : sorted_fanouts) {
TF_RETURN_IF_ERROR(UpdateEdge(
context,
GetFanoutNameFormat(src_node->GetName(), src_port,
num_downstream_transposers++, context->src_format,
context->dst_format),
op, &shape_attr_copy, is_in_frame,
false, src_port, fanout.index(),
src_node, fanout.node_view()));
}
}
return absl::OkStatus();
}
Status Transposer::CreateDataFormatNode(
TransposeContext* context, absl::string_view node_name,
absl::string_view op, absl::string_view device, const DataType& data_type,
bool is_fanin_on_host, bool is_src_format_to_dst_format,
utils::MutationNewNode* added_node) {
auto* graph_view = context->graph_view.get();
DCHECK(!graph_view->HasNode(node_name));
NodeDef node;
node.set_name(string(node_name));
node.set_op(string(op));
node.set_device(string(device));
AttrValue attr_data_type;
attr_data_type.set_type(data_type);
node.mutable_attr()->insert({"T", attr_data_type});
if (is_fanin_on_host) {
AttrValue attr_kernel;
attr_kernel.set_s("host");
node.mutable_attr()->insert({"_kernel", attr_kernel});
}
AttrValue src_format;
src_format.set_s(is_src_format_to_dst_format ? context->src_format
: context->dst_format);
node.mutable_attr()->insert({kAttrSrcFormat, src_format});
AttrValue dst_format;
dst_format.set_s(is_src_format_to_dst_format ? context->dst_format
: context->src_format);
node.mutable_attr()->insert({kAttrDstFormat, dst_format});
node.add_input("");
Status status;
*added_node =
graph_view->GetMutationBuilder()->AddNode(std::move(node), &status);
return status;
}
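// Rewires the edge src:src_port -> dst:dst_port through a newly created
// Transpose, DataFormatVecPermute, or DataFormatDimMap node so the value is
// converted between the source and destination formats.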
Status Transposer::UpdateEdge(
TransposeContext* context, absl::string_view name_format,
absl::string_view op, const AttrValue* input_shape, bool is_in_frame,
bool is_src_format_to_dst_format, const int src_port, const int dst_port,
utils::MutableNodeView* src_node, utils::MutableNodeView* dst_node) {
DCHECK(src_node != nullptr);
DCHECK(dst_node != nullptr);
auto* src_node_def = src_node->node();
auto* dst_node_def = dst_node->node();
const string device = GetDeviceName(
is_src_format_to_dst_format ? *dst_node_def : *src_node_def);
DataType data_type =
is_src_format_to_dst_format
? context->graph_properties
->GetInputProperties(dst_node->GetName())[dst_port]
.dtype()
: context->graph_properties
->GetOutputProperties(src_node->GetName())[src_port]
.dtype();
utils::MutationNewNode added_node;
string added_node_name;
if (op == kOpTranspose) {
TensorShapeProto input_shape_proto;
input_shape_proto.set_unknown_rank(true);
if (input_shape != nullptr) {
input_shape_proto = input_shape->list().shape(src_port);
} else {
const auto* src_node_shape_attr = src_node->GetAttr(kAttrOutputShape);
if (src_node_shape_attr != nullptr) {
input_shape_proto = src_node_shape_attr->list().shape(src_port);
}
}
const string control_node_name =
is_in_frame ? AsControlDependency(src_node_def->name()) : "";
const std::vector<int>& permutation =
is_src_format_to_dst_format ? context->src_to_dst : context->dst_to_src;
TF_RETURN_IF_ERROR(CreateTransposeNode(
context, name_format, data_type, device, input_shape_proto, permutation,
control_node_name, &added_node, &added_node_name));
} else if (op == kOpDataFormatVecPermute || op == kOpDataFormatDimMap) {
DeviceNameUtils::ParsedName parsed_name;
bool is_fanin_on_host = DeviceNameUtils::ParseFullName(
GetDeviceName(*src_node_def), &parsed_name) &&
parsed_name.type != "CPU" &&
IsHostMemory(*src_node_def, src_port);
const string node_name = absl::Substitute(name_format, op);
TF_RETURN_IF_ERROR(CreateDataFormatNode(
context, node_name, op, device, data_type, is_fanin_on_host,
is_src_format_to_dst_format, &added_node));
added_node_name = node_name;
} else {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("Unsupported op \"", op,
"\". Supported ops are Transpose, "
"DataFormatVecPerm, DataFormatDimMap."));
}
utils::Mutation* mutation = context->graph_view->GetMutationBuilder();
mutation->AddOrUpdateRegularFanin(added_node, 0,
{src_node->GetName(), src_port});
mutation->AddOrUpdateRegularFanin(dst_node, dst_port, {added_node_name, 0});
return absl::OkStatus();
}
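// Returns the rank of the tensor produced at `port`, kUnknownRank if the
// shape has unknown rank, or kInvalidRank if the output-shape attribute is
// missing or does not cover the port.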
int Transposer::GetFanoutPortRank(const utils::MutableNodeView& node,
int port) const {
const auto* output_shape_attr = node.GetAttr(kAttrOutputShape);
if (output_shape_attr == nullptr ||
output_shape_attr->list().shape_size() <= port) {
return kInvalidRank;
}
const auto& shape = output_shape_attr->list().shape(port);
if (shape.unknown_rank()) {
return kUnknownRank;
}
return shape.dim_size();
}
bool Transposer::IsFanoutPortRankN(const utils::MutableNodeView& node, int port,
int n) const {
return GetFanoutPortRank(node, port) == n;
}
bool Transposer::IsFanoutPortsRankN(const utils::MutableNodeView& node,
absl::Span<const int> ports, int n) const {
for (const auto& port : ports) {
if (!IsFanoutPortRankN(node, port, n)) {
return false;
}
}
return true;
}
int Transposer::GetFaninPortRank(const utils::MutableNodeView& node,
int port) const {
if (port < node.NumRegularFanins() && port >= 0) {
const auto& regular_fanin = node.GetRegularFanin(port);
return GetFanoutPortRank(*regular_fanin.node_view(), regular_fanin.index());
}
return kInvalidRank;
}
bool Transposer::IsFaninPortRankN(const utils::MutableNodeView& node, int port,
int n) const {
return GetFaninPortRank(node, port) == n;
}
bool Transposer::IsFaninPortDimsNIfConst(const utils::MutableNodeView& node,
int port,
absl::Span<const int> dims) const {
if (port < node.NumRegularFanins() && port >= 0) {
const auto& regular_fanin = node.GetRegularFanin(port);
const auto* fanin_node_view = regular_fanin.node_view();
if (!IsConstant(*fanin_node_view->node())) {
return true;
}
const auto* value_attr = fanin_node_view->GetAttr(kAttrValue);
if (value_attr == nullptr) {
return false;
}
Tensor tensor;
if (!tensor.FromProto(value_attr->tensor())) {
return false;
}
const int dims_size = dims.size();
if (tensor.dims() != dims_size) {
return false;
}
for (int i = 0; i < dims_size; ++i) {
if (tensor.dim_size(i) != dims[i]) {
return false;
}
}
return true;
}
return false;
}
bool Transposer::IsFaninPortsDimsNIfConst(const utils::MutableNodeView& node,
absl::Span<const int> ports,
absl::Span<const int> dims) const {
for (const auto& port : ports) {
if (!IsFaninPortDimsNIfConst(node, port, dims)) {
return false;
}
}
return true;
}
bool Transposer::CanProcessNode(const TransposeContext& context,
const utils::MutableNodeView& node) const {
return !context.nodes_to_preserve.contains(node.GetName()) &&
!(node.NumRegularFanouts() == 0 && node.NumControlledFanouts() == 0);
}
string Transposer::GetFaninNameFormat(absl::string_view node_name, int port,
absl::string_view src_format,
absl::string_view dst_format) {
return absl::StrCat(node_name, "-", port, "-$0", src_format, "To", dst_format,
"-", kOptimizedSuffix);
}
string Transposer::GetFanoutNameFormat(absl::string_view node_name, int port,
int index, absl::string_view src_format,
absl::string_view dst_format) {
return absl::StrCat(node_name, "-", port, "-", index, "-$0", dst_format, "To",
src_format, "-", kOptimizedSuffix);
}
string Transposer::LayoutOptimizerNode(absl::string_view node_name) {
return absl::StrCat(node_name, "-", kOptimizedSuffix);
}
string Transposer::GetReshapeNodeNameFormat(absl::string_view node_name,
int index,
absl::string_view src_format,
absl::string_view dst_format) {
return absl::StrCat(node_name, "-", index, "-", kReshape, src_format, "To",
dst_format);
}
string Transposer::GetShapeConstNodeNameFormat(absl::string_view node_name,
int index) {
return absl::StrCat(node_name, "-", index, "-", kReshapeConst);
}
inline string GetLayoutSensitiveNodeDataFormat(
const utils::MutableNodeView& node) {
const auto* attr = node.GetAttr(kAttrDataFormat);
if (attr != nullptr) {
return attr->s();
}
return "";
}
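// Rewrites the node's data_format attribute to the destination format and
// permutes the strides, ksize, dilations, and explicit_paddings attributes
// accordingly.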
Status LayoutSensitiveOpTransposer::UpdateNode(TransposeContext* context,
utils::MutableNodeView* node) {
utils::Mutation* mutation = context->graph_view->GetMutationBuilder();
AttrValue data_format_attr;
data_format_attr.set_s(context->dst_format);
mutation->AddOrUpdateNodeAttr(node, kAttrDataFormat, data_format_attr);
auto permute_attr = [&context, &node,
&mutation](absl::string_view attr_name) {
const auto* attr = node->GetAttr(attr_name);
if (attr != nullptr) {
AttrValue attr_copy(*attr);
TF_RETURN_IF_ERROR(PermuteSingle(
          absl::StrCat(attr_name, " attribute in ", node->GetName()),
context->src_to_dst, attr_copy.mutable_list()->mutable_i()));
mutation->AddOrUpdateNodeAttr(node, attr_name, attr_copy);
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(permute_attr(kAttrStrides));
TF_RETURN_IF_ERROR(permute_attr(kAttrKSize));
TF_RETURN_IF_ERROR(permute_attr(kAttrDilations));
const auto* explicit_paddings_attr = node->GetAttr(kAttrExplicitPaddings);
if (explicit_paddings_attr != nullptr && explicit_paddings_attr->has_list() &&
explicit_paddings_attr->list().i_size() > 0) {
AttrValue explicit_paddings_attr_copy(*explicit_paddings_attr);
TF_RETURN_IF_ERROR(PermuteDouble(
absl::StrCat("explicit_paddings attribute in", node->GetName()),
context->src_to_dst,
explicit_paddings_attr_copy.mutable_list()->mutable_i()));
mutation->AddOrUpdateNodeAttr(node, kAttrExplicitPaddings,
explicit_paddings_attr_copy);
}
return absl::OkStatus();
}
Status DefaultLayoutSensitiveOpTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsDefaultLayoutSensitiveOp(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status AvgPoolGradTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsAvgPoolGrad(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFaninPortRankN(*node, 1, 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {1}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status BiasAddTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsBiasAddV2(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, rank)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status BiasAddGradTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsBiasAddGrad(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status Conv2DBackpropFilterTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsConv2DBackpropFilter(*node->node()) ||
IsDepthwiseConv2dNativeBackpropFilter(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 2}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status Conv2DBackpropInputTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsConv2DBackpropInput(*node->node()) ||
IsDepthwiseConv2dNativeBackpropInput(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4)) {
return absl::OkStatus();
}
const auto& fanin = node->GetRegularFanin(0);
auto* fanin_node = fanin.node_view();
const auto* output_shape_attr = fanin_node->GetAttr(kAttrOutputShape);
if (output_shape_attr == nullptr) {
VLOG(3) << "Cannot compute the shape of " << fanin_node->GetName()
<< " because it is missing attribute " << kAttrOutputShape;
return absl::OkStatus();
}
TensorShapeProto fanin_shape = output_shape_attr->list().shape(fanin.index());
if (fanin_shape.dim_size() != 1) {
VLOG(3) << fanin_node->GetName() << " is not a vector.";
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {2}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status Conv3DTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsConv3D(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status Conv3DBackpropFilterTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsConv3DBackpropFilterV2(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 2}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status Conv3DBackpropInputTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsConv3DBackpropInputV2(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {2}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status FusedBatchNormExTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsFusedBatchNormEx(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
if (node->NumRegularFanins() == 6) {
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 5}, node, kOpTranspose));
} else {
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
}
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
bool FusedBatchNormGradTransposer::IsTraining(
const utils::MutableNodeView& node) const {
const auto* is_training_attr = node.GetAttr(kAttrIsTraining);
if (is_training_attr != nullptr) {
return is_training_attr->b();
}
return false;
}
Status FusedBatchNormGradTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsFusedBatchNormGrad(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) || !IsTraining(*node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 1}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status MaxPoolV2Transposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMaxPoolV2(*node->node()));
const auto& data_fanin = node->GetRegularFanin(0);
auto* data_fanin_node = data_fanin.node_view();
if (!ShouldProcess(*context, *node) ||
!IsFanoutPortRankN(*data_fanin_node, data_fanin.index(), 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1, 2}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status MaxPool3DTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMaxPool3D(*node->node()));
const auto& data_fanin = node->GetRegularFanin(0);
auto* data_fanin_node = data_fanin.node_view();
if (!ShouldProcess(*context, *node) ||
!IsFanoutPortRankN(*data_fanin_node, data_fanin.index(), 5)) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, 5);
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status MaxPoolGradTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMaxPoolGrad(*node->node()) || IsMaxPoolGradGradV1(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 1, 2}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status MaxPoolGradV2Transposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMaxPoolGradV2(*node->node()) || IsMaxPoolGradGradV2(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateNode(context, node));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 1, 2}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {3, 4}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
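// Returns true if `node` is a Transpose whose permutation input is a Const
// equal to `permutation`.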
inline bool IsValidConstPermTransposeNode(const utils::MutableNodeView& node,
absl::Span<const int> permutation) {
Tensor tensor;
if (!GetValueAttrFromConstInputNode(node, IsTranspose, 1, &tensor)) {
return false;
}
const int permutation_size = permutation.size();
if (tensor.NumElements() != permutation_size) {
return false;
}
const auto& tensor_data = tensor.unaligned_flat<int32>();
for (int i = 0; i < permutation_size; i++) {
if (permutation[i] != tensor_data(i)) {
return false;
}
}
return true;
}
inline bool IsValidDataFormatNode(const utils::MutableNodeView& node,
absl::string_view src_format,
absl::string_view dst_format) {
if (!IsDataFormatOp(node)) {
return false;
}
const auto* src_format_attr = node.GetAttr(kAttrSrcFormat);
if (src_format_attr == nullptr || src_format_attr->s() != src_format) {
return false;
}
const auto* dst_format_attr = node.GetAttr(kAttrDstFormat);
if (dst_format_attr == nullptr || dst_format_attr->s() != dst_format) {
return false;
}
return true;
}
inline bool IsLayoutOptimizerAddedDstToSrcTranspose(
const TransposeContext& context, const utils::MutableNodeView& node) {
return node.node_index() >= context.num_nodes &&
IsValidConstPermTransposeNode(node, context.dst_to_src);
}
inline bool IsLayoutOptimizerAddedDstToSrcTransform(
const TransposeContext& context, const utils::MutableNodeView& node) {
return node.node_index() >= context.num_nodes &&
(IsValidConstPermTransposeNode(node, context.dst_to_src) ||
IsValidDataFormatNode(node, context.dst_format, context.src_format));
}
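// Breadth-first search upstream through layout-agnostic ops for a dst-to-src
// Transpose or DataFormat* node added by this optimizer; layout-agnostic ops
// are only worth transposing when such a transform feeds them.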
bool LayoutAgnosticOpTransposer::IsAfterDstToSrcTransform(
const TransposeContext& context, const utils::MutableNodeView& node) const {
std::deque<utils::MutableNodeView*> queue;
absl::flat_hash_set<utils::MutableNodeView*> visited_nodes;
auto data_node_pos = GetDataFaninPorts(node);
for (const int pos : data_node_pos) {
const auto& fanin = node.GetRegularFanin(pos);
auto* fanin_node = fanin.node_view();
queue.push_back(fanin_node);
visited_nodes.insert(fanin_node);
}
while (!queue.empty()) {
utils::MutableNodeView* current_node = queue.front();
queue.pop_front();
if (IsLayoutOptimizerAddedDstToSrcTransform(context, *current_node)) {
return true;
}
if (IsLayoutAgnosticOp(*current_node->node())) {
auto current_node_pos = GetDataFaninPorts(*current_node);
for (const auto& pos : current_node_pos) {
const auto& fanin = current_node->GetRegularFanin(pos);
auto* fanin_node = fanin.node_view();
if (visited_nodes.insert(fanin_node).second) {
queue.push_back(fanin_node);
}
}
}
}
return false;
}
std::vector<int> LayoutAgnosticOpTransposer::GetVariadicNDFaninPorts(
const TransposeContext& context, const utils::MutableNodeView& node,
int rank) const {
std::vector<int> ports;
const int num_regular_fanins = node.NumRegularFanins();
ports.reserve(num_regular_fanins);
for (int i = 0; i < num_regular_fanins; ++i) {
const auto& regular_fanin = node.GetRegularFanin(i);
auto* regular_fanin_node = regular_fanin.node_view();
int regular_fanin_port = regular_fanin.index();
if ((IsFanoutPortRankN(*regular_fanin_node, regular_fanin_port, rank)) &&
((IsAfterDstToSrcTransform(context, *regular_fanin_node) &&
IsLayoutAgnosticOp(*regular_fanin_node->node())) ||
IsLayoutOptimizerAddedDstToSrcTranspose(context,
*regular_fanin_node))) {
ports.push_back(i);
}
}
return ports;
}
Status DefaultLayoutAgnosticOpTransposer::TransposeNode(
TransposeContext* context, utils::MutableNodeView* node) {
DCHECK(IsDefaultLayoutAgnosticOp(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status AddNTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsAddN(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, GetDataFaninPorts(*node),
node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
bool BinaryOpTransposer::IsNDOperateWithMD(const utils::MutableNodeView& node,
int n, int m) {
return IsFaninPortRankN(node, 0, n) && IsFaninPortRankN(node, 1, m);
}
bool BinaryOpTransposer::IsFaninShapeSupported(
const utils::MutableNodeView& node, int rank) {
return (IsNDOperateWithMD(node, rank, 0) ||
IsNDOperateWithMD(node, rank, 1) ||
IsNDOperateWithMD(node, rank, rank) ||
IsNDOperateWithMD(node, 0, rank) || IsNDOperateWithMD(node, 1, rank));
}
std::vector<int> BinaryOpTransposer::GetNDDataFaninPorts(
const utils::MutableNodeView& node, int rank) {
std::vector<int> values;
if (IsFaninPortRankN(node, 0, rank)) {
values.push_back(0);
}
if (IsFaninPortRankN(node, 1, rank)) {
values.push_back(1);
}
return values;
}
Status BinaryOpTransposer::AddNodeReshape(
utils::Mutation* mutation, absl::string_view node_name,
absl::string_view node_device, absl::string_view input_name,
absl::string_view shape_const_node_name, const DataType& data_type) {
NodeDef new_node;
new_node.set_name(string(node_name));
new_node.add_input(string(input_name));
new_node.add_input(string(shape_const_node_name));
new_node.set_op(kReshape);
new_node.set_device(string(node_device));
AttrValue attr_type_indices;
attr_type_indices.set_type(DT_INT32);
new_node.mutable_attr()->insert({"Tshape", attr_type_indices});
AttrValue attr_type_params;
attr_type_params.set_type(data_type);
new_node.mutable_attr()->insert({"T", attr_type_params});
Status status;
mutation->AddNode(std::move(new_node), &status);
return status;
}
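// Adds a Const holding a rank-`rank` shape of the form [1, C, 1, ...] with
// the channel count at index 1; MaybeReshapeVectorFanin uses it to reshape a
// length-C vector operand so it still broadcasts correctly in the
// channels-second destination layout.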
Status BinaryOpTransposer::AddNodeShapeConst(
utils::Mutation* mutation, absl::string_view node_name,
absl::string_view node_device, bool node_in_frame, int num_channels,
absl::string_view depended_node, int rank) {
NodeDef new_node;
new_node.set_name(string(node_name));
new_node.set_op(kOpConst);
new_node.set_device(string(node_device));
AttrValue attr_data_type;
attr_data_type.set_type(DT_INT32);
new_node.mutable_attr()->insert({"dtype", attr_data_type});
AttrValue attr_tensor;
Tensor tensor(DT_INT32, TensorShape({rank}));
std::vector<int> shape(rank, 1);
shape[1] = num_channels;
for (int i = 0; i < static_cast<int>(shape.size()); i++) {
tensor.flat<int>()(i) = shape[i];
}
tensor.AsProtoTensorContent(attr_tensor.mutable_tensor());
new_node.mutable_attr()->insert({"value", attr_tensor});
if (node_in_frame) {
new_node.add_input(AsControlDependency(string(depended_node)));
}
Status status;
mutation->AddNode(std::move(new_node), &status);
return status;
}
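// If exactly one operand of the binary op is a vector (the other being rank
// `rank`), inserts the shape Const and Reshape nodes above so the vector
// keeps broadcasting along the channel dimension after the transpose.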
Status BinaryOpTransposer::MaybeReshapeVectorFanin(TransposeContext* context,
utils::MutableNodeView* node,
int rank) {
int vector_index = -1;
if (IsNDOperateWithMD(*node, rank, 1)) {
vector_index = 1;
} else if (IsNDOperateWithMD(*node, 1, rank)) {
vector_index = 0;
}
if (vector_index != -1) {
const string& node_name = node->GetName();
const string& node_device = node->GetDevice();
string reshape_node_name = LayoutOptimizerNode(GetReshapeNodeNameFormat(
node_name, vector_index, context->src_format, context->dst_format));
string shape_const_node_name = LayoutOptimizerNode(
GetShapeConstNodeNameFormat(node_name, vector_index));
const auto& fanin = node->GetRegularFanin(vector_index);
auto* fanin_node = fanin.node_view();
const auto* output_shape_attr = fanin_node->GetAttr(kAttrOutputShape);
if (output_shape_attr == nullptr) {
return errors::InvalidArgument("Missing attribute ", kAttrOutputShape);
}
int vector_size =
output_shape_attr->list().shape(fanin.index()).dim(0).size();
utils::Mutation* mutation = context->graph_view->GetMutationBuilder();
TF_RETURN_IF_ERROR(
AddNodeShapeConst(mutation, shape_const_node_name, node_device,
context->frames.IsInFrame(*node->node()), vector_size,
fanin_node->GetName(), rank));
const auto* t_attr = node->GetAttr(kAttrT);
if (t_attr == nullptr) {
return errors::InvalidArgument("Missing attribute ", kAttrT);
}
TF_RETURN_IF_ERROR(
AddNodeReshape(mutation, reshape_node_name, node_device,
TensorIdToString({fanin_node->GetName(), fanin.index()}),
shape_const_node_name, t_attr->type()));
mutation->AddOrUpdateRegularFanin(node, vector_index,
{reshape_node_name, 0});
}
return absl::OkStatus();
}
Status BinaryOpTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsBinaryOp(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) || !IsFaninShapeSupported(*node, rank) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(
context, GetNDDataFaninPorts(*node, rank), node, kOpTranspose));
TF_RETURN_IF_ERROR(MaybeReshapeVectorFanin(context, node, rank));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status ConcatOpTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsConcat(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(
context, GetConcatDataFaninPorts(*node), node, kOpTranspose));
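  // For ConcatV2 the axis is the last input (port N); for Concat it is
  // port 0.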
int axis_node = 0;
if (node->GetOp() == "ConcatV2") {
const auto* n_attr = node->GetAttr(kAttrN);
if (n_attr != nullptr) {
axis_node = n_attr->i();
}
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {axis_node}, node, kOpDataFormatDimMap));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status FillOpTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsFill(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4) ||
!IsFaninPortDimsNIfConst(*node, 0, {4}) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status IdentityNTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsIdentityN(*node->node()));
const auto ports_4d = GetVariadicNDFaninPorts(*context, *node, 4);
std::vector<int> ports_5d;
{
ScopedDataFormatUpgrader data_format_upgrader(context, 5);
ports_5d = GetVariadicNDFaninPorts(*context, *node, 5);
}
if (!ShouldProcess(*context, *node)) {
return absl::OkStatus();
}
if (!ports_4d.empty()) {
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, ports_4d, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, ports_4d, node, kOpTranspose));
}
if (!ports_5d.empty()) {
ScopedDataFormatUpgrader data_format_upgrader(context, 5);
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, ports_5d, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, ports_5d, node, kOpTranspose));
}
return context->graph_view->GetMutationBuilder()->Apply();
}
bool MergeTransposer::IsEveryFaninAfterDstToSrcTransform(
const TransposeContext& context, const utils::MutableNodeView& node) const {
for (const auto& regular_fanin : node.GetRegularFanins()) {
auto* regular_fanin_node = regular_fanin.node_view();
if ((IsFanoutPortRankN(*regular_fanin_node, regular_fanin.index(), 4) ||
IsFanoutPortRankN(*regular_fanin_node, regular_fanin.index(), 5)) &&
((IsAfterDstToSrcTransform(context, *regular_fanin_node) &&
IsLayoutAgnosticOp(*regular_fanin_node->node())) ||
IsLayoutOptimizerAddedDstToSrcTranspose(context,
*regular_fanin_node))) {
continue;
}
return false;
}
return true;
}
Status MergeTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMerge(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsEveryFaninAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, GetDataFaninPorts(*node),
node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status PadTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsMirrorPad(*node->node()) || IsMirrorPadGrad(*node->node()) ||
IsPad(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4) ||
!IsFaninPortDimsNIfConst(*node, 1, {4, 2}) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
bool ReduceTransposer::KeepDims(const utils::MutableNodeView& node) {
const auto* keep_dims_attr = node.GetAttr(kAttrKeepDims);
if (keep_dims_attr != nullptr) {
return keep_dims_attr->b();
}
return false;
}
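// Returns true if the 1-D `tensor` has exactly `axis.size()` entries, each of
// which (after wrapping negative indices by `rank`) is one of the dimensions
// in `axis`.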
bool ReduceTransposer::IsAlongAxis(const Tensor& tensor,
absl::Span<const int> axis, int rank) {
const int axis_size = axis.size();
if (tensor.dims() != 1 || tensor.dim_size(0) != axis_size) {
return false;
}
for (int i = 0; i < axis_size; ++i) {
int local_axis = 0;
if (tensor.dtype() == DT_INT32) {
local_axis = tensor.flat<int32>()(i);
} else {
local_axis = tensor.flat<int64_t>()(i);
}
if (local_axis < 0) {
local_axis += rank;
}
bool along_axis = false;
for (int dim : axis) {
if (local_axis == dim) {
along_axis = true;
break;
}
}
if (!along_axis) {
return false;
}
}
return true;
}
bool ReduceTransposer::IsReduceAxisSupported(const TransposeContext& context,
const utils::MutableNodeView& node,
int rank) {
if (KeepDims(node)) {
return true;
}
const auto& regular_fanin_1 = node.GetRegularFanin(1);
auto* axis_node = regular_fanin_1.node_view();
if (!IsConstant(*axis_node->node())) {
return false;
}
const auto* value_attr = axis_node->GetAttr(kAttrValue);
if (value_attr == nullptr) {
return false;
}
Tensor tensor;
if (!tensor.FromProto(value_attr->tensor())) {
LOG(ERROR) << "Failed to parse TensorProto.";
return false;
}
auto indices = [&context](absl::Span<const char> labels) {
return GetDimensionIndicesFromLabel(context.src_dim_indices, labels);
};
if (rank == 5) {
return IsAlongAxis(tensor, indices({'N', 'D', 'H', 'W', 'C'}), 5) ||
IsAlongAxis(tensor, indices({'D', 'H', 'W', 'C'}), 5) ||
IsAlongAxis(tensor, indices({'N', 'D', 'H', 'W'}), 5) ||
IsAlongAxis(tensor, indices({'D', 'H', 'W'}), 5) ||
IsAlongAxis(tensor, indices({'C'}), 5);
}
DCHECK_EQ(rank, 4);
return IsAlongAxis(tensor, indices({'N', 'H', 'W', 'C'}), 4) ||
IsAlongAxis(tensor, indices({'H', 'W', 'C'}), 4) ||
IsAlongAxis(tensor, indices({'N', 'H', 'W'}), 4) ||
IsAlongAxis(tensor, indices({'H', 'W'}), 4) ||
IsAlongAxis(tensor, indices({'C'}), 4);
}
Status ReduceTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsReduceOp(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsReduceAxisSupported(*context, *node, rank) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1}, node, kOpDataFormatDimMap));
if (KeepDims(*node)) {
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
}
return context->graph_view->GetMutationBuilder()->Apply();
}
Status ReverseV2Transposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsReverseV2(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1}, node, kOpDataFormatDimMap));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
bool SelectTransposer::IsFaninScalarVector4D(
const utils::MutableNodeView& fanin, int port) {
return IsFanoutPortRankN(fanin, port, 0) ||
IsFanoutPortRankN(fanin, port, 1) || IsFanoutPortRankN(fanin, port, 4);
}
std::vector<int> SelectTransposer::GetFaninPorts(
const utils::MutableNodeView& fanin, int port) {
if (IsFanoutPortRankN(fanin, port, 4)) {
return {0, 1, 2};
}
return {1, 2};
}
Status SelectTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSelect(*node->node()));
const auto& regular_fanin_0 = node->GetRegularFanin(0);
auto* regular_fanin_0_node = regular_fanin_0.node_view();
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4) ||
!IsFaninScalarVector4D(*regular_fanin_0_node, regular_fanin_0.index()) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(
context, GetFaninPorts(*regular_fanin_0_node, regular_fanin_0.index()),
node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status ShapeTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsShape(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, {0}, node, kOpDataFormatVecPermute));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status ShapeNTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsShapeN(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
const auto ports = GetVariadicNDFaninPorts(*context, *node, rank);
if (!ShouldProcess(*context, *node) || ports.empty()) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, ports, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, ports, node, kOpDataFormatVecPermute));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status SliceTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSlice(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsFaninPortsDimsNIfConst(*node, {1, 2}, {rank}) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1, 2}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status SplitTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSplit(*node->node()));
const auto ports = GetDataFanoutPorts(*node);
int rank = 4;
if (!IsFanoutPortsRankN(*node, ports, 4)) {
    if (!IsFanoutPortsRankN(*node, ports, 5)) {
      return absl::OkStatus();
    }
    rank = 5;
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {1}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0}, node, kOpDataFormatDimMap));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, ports, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status SplitVTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSplitV(*node->node()));
const auto ports = GetDataFanoutPorts(*node);
int rank = 4;
if (!IsFanoutPortsRankN(*node, ports, 4)) {
    if (!IsFanoutPortsRankN(*node, ports, 5)) {
      return absl::OkStatus();
    }
    rank = 5;
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {2}, node, kOpDataFormatDimMap));
TF_RETURN_IF_ERROR(
UpdateFanoutEdgesWithOp(context, ports, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
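// The squeeze input is convertible only when it is rank 4 with both the H
// and W dimensions of size 1, i.e. only spatial dimensions are squeezed away.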
bool SqueezeTransposer::IsInputConvertible(
const TransposeContext& context, const utils::MutableNodeView& node) const {
const auto& regular_fanin_0 = node.GetRegularFanin(0);
auto* regular_fanin_0_node = regular_fanin_0.node_view();
const auto* output_shape_attr =
regular_fanin_0_node->GetAttr(kAttrOutputShape);
if (output_shape_attr != nullptr) {
auto& shape = output_shape_attr->list().shape(regular_fanin_0.index());
if (shape.dim_size() != kRank) {
return false;
}
const int height_dim = context.src_dim_indices.at('H');
const int width_dim = context.src_dim_indices.at('W');
if (shape.dim(height_dim).size() == 1 && shape.dim(width_dim).size() == 1) {
return true;
}
}
return false;
}
bool SqueezeTransposer::IsAlongAxis(const AttrValue& attr,
absl::Span<const int> axis,
int rank) const {
const auto& list = attr.list();
int axis_size = axis.size();
if (list.i_size() == 0) {
return true;
} else if (list.i_size() != axis_size) {
return false;
}
for (int i = 0; i < axis_size; ++i) {
int local_axis = list.i(i);
if (local_axis < 0) {
local_axis += rank;
}
bool along_axis = false;
for (int dim : axis) {
if (local_axis == dim) {
along_axis = true;
break;
}
}
if (!along_axis) {
return false;
}
}
return true;
}
bool SqueezeTransposer::IsDimsSupported(
const TransposeContext& context, const utils::MutableNodeView& node) const {
auto indices = [&context](absl::Span<const char> labels) {
return GetDimensionIndicesFromLabel(context.src_dim_indices, labels);
};
const auto* squeeze_dims_attr = node.GetAttr(kAttrSqueezeDims);
if (squeeze_dims_attr == nullptr) {
return false;
}
return (IsFanoutPortRankN(node, 0, 2) &&
IsAlongAxis(*squeeze_dims_attr, indices({'H', 'W'}), kRank)) ||
(IsFanoutPortRankN(node, 0, 1) &&
IsAlongAxis(*squeeze_dims_attr, indices({'N', 'H', 'W'}), kRank));
}
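// Remaps each squeeze dimension from its index in the source format to its
// index in the destination format and stores the sorted result back on the
// node.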
Status SqueezeTransposer::UpdateSqueezeDims(TransposeContext* context,
utils::MutableNodeView* node) {
const auto* squeeze_dims_attr = node->GetAttr(kAttrSqueezeDims);
if (squeeze_dims_attr == nullptr) {
return errors::InvalidArgument("Missing attribute ", kAttrSqueezeDims);
}
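  // Validate every squeeze dimension, normalize negative indices, and remap
  // each one to its position in the destination data format.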
const int num_input_dims = context->src_format.length();
const int min_squeeze_dim = -num_input_dims;
std::vector<int> squeeze_dims_mapped;
const int squeeze_dims_size = squeeze_dims_attr->list().i_size();
squeeze_dims_mapped.reserve(squeeze_dims_size);
for (int i = 0; i < squeeze_dims_size; ++i) {
int dim = squeeze_dims_attr->list().i(i);
if (dim < min_squeeze_dim || dim >= num_input_dims) {
return errors::InvalidArgument(
"Attribute '", kAttrSqueezeDims, "' contains out of range index '",
dim, "', index must be between [", min_squeeze_dim, ", ",
num_input_dims, ")");
}
if (dim < 0) {
dim += num_input_dims;
}
squeeze_dims_mapped.push_back(context->dst_to_src[dim]);
}
std::sort(squeeze_dims_mapped.begin(), squeeze_dims_mapped.end());
AttrValue squeeze_dims;
squeeze_dims.mutable_list()->mutable_i()->Reserve(squeeze_dims_size);
for (const auto& dim : squeeze_dims_mapped) {
squeeze_dims.mutable_list()->mutable_i()->Add(dim);
}
context->graph_view->GetMutationBuilder()->AddOrUpdateNodeAttr(
node, kAttrSqueezeDims, squeeze_dims);
return absl::OkStatus();
}
Status SqueezeTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSqueeze(*node->node()));
if (!ShouldProcess(*context, *node) || !IsDimsSupported(*context, *node) ||
!IsInputConvertible(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateSqueezeDims(context, node));
return context->graph_view->GetMutationBuilder()->Apply();
}
bool StridedSliceTransposer::IsMaskZero(const utils::MutableNodeView& node,
absl::string_view mask) {
const auto* mask_attr = node.GetAttr(mask);
if (mask_attr != nullptr) {
return mask_attr->i() == 0;
}
return true;
}
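// Only begin_mask and end_mask are supported; slices using ellipsis,
// new-axis, or shrink-axis masks are left in the source layout.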
bool StridedSliceTransposer::HasOnlyBeginEndMask(
const utils::MutableNodeView& node) {
return IsMaskZero(node, "ellipsis_mask") &&
IsMaskZero(node, "new_axis_mask") &&
IsMaskZero(node, "shrink_axis_mask");
}
Status StridedSliceTransposer::PermuteMask(TransposeContext* context,
utils::MutableNodeView* node,
absl::string_view mask) {
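  // Permutes the per-dimension bits of a begin/end mask: bit i of the result
  // is taken from bit src_to_dst[i] of the original mask, so each flag
  // follows its dimension to the new position, mirroring the permutation
  // applied to the begin/end vectors. E.g. for NHWC -> NCHW a mask of
  // 0b0010 (H) becomes 0b0100.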
const auto* mask_attr = node->GetAttr(mask);
const int mask_i = mask_attr != nullptr ? mask_attr->i() : 0;
if (mask_i < 0 || mask_i > 15) {
return errors::InvalidArgument("invalid mask value: ", mask_i);
}
int result = 0;
for (int i = 0, end = context->src_to_dst.size(); i < end; i++) {
const int final_pos = context->src_to_dst[i];
const int position_mask = 1 << final_pos;
const int bit_i = (mask_i & position_mask) >> final_pos;
result |= bit_i << i;
}
AttrValue new_mask_attr;
new_mask_attr.set_i(result);
context->graph_view->GetMutationBuilder()->AddOrUpdateNodeAttr(node, mask,
new_mask_attr);
return absl::OkStatus();
}
Status StridedSliceTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsStridedSlice(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
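  // When the begin/end/strides fanins are constants, they must be vectors
  // whose length matches the rank so that they can be permuted below.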
if (!ShouldProcess(*context, *node) || !HasOnlyBeginEndMask(*node) ||
!IsAfterDstToSrcTransform(*context, *node) ||
(!IsFaninPortsDimsNIfConst(*node, {1, 2, 3}, {4}) &&
!IsFaninPortsDimsNIfConst(*node, {1, 2, 3, 4}, {5}))) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(PermuteMask(context, node, "begin_mask"));
TF_RETURN_IF_ERROR(PermuteMask(context, node, "end_mask"));
if (rank == 4) {
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {1, 2, 3}, node,
kOpDataFormatVecPermute));
} else if (rank == 5) {
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {1, 2, 3, 4}, node,
kOpDataFormatVecPermute));
}
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status SwitchTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsSwitch(*node->node()));
const int rank = GetFaninPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, GetDataFanoutPorts(*node),
node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status TernaryOpTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsTernaryOp(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 1, 2}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status TileTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsTile(*node->node()));
if (!ShouldProcess(*context, *node) || !IsFanoutPortRankN(*node, 0, 4) ||
!IsFaninPortDimsNIfConst(*node, 1, {4}) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose));
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {1}, node, kOpDataFormatVecPermute));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
Status UnaryGradTransposer::TransposeNode(TransposeContext* context,
utils::MutableNodeView* node) {
DCHECK(IsUnaryGrad(*node->node()));
const int rank = GetFanoutPortRank(*node, 0);
if (rank != 4 && rank != 5) {
return absl::OkStatus();
}
ScopedDataFormatUpgrader data_format_upgrader(context, rank);
if (!ShouldProcess(*context, *node) ||
!IsAfterDstToSrcTransform(*context, *node)) {
return absl::OkStatus();
}
VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName()
<< "' with op '" << node->GetOp() << "' from data format '"
<< context->src_format << "' to '" << context->dst_format << "'";
TF_RETURN_IF_ERROR(
UpdateFaninEdgesWithOp(context, {0, 1}, node, kOpTranspose));
TF_RETURN_IF_ERROR(UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose));
return context->graph_view->GetMutationBuilder()->Apply();
}
string GetDeviceName(const NodeDef& node) { return node.device(); }
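// Ops rewritten by DefaultLayoutSensitiveOpTransposer.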
bool IsDefaultLayoutSensitiveOp(const NodeDef& node) {
static absl::flat_hash_set<string>* default_layout_sensitive_ops =
new absl::flat_hash_set<std::string>(
{"AvgPool", "Conv2D", "DepthwiseConv2dNative", "DepthToSpace",
"FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3",
"FusedConv2DBiasActivation", "MaxPool", "SpaceToDepth"});
return default_layout_sensitive_ops->find(node.op()) !=
default_layout_sensitive_ops->end();
}
bool IsLayoutSensitiveOp(const NodeDef& node) {
return IsDefaultLayoutSensitiveOp(node) || IsAvgPoolGrad(node) ||
IsBiasAddV2(node) || IsBiasAddGrad(node) ||
IsConv2DBackpropFilter(node) || IsConv2DBackpropInput(node) ||
IsDepthwiseConv2dNativeBackpropFilter(node) ||
IsDepthwiseConv2dNativeBackpropInput(node) ||
IsFusedBatchNormEx(node) || IsFusedBatchNormGrad(node) ||
IsMaxPoolV2(node) || IsMaxPoolGrad(node) || IsMaxPoolGradV2(node) ||
IsMaxPoolGradGradV1(node) || IsMaxPoolGradGradV2(node) ||
IsConv3D(node) || IsConv3DBackpropInputV2(node) ||
IsConv3DBackpropFilterV2(node) || IsMaxPool3D(node);
}
bool IsDefaultLayoutAgnosticOp(const NodeDef& node) {
static absl::flat_hash_set<string>* agnostic_nodes =
new absl::flat_hash_set<std::string>({"Abs",
"Acos",
"Acosh",
"Angle",
"Asin",
"Asinh",
"Atan",
"Atanh",
"Bitcast",
"Cast",
"Ceil",
"CheckNumerics",
"ComplexAbs",
"Conj",
"Cos",
"Cosh",
"Digamma",
"Elu",
"Enter",
"Erf",
"Erfc",
"Exit",
"Exp",
"Expm1",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxArgs",
"Floor",
"GuaranteeConst",
"Identity",
"Imag",
"Inv",
"IsFinite",
"IsInf",
"IsNan",
"LeakyRelu",
"Lgamma",
"Log",
"LogicalNot",
"Log1p",
"Neg",
"NextIteration",
"OnesLike",
"PreventGradient",
"QuantizeAndDequantizeV2",
"QuantizeAndDequantizeV3",
"QuantizeAndDequantizeV4",
"Real",
"Reciprocal",
"Relu",
"Relu6",
"Rint",
"Selu",
"Sigmoid",
"Sign",
"Sin",
"Sinh",
"Snapshot",
"Softplus",
"Round",
"Rsqrt",
"Sqrt",
"Square",
"StopGradient",
"Tan",
"Tanh",
"ZerosLike"});
return agnostic_nodes->find(node.op()) != agnostic_nodes->end();
}
bool IsLayoutAgnosticOp(const NodeDef& node) {
return IsDefaultLayoutAgnosticOp(node) || IsAddN(node) || IsBinaryOp(node) ||
IsIdentityN(node) || IsMerge(node) || IsMirrorPad(node) ||
IsMirrorPadGrad(node) || IsPad(node) || IsSelect(node) ||
IsSwitch(node) || IsTernaryOp(node) || IsUnaryGrad(node) ||
IsConcat(node) || IsReverseV2(node) || IsTile(node) || IsShape(node) ||
IsShapeN(node) || IsFill(node) || IsSlice(node) || IsSplit(node) ||
IsSqueeze(node) || IsSplitV(node) || IsStridedSlice(node) ||
IsReduceOp(node);
}
bool IsTernaryOp(const NodeDef& node) { return IsBetainc(node); }
bool IsUnaryGrad(const NodeDef& node) {
bool is_unary_grad =
IsEluGrad(node) || IsInvGrad(node) || IsLeakyReluGrad(node) ||
IsReciprocalGrad(node) || IsRelu6Grad(node) || IsReluGrad(node) ||
IsRsqrtGrad(node) || IsSeluGrad(node) || IsSigmoidGrad(node) ||
IsSoftplusGrad(node) || IsSoftsignGrad(node) || IsSqrtGrad(node) ||
IsTanhGrad(node);
return is_unary_grad;
}
bool IsMaxPoolV2(const NodeDef& node) { return node.op() == "MaxPoolV2"; }
bool IsMaxPool3D(const NodeDef& node) { return node.op() == "MaxPool3D"; }
bool IsMaxPoolGradV2(const NodeDef& node) {
return node.op() == "MaxPoolGradV2";
}
bool IsMaxPoolGradGradV1(const NodeDef& node) {
return node.op() == "MaxPoolGradGrad";
}
bool IsMaxPoolGradGradV2(const NodeDef& node) {
return node.op() == "MaxPoolGradGradV2";
}
bool IsBinaryOp(const NodeDef& node) {
bool is_binary =
IsAdd(node) || IsAtan2(node) || IsComparisonOp(node) || IsComplex(node) ||
IsDiv(node) || IsFloorDiv(node) || IsIgamma(node) || IsIgammac(node) ||
IsLogicalAnd(node) || IsLogicalOr(node) || IsMaximum(node) ||
IsMinimum(node) || IsMod(node) || IsMul(node) || IsPolygamma(node) ||
IsPow(node) || IsRealDiv(node) || IsSquaredDifference(node) ||
IsSub(node) || IsTruncateDiv(node) || IsTruncateMod(node) || IsZeta(node);
return is_binary;
}
bool IsReduceOp(const NodeDef& node) {
return IsSum(node) || IsMean(node) || IsProd(node) || IsMax(node) ||
IsMin(node) || IsAll(node) || IsAny(node);
}
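// Returns the indices of the regular fanins that carry layout-sensitive data,
// as opposed to sizes, shapes, or axis arguments.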
std::vector<int> GetDataFaninPorts(const utils::MutableNodeView& node) {
const auto* node_def = node.node();
if (IsAvgPoolGrad(*node_def) || IsSplit(*node_def)) {
return {1};
}
if (IsStridedSliceGrad(*node_def)) {
return {4};
}
if (IsBinaryOp(*node_def) || IsUnaryGrad(*node_def)) {
return {0, 1};
}
if (IsTernaryOp(*node_def) || IsSelect(*node_def) ||
IsMaxPoolGrad(*node_def) || IsMaxPoolGradV2(*node_def) ||
IsMaxPoolGradGradV1(*node_def) || IsMaxPoolGradGradV2(*node_def)) {
return {0, 1, 2};
}
if (IsShapeN(*node_def) || IsIdentityN(*node_def) || IsAddN(*node_def) ||
IsMerge(*node_def)) {
return GetRegularFaninPorts(node);
}
if (IsConcat(*node_def)) {
return GetConcatDataFaninPorts(node);
}
if (node.NumRegularFanins() > 0) {
return {0};
}
return {};
}
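// Returns the output ports that produce layout-sensitive data; for most ops
// this is just port 0.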
std::vector<int> GetDataFanoutPorts(const utils::MutableNodeView& node) {
const auto* node_def = node.node();
if (IsIdentityN(*node_def) || IsShape(*node_def) || IsShapeN(*node_def)) {
return GetDataFaninPorts(node);
}
if (IsSplit(*node_def) || IsSplitV(*node_def)) {
const auto* num_split_attr = node.GetAttr(kAttrNumSplit);
if (num_split_attr == nullptr) {
return {0};
}
std::vector<int> values(num_split_attr->i());
std::iota(values.begin(), values.end(), 0);
return values;
}
if (IsSwitch(*node_def)) {
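    // A plain Switch always has two outputs; _SwitchN stores its output
    // count in the num_outs attribute.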
const auto* num_outs_attr = node.GetAttr(kAttrNumOuts);
const int num_outs = num_outs_attr != nullptr ? num_outs_attr->i() : 2;
std::vector<int> values(num_outs);
std::iota(values.begin(), values.end(), 0);
return values;
}
return {0};
}
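// Extracts the int32 value tensor from the Const node feeding regular fanin
// `index`. Returns false if `predicate` rejects the node, the fanin is not a
// Const, or its value is not an int32 tensor.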
bool GetValueAttrFromConstInputNode(
const utils::MutableNodeView& node,
const std::function<bool(const NodeDef&)>& predicate, int index,
Tensor* tensor) {
if (!predicate(*node.node())) {
return false;
}
const auto& regular_fanin = node.GetRegularFanin(index);
auto* regular_fanin_node = regular_fanin.node_view();
if (!IsConstant(*regular_fanin_node->node())) {
return false;
}
const auto* value_attr = regular_fanin_node->GetAttr(kAttrValue);
if (value_attr == nullptr || value_attr->tensor().dtype() != DT_INT32) {
return false;
}
if (!tensor->FromProto(value_attr->tensor())) {
return false;
}
return true;
}
bool IsDataFormatOp(const utils::MutableNodeView& node) {
const string& op = node.GetOp();
return op == kOpDataFormatDimMap || op == kOpDataFormatVecPermute;
}
absl::flat_hash_map<char, int> GetDimensionIndices(
absl::string_view data_format) {
const int size = data_format.size();
absl::flat_hash_map<char, int> index;
index.reserve(size);
for (int i = 0; i < size; i++) {
index[data_format[i]] = i;
}
return index;
}
std::vector<int> GetPermutation(
const absl::flat_hash_map<char, int>& src_dim_indices,
absl::string_view dst_format) {
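  // permutation[i] is the index in the source format of the dimension at
  // position i of dst_format. Example: src = NHWC ({N:0, H:1, W:2, C:3}) and
  // dst = NCHW give [0, 3, 1, 2], which is also the perm argument of a
  // Transpose from the source to the destination layout.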
DCHECK(src_dim_indices.size() == dst_format.size());
std::vector<int> permutation;
const int size = dst_format.size();
permutation.reserve(size);
for (int i = 0; i < size; i++) {
permutation.push_back(src_dim_indices.at(dst_format[i]));
}
return permutation;
}
}  // namespace grappler
}  // namespace tensorflow | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops_internal.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::ExpectTensorEqual;
constexpr int kBatchSize = 32;
constexpr int kWidth = 10;
constexpr int kHeight = 10;
constexpr int kDepthIn = 8;
constexpr int kKernel = 2;
constexpr int kStride1 = 2;
constexpr int kStride2 = 4;
constexpr int kOutWidth = 5;
constexpr int kOutHeight = 5;
constexpr int kDepthOut = 16;
constexpr int kDilation = 2;
constexpr int kPaddingTop = 1;
constexpr int kPaddingBottom = 2;
constexpr int kPaddingLeft = 3;
constexpr int kPaddingRight = 4;
constexpr char kSrcFormat[] = "NHWC";
constexpr char kDstFormat[] = "NCHW";
constexpr char kGPU[] = "GPU";
constexpr char kAttrOutputShapes[] = "_output_shapes";
constexpr char kAttrDataFormat[] = "data_format";
constexpr char kOpTranspose[] = "Transpose";
class TransposerImpl : public Transposer {
public:
explicit TransposerImpl() : Transposer() {}
Status TransposeNode(TransposeContext*, utils::MutableNodeView*) override {
return absl::OkStatus();
}
};
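// Asserts that regular fanin `port` of `node` is output `fanin_port` of the
// node named `fanin_name`.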
void VerifyRegularFaninMatch(const utils::MutableNodeView* node, int port,
absl::string_view fanin_name, int fanin_port) {
ASSERT_GT(node->NumRegularFanins(), port);
const auto& fanin = node->GetRegularFanin(port);
EXPECT_EQ(fanin.node_view()->GetName(), fanin_name);
EXPECT_EQ(fanin.index(), fanin_port);
}
void VerifyShapeAttributeMatch(const utils::MutableNodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrOutputShapes);
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->shape().DebugString(), attr_value);
}
void VerifyShapeAttributeMatch(const utils::MutableNodeView* node,
int shape_index, absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrOutputShapes);
ASSERT_NE(attr, nullptr);
ASSERT_GT(attr->list().shape_size(), shape_index);
EXPECT_EQ(attr->list().shape(shape_index).DebugString(), attr_value);
}
void VerifyDataFormatAttributeMatch(const utils::MutableNodeView* node,
absl::string_view attr_value) {
const auto* attr = node->GetAttr(kAttrDataFormat);
ASSERT_NE(attr, nullptr);
EXPECT_EQ(attr->s(), attr_value);
}
Output SimpleConv2D(const Scope* scope, const DataType& data_type = DT_FLOAT) {
auto input =
ops::RandomUniform(scope->WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto filter =
ops::RandomUniform(scope->WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, data_type);
auto conv2d = ops::Conv2D(
scope->WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, kStride1, kStride2, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
return conv2d;
}
Status CreateSimpleConv2DGraph(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope, data_type);
auto output = ops::Identity(scope.WithOpName("output"), conv2d);
return scope.ToGraphDef(graph);
}
Status CreateSimpleFusedBatchNorm(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto x =
ops::RandomUniform(scope.WithOpName("x"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto scale =
ops::RandomUniform(scope.WithOpName("scale"), {kDepthIn}, DT_FLOAT);
auto offset =
ops::RandomUniform(scope.WithOpName("offset"), {kDepthIn}, DT_FLOAT);
auto mean =
ops::RandomUniform(scope.WithOpName("mean"), {kDepthIn}, DT_FLOAT);
auto var = ops::RandomUniform(scope.WithOpName("var"), {kDepthIn}, DT_FLOAT);
auto batch_norm = ops::FusedBatchNormV2(
scope.WithOpName("bn").WithDevice("/device:GPU:0"), x, scale, offset,
mean, var, ops::FusedBatchNormV2::IsTraining(false).Epsilon(0.1f));
auto output_y = ops::Identity(scope.WithOpName("output_y"), batch_norm.y);
auto output_mean =
ops::Identity(scope.WithOpName("output_mean"), batch_norm.batch_mean);
auto output_variance = ops::Identity(scope.WithOpName("output_variance"),
batch_norm.batch_variance);
return scope.ToGraphDef(graph);
}
Status CreateSimpleMaxPoolGrad(GraphDef* graph, bool use_grad_grad) {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("orig_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto output_data = ops::RandomUniform(
scope.WithOpName("orig_output"),
{kBatchSize, kOutHeight, kOutWidth, kDepthIn}, DT_FLOAT);
auto output_grad =
ops::RandomUniform(scope.WithOpName("grad"),
{kBatchSize, use_grad_grad ? kHeight : kOutHeight,
use_grad_grad ? kWidth : kOutWidth, kDepthIn},
DT_FLOAT);
Output maxpool_grad;
if (use_grad_grad) {
maxpool_grad = ops::MaxPoolGradGrad(
scope.WithOpName("maxpool_grad").WithDevice("/device:GPU:0"), input,
output_data, output_grad, {1, kKernel, kKernel, 1},
{1, kStride1, kStride1, 1}, "VALID");
} else {
maxpool_grad = ops::internal::MaxPoolGrad(
scope.WithOpName("maxpool_grad").WithDevice("/device:GPU:0"), input,
output_data, output_grad, {1, kKernel, kKernel, 1},
{1, kStride1, kStride1, 1}, "VALID");
}
auto output = ops::Identity(scope.WithOpName("output"), maxpool_grad);
return scope.ToGraphDef(graph);
}
Status CreateSimpleBiasAddGrad(GraphDef* graph, const Input& shape) {
Scope scope = Scope::NewRootScope();
auto input = ops::RandomUniform(scope.WithOpName("input"), shape, DT_FLOAT);
auto bag =
ops::BiasAddGrad(scope.WithOpName("bag").WithDevice("/device:GPU:0"),
input, ops::BiasAddGrad::DataFormat(kSrcFormat));
auto output = ops::Identity(scope.WithOpName("output"), bag);
return scope.ToGraphDef(graph);
}
Status CreateSimpleConv2DBackpropFilter(GraphDef* graph,
const DataType& data_type = DT_FLOAT,
absl::string_view padding = "SAME") {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto out_backprop =
ops::RandomUniform(scope.WithOpName("out_backprop"),
{kBatchSize, kHeight, kWidth, kDepthOut}, data_type);
if (padding == "EXPLICIT") {
auto conv2d_backprop_filter = ops::Conv2DBackpropFilter(
scope.WithOpName("conv2d_backprop_filter").WithDevice("/device:GPU:0"),
input, {kHeight, kWidth, kDepthIn, kDepthOut}, out_backprop,
{1, 2, 4, 1}, padding,
ops::Conv2DBackpropFilter::Attrs()
.Dilations({1, kDilation, kDilation, 1})
.ExplicitPaddings({0, 0, kPaddingTop, kPaddingBottom, kPaddingLeft,
kPaddingRight, 0, 0})
.DataFormat(kSrcFormat));
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_filter);
} else {
auto conv2d_backprop_filter = ops::Conv2DBackpropFilter(
scope.WithOpName("conv2d_backprop_filter").WithDevice("/device:GPU:0"),
input, {kHeight, kWidth, kDepthIn, kDepthOut}, out_backprop,
{1, 2, 4, 1}, padding,
ops::Conv2DBackpropFilter::DataFormat(kSrcFormat));
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_filter);
}
return scope.ToGraphDef(graph);
}
Status CreateSimpleConv2DBackpropInput(GraphDef* graph,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto input_sizes = ops::Const(scope.WithOpName("input_sizes"),
{kBatchSize, kHeight, kWidth, kDepthIn});
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, data_type);
auto out_backprop =
ops::RandomUniform(scope.WithOpName("out_backprop"),
{kBatchSize, kHeight, kWidth, kDepthOut}, data_type);
auto conv2d_backprop_input = ops::Conv2DBackpropInput(
scope.WithOpName("conv2d_backprop_input").WithDevice("/device:GPU:0"),
input_sizes, filter, out_backprop, {1, kStride1, kStride1, 1}, "VALID");
auto output =
ops::Identity(scope.WithOpName("output"), conv2d_backprop_input);
return scope.ToGraphDef(graph);
}
Status CreateSimpleFusedBatchNormGrad(GraphDef* graph, bool is_training,
const DataType& data_type = DT_FLOAT) {
Scope scope = Scope::NewRootScope();
auto y_backprop =
ops::RandomUniform(scope.WithOpName("y_backprop"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto x =
ops::RandomUniform(scope.WithOpName("x"),
{kBatchSize, kHeight, kWidth, kDepthIn}, data_type);
auto scale =
ops::RandomUniform(scope.WithOpName("scale"), {kDepthIn}, DT_FLOAT);
auto reserve_space_1 = ops::RandomUniform(scope.WithOpName("reserve_space_1"),
{kDepthIn}, DT_FLOAT);
auto reserve_space_2 = ops::RandomUniform(scope.WithOpName("reserve_space_2"),
{kDepthIn}, DT_FLOAT);
auto fused_batch_norm_grad = ops::FusedBatchNormGradV2(
scope.WithOpName("fused_batch_norm_grad").WithDevice("/device:GPU:0"),
y_backprop, x, scale, reserve_space_1, reserve_space_2,
ops::FusedBatchNormGradV2::DataFormat(kSrcFormat)
.IsTraining(is_training)
.Epsilon(0.1f));
auto x_backprop = ops::Identity(scope.WithOpName("x_backprop"),
fused_batch_norm_grad.x_backprop);
auto scale_backprop = ops::Identity(scope.WithOpName("scale_backprop"),
fused_batch_norm_grad.scale_backprop);
auto offset_backprop = ops::Identity(scope.WithOpName("offset_backprop"),
fused_batch_norm_grad.offset_backprop);
auto reserve_space_3 = ops::Identity(scope.WithOpName("reserve_space_3"),
fused_batch_norm_grad.reserve_space_3);
auto reserve_space_4 = ops::Identity(scope.WithOpName("reserve_space_4"),
fused_batch_norm_grad.reserve_space_4);
return scope.ToGraphDef(graph);
}
Status CreateSimpleAddN(GraphDef* graph) {
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
Output a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output b = ops::RandomUniform(scope.WithOpName("b"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output c = ops::RandomUniform(scope.WithOpName("c"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto add_n = ops::AddN(scope.WithOpName("add_n").WithDevice("/device:GPU:0"),
{a, b, c, conv2d});
auto output = ops::Identity(scope.WithOpName("output"), add_n);
return scope.ToGraphDef(graph);
}
Status CreateSimpleIdentityN(GraphDef* graph) {
Scope scope = Scope::NewRootScope();
auto conv2d_1_input =
ops::RandomUniform(scope.WithOpName("conv2d_1_input"),
{kBatchSize, kDepthIn, kHeight, kWidth}, DT_FLOAT);
auto conv2d_1_filter =
ops::RandomUniform(scope.WithOpName("conv2d_1_filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d_1 =
ops::Conv2D(scope.WithOpName("conv2d_1").WithDevice("/device:GPU:0"),
conv2d_1_input, conv2d_1_filter, {1, 1, 2, 4}, "SAME",
ops::Conv2D::DataFormat(kDstFormat));
auto conv2d_2_input =
ops::RandomUniform(scope.WithOpName("conv2d_2_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto conv2d_2_filter =
ops::RandomUniform(scope.WithOpName("conv2d_2_filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d_2 =
ops::Conv2D(scope.WithOpName("conv2d_2").WithDevice("/device:GPU:0"),
conv2d_2_input, conv2d_2_filter, {1, 2, 4, 1}, "SAME",
ops::Conv2D::DataFormat(kSrcFormat));
Output a = ops::RandomUniform(
scope.WithOpName("a"), {kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
Output b = ops::RandomUniform(scope.WithOpName("b"), {kBatchSize, kDepthIn},
DT_FLOAT);
auto identity_n =
ops::IdentityN(scope.WithOpName("identity_n").WithDevice("/device:GPU:0"),
{conv2d_1, conv2d_2, a, b});
auto conv2d_1_output =
ops::Identity(scope.WithOpName("conv2d_1_output"), identity_n.output[0]);
auto conv2d_2_output =
ops::Identity(scope.WithOpName("conv2d_2_output"), identity_n.output[1]);
auto a_output =
ops::Identity(scope.WithOpName("a_output"), identity_n.output[2]);
auto b_output =
ops::Identity(scope.WithOpName("b_output"), identity_n.output[3]);
return scope.ToGraphDef(graph);
}
class TransposerTest : public ::testing::Test {
protected:
void SetUp() override {
bool gpu_available = GetNumAvailableGPUs() > 0;
if (gpu_available) {
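      // SingleMachine(timeout_s, num_cpu_cores, num_gpus): run on the real
      // machine when a physical GPU is available.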
      virtual_cluster_ = std::make_unique<SingleMachine>(10, 1, 1);
} else {
DeviceProperties gpu_device;
gpu_device.set_type(kGPU);
gpu_device.mutable_environment()->insert({"architecture", "6"});
virtual_cluster_ =
absl::WrapUnique(new VirtualCluster({{"/GPU:1", gpu_device}}));
}
TF_ASSERT_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_ASSERT_OK(virtual_cluster_->Shutdown()); }
template <typename T>
void ReduceTransposerKeepDims() {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const<T>(scope.WithOpName("axis"), {0, 1, 2}, {3});
auto attrs = ops::Sum::Attrs().KeepDims(true);
auto sum_op = ops::Sum(scope.WithOpName("sum").WithDevice("/device:GPU:0"),
conv2d, axis, attrs);
auto z = ops::Identity(scope.WithOpName("z"), sum_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ReduceTransposer reducer_transposer;
auto* sum = context.graph_view->GetNode("sum");
ASSERT_NE(sum, nullptr);
TF_ASSERT_OK(reducer_transposer.TransposeNode(&context, sum));
auto* input_transpose_node = context.graph_view->GetNode(
"sum-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
auto* updated_sum_node = context.graph_view->GetNode("sum");
ASSERT_NE(updated_sum_node, nullptr);
ASSERT_EQ(updated_sum_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_sum_node, 0,
input_transpose_node->GetName(), 0);
auto* axis_node = context.graph_view->GetNode(
"sum-1-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* output_transpose_node = context.graph_view->GetNode(
"sum-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
template <typename T>
void ReduceTransposerValidAxisNode() {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const<T>(scope.WithOpName("axis"), {0, 1, 2}, {3});
auto sum_op = ops::Max(scope.WithOpName("max").WithDevice("/device:GPU:0"),
conv2d, axis);
auto z = ops::Identity(scope.WithOpName("z"), sum_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ReduceTransposer reducer_transposer;
auto* max = context.graph_view->GetNode("max");
ASSERT_NE(max, nullptr);
TF_ASSERT_OK(reducer_transposer.TransposeNode(&context, max));
auto* input_transpose_node = context.graph_view->GetNode(
"max-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
auto* updated_max_node = context.graph_view->GetNode("max");
ASSERT_NE(updated_max_node, nullptr);
ASSERT_EQ(updated_max_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_max_node, 0,
input_transpose_node->GetName(), 0);
auto* axis_node = context.graph_view->GetNode(
"max-1-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, updated_max_node->GetName(), 0);
}
std::unique_ptr<Cluster> virtual_cluster_;
};
TEST_F(TransposerTest, CreateConstPermNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
constexpr char kNodeName[] = "const_perm_node";
constexpr char kDevice[] = "/device:GPU:0";
utils::MutationNewNode added_node;
EXPECT_FALSE(context.graph_view->HasNode(kNodeName));
TF_ASSERT_OK(transposer.CreateConstPermNode(&context, kNodeName, kDevice,
{0, 3, 1, 2}, "", &added_node));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
utils::MutableNodeView* const_perm_node =
context.graph_view->GetNode(kNodeName);
EXPECT_EQ(const_perm_node->GetName(), kNodeName);
EXPECT_EQ(const_perm_node->GetDevice(), kDevice);
const auto* value_attr = const_perm_node->GetAttr("value");
ASSERT_NE(value_attr, nullptr);
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(value_attr->tensor()));
Tensor expected(DT_INT32, {4});
::tensorflow::test::FillValues<int32>(&expected, {0, 3, 1, 2});
ExpectTensorEqual<int32>(tensor, expected);
}
TensorShapeProto MakeTensorShapeFromDimensions(absl::Span<const int> dims) {
  TensorShapeProto shape_proto;
for (const int dim : dims) {
    TensorShapeProto_Dim dim_proto;
dim_proto.set_size(dim);
*shape_proto.add_dim() = std::move(dim_proto);
}
return shape_proto;
}
TEST_F(TransposerTest, CreateTransposeNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
constexpr char kNodeNameFormat[] =
"transpose_node-0-$0-NWCHToNCWH-LayoutOptimizer";
constexpr char kDevice[] = "/device:GPU:0";
TensorShapeProto input_shape = MakeTensorShapeFromDimensions({1, 2, 3, 4});
TensorShapeProto expected_shape = MakeTensorShapeFromDimensions({1, 4, 2, 3});
utils::MutationNewNode added_node;
string transpose_node_name;
TF_ASSERT_OK(transposer.CreateTransposeNode(
&context, kNodeNameFormat, DT_DOUBLE, kDevice, input_shape, {0, 3, 1, 2},
"", &added_node, &transpose_node_name));
EXPECT_EQ(transpose_node_name,
"transpose_node-0-Transpose-NWCHToNCWH-LayoutOptimizer");
utils::Mutation* mutation = context.graph_view->GetMutationBuilder();
Status status;
mutation->AddNode({}, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* transpose_node = context.graph_view->GetNode(transpose_node_name);
ASSERT_NE(transpose_node, nullptr);
EXPECT_EQ(transpose_node->GetDevice(), kDevice);
const auto* output_shapes_attr = transpose_node->GetAttr("_output_shapes");
EXPECT_EQ(output_shapes_attr->list().shape(0).DebugString(),
expected_shape.DebugString());
}
TEST_F(TransposerTest, UpdateNode) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer transposer;
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
TF_ASSERT_OK(transposer.UpdateNode(&context, conv2d));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(updated_conv2d, nullptr);
VerifyDataFormatAttributeMatch(updated_conv2d, kDstFormat);
}
AttrValue_ListValue MakeAttrValueListValueFromVector(
absl::Span<const int> vec) {
  AttrValue_ListValue list_proto;
for (const int i : vec) {
list_proto.add_i(i);
}
return list_proto;
}
TEST_F(TransposerTest, UpdateStrides) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, "ABCD", "ACBD");
AttrValue_ListValue expected_original_strides =
MakeAttrValueListValueFromVector({1, 2, 4, 1});
AttrValue_ListValue expected_updated_strides =
MakeAttrValueListValueFromVector({1, 4, 2, 1});
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
const auto& strides_attr = conv2d->GetAttr("strides");
ASSERT_NE(strides_attr, nullptr);
EXPECT_EQ(strides_attr->list().DebugString(),
expected_original_strides.DebugString());
AttrValue data_format_attr;
data_format_attr.set_s("ABCD");
context.graph_view->GetMutationBuilder()->AddOrUpdateNodeAttr(
conv2d, "data_format", data_format_attr);
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
DefaultLayoutSensitiveOpTransposer transposer;
TF_ASSERT_OK(transposer.UpdateNode(&context, conv2d));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
const auto& updated_strides_attr = updated_conv2d->GetAttr("strides");
ASSERT_NE(updated_strides_attr, nullptr);
EXPECT_EQ(updated_strides_attr->list().DebugString(),
expected_updated_strides.DebugString());
}
TEST_F(TransposerTest, UpdateFaninEdgesTranspose) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleFusedBatchNormGrad(&item.graph, true));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
FusedBatchNormGradTransposer transposer;
auto* fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng, nullptr);
const auto& fbng_output_shapes_attr = fbng->GetAttr("_output_shapes");
ASSERT_NE(fbng_output_shapes_attr, nullptr);
const TensorShapeProto& expected_shape = fbng_output_shapes_attr->shape();
TF_ASSERT_OK(
transposer.UpdateFaninEdgesWithOp(&context, {0, 1}, fbng, kOpTranspose));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* transpose_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(transpose_node1, nullptr);
VerifyShapeAttributeMatch(transpose_node1, expected_shape.DebugString());
auto* transpose_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(transpose_node2, nullptr);
VerifyShapeAttributeMatch(transpose_node2, expected_shape.DebugString());
auto* const_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-PermConstNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(const_node1, nullptr);
auto* const_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-PermConstNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(const_node2, nullptr);
auto* y_backprop = context.graph_view->GetNode("y_backprop");
ASSERT_NE(y_backprop, nullptr);
ASSERT_EQ(transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(transpose_node1, 0, y_backprop->GetName(), 0);
VerifyRegularFaninMatch(transpose_node1, 1, const_node1->GetName(), 0);
auto* x = context.graph_view->GetNode("x");
ASSERT_NE(x, nullptr);
ASSERT_EQ(transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(transpose_node2, 0, x->GetName(), 0);
VerifyRegularFaninMatch(transpose_node2, 1, const_node2->GetName(), 0);
auto* updated_fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(updated_fbng, nullptr);
ASSERT_EQ(updated_fbng->NumRegularFanins(), 5);
VerifyRegularFaninMatch(updated_fbng, 0, transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(updated_fbng, 1, transpose_node2->GetName(), 0);
}
TEST_F(TransposerTest, UpdateFanoutEdgesTranspose) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
TransposerImpl transposer;
TensorShapeProto expected_original_shape =
MakeTensorShapeFromDimensions({32, 5, 3, 16});
TensorShapeProto expected_updated_shape =
MakeTensorShapeFromDimensions({32, 16, 5, 3});
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
VerifyShapeAttributeMatch(conv2d, 0, expected_original_shape.DebugString());
TF_ASSERT_OK(
transposer.UpdateFanoutEdgesWithOp(&context, {0}, conv2d, kOpTranspose));
TF_ASSERT_OK(context.graph_view->GetMutationBuilder()->Apply());
auto* updated_conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(updated_conv2d, nullptr);
VerifyShapeAttributeMatch(updated_conv2d, 0,
expected_updated_shape.DebugString());
auto* transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(transpose_node, nullptr);
VerifyShapeAttributeMatch(transpose_node, 0,
expected_original_shape.DebugString());
auto* const_node = context.graph_view->GetNode(
"conv2d-0-0-PermConstNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(const_node, nullptr);
ASSERT_EQ(transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(transpose_node, 0, updated_conv2d->GetName(), 0);
VerifyRegularFaninMatch(transpose_node, 1, const_node->GetName(), 0);
auto* output = context.graph_view->GetNode("output");
ASSERT_NE(output, nullptr);
ASSERT_EQ(output->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output, 0, transpose_node->GetName(), 0);
}
TEST_F(TransposerTest, DefaultLayoutSensitiveOpTransposerTestFusedBatchNorm) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleFusedBatchNorm(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer transposer;
auto* bn = context.graph_view->GetNode("bn");
TF_ASSERT_OK(transposer.TransposeNode(&context, bn));
auto* input_transpose_node =
context.graph_view->GetNode("bn-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "x", 0);
auto* bn_node = context.graph_view->GetNode("bn");
ASSERT_NE(bn_node, nullptr);
ASSERT_EQ(bn_node->NumRegularFanins(), 5);
VerifyRegularFaninMatch(bn_node, 0, input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(bn_node, 1, "scale", 0);
VerifyRegularFaninMatch(bn_node, 2, "offset", 0);
VerifyRegularFaninMatch(bn_node, 3, "mean", 0);
VerifyRegularFaninMatch(bn_node, 4, "var", 0);
VerifyDataFormatAttributeMatch(bn_node, kDstFormat);
auto* output_transpose_node =
context.graph_view->GetNode("bn-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, bn_node->GetName(), 0);
auto* output_y = context.graph_view->GetNode("output_y");
ASSERT_NE(output_y, nullptr);
ASSERT_EQ(output_y->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_y, 0, output_transpose_node->GetName(), 0);
auto* output_mean = context.graph_view->GetNode("output_mean");
ASSERT_NE(output_mean, nullptr);
ASSERT_EQ(output_mean->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_mean, 0, bn_node->GetName(), 1);
auto* output_variance = context.graph_view->GetNode("output_variance");
ASSERT_NE(output_variance, nullptr);
ASSERT_EQ(output_variance->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_variance, 0, bn_node->GetName(), 2);
}
TEST_F(TransposerTest, DefaultLayoutSensitiveOpTransposerTestConv2D) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DGraph(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer transposer;
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, conv2d));
auto* input_transpose_node = context.graph_view->GetNode(
"conv2d-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "input", 0);
auto* conv2d_node = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d_node, nullptr);
ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv2d_node, 0, input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(conv2d_node, 1, "filter", 0);
VerifyDataFormatAttributeMatch(conv2d_node, kDstFormat);
const auto* strides_attr = conv2d_node->GetAttr("strides");
ASSERT_NE(strides_attr, nullptr);
ASSERT_EQ(strides_attr->list().i_size(), 4);
EXPECT_EQ(strides_attr->list().i(0), 1);
EXPECT_EQ(strides_attr->list().i(1), 1);
EXPECT_EQ(strides_attr->list().i(2), kStride1);
EXPECT_EQ(strides_attr->list().i(3), kStride2);
auto* output_transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, conv2d_node->GetName(), 0);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
TEST_F(TransposerTest, MaxPoolGradTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
for (bool use_grad_grad : {false, true}) {
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleMaxPoolGrad(&item.graph, use_grad_grad));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
MaxPoolGradTransposer transposer;
auto* maxpool_grad = context.graph_view->GetNode("maxpool_grad");
ASSERT_NE(maxpool_grad, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, maxpool_grad));
auto* input_transpose_node1 = context.graph_view->GetNode(
"maxpool_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0, "orig_input", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"maxpool_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "orig_output", 0);
auto* input_transpose_node3 = context.graph_view->GetNode(
"maxpool_grad-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node3, nullptr);
ASSERT_EQ(input_transpose_node3->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node3, 0, "grad", 0);
auto* updated_maxpool_grad = context.graph_view->GetNode("maxpool_grad");
VerifyDataFormatAttributeMatch(updated_maxpool_grad, kDstFormat);
ASSERT_EQ(updated_maxpool_grad->NumRegularFanins(), 3);
VerifyRegularFaninMatch(updated_maxpool_grad, 0,
input_transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpool_grad, 1,
input_transpose_node2->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpool_grad, 2,
input_transpose_node3->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"maxpool_grad-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0,
updated_maxpool_grad->GetName(), 0);
}
}
TEST_F(TransposerTest, BiasAddGradTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleBiasAddGrad(
&item.graph, {kBatchSize, kHeight, kWidth, kDepthIn}));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
BiasAddGradTransposer transposer;
auto* bag = context.graph_view->GetNode("bag");
ASSERT_NE(bag, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, bag));
auto* input_transpose_node =
context.graph_view->GetNode("bag-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "input", 0);
auto* bag_node = context.graph_view->GetNode("bag");
ASSERT_NE(bag_node, nullptr);
VerifyDataFormatAttributeMatch(bag_node, kDstFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"bag-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, bag_node->GetName(), 0);
}
TEST_F(TransposerTest, BiasAddGradTransposerIncorrectInputTest) {
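  // The input is rank 3 rather than rank 4, so the transposer must leave the
  // graph unchanged: no Transpose is inserted and data_format stays NHWC.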
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(
CreateSimpleBiasAddGrad(&item.graph, {kHeight, kWidth, kDepthIn}));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
BiasAddGradTransposer transposer;
auto* bag = context.graph_view->GetNode("bag");
ASSERT_NE(bag, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, bag));
auto* input_transpose_node =
context.graph_view->GetNode("bag-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node, nullptr);
auto* bag_node = context.graph_view->GetNode("bag");
ASSERT_NE(bag_node, nullptr);
VerifyDataFormatAttributeMatch(bag_node, kSrcFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"bag-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, bag_node->GetName(), 0);
}
TEST_F(TransposerTest, Conv2DBackpropFilterTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DBackpropFilter(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
Conv2DBackpropFilterTransposer transposer;
auto* conv2d_bf = context.graph_view->GetNode("conv2d_backprop_filter");
ASSERT_NE(conv2d_bf, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, conv2d_bf));
auto* input_transpose_node1 = context.graph_view->GetNode(
"conv2d_backprop_filter-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0, "input", 0);
auto* input_transpose_node_filter_sizes = context.graph_view->GetNode(
"conv2d_backprop_filter-1-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node_filter_sizes, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"conv2d_backprop_filter-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "out_backprop", 0);
auto* conv2d_bf_node = context.graph_view->GetNode("conv2d_backprop_filter");
ASSERT_NE(conv2d_bf_node, nullptr);
ASSERT_EQ(conv2d_bf_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(conv2d_bf_node, 0, input_transpose_node1->GetName(),
0);
VerifyRegularFaninMatch(conv2d_bf_node, 2, input_transpose_node2->GetName(),
0);
VerifyDataFormatAttributeMatch(conv2d_bf_node, kDstFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"conv2d_backprop_filter-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, conv2d_bf_node->GetName(), 0);
}
TEST_F(TransposerTest, NodeAttributes) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(
CreateSimpleConv2DBackpropFilter(&item.graph, DT_FLOAT, "EXPLICIT"));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
Conv2DBackpropFilterTransposer transposer;
auto* conv2d_bf = context.graph_view->GetNode("conv2d_backprop_filter");
ASSERT_NE(conv2d_bf, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, conv2d_bf));
auto* conv2d_bf_node = context.graph_view->GetNode("conv2d_backprop_filter");
ASSERT_NE(conv2d_bf_node, nullptr);
ASSERT_EQ(conv2d_bf_node->NumRegularFanins(), 3);
VerifyDataFormatAttributeMatch(conv2d_bf_node, kDstFormat);
auto* dilations_attr = conv2d_bf_node->GetAttr("dilations");
ASSERT_NE(dilations_attr, nullptr);
ASSERT_EQ(dilations_attr->list().i_size(), 4);
EXPECT_EQ(dilations_attr->list().i(0), 1);
EXPECT_EQ(dilations_attr->list().i(1), 1);
EXPECT_EQ(dilations_attr->list().i(2), kDilation);
EXPECT_EQ(dilations_attr->list().i(3), kDilation);
auto* explicit_paddings_attr = conv2d_bf_node->GetAttr("explicit_paddings");
ASSERT_NE(explicit_paddings_attr, nullptr);
ASSERT_EQ(explicit_paddings_attr->list().i_size(), 8);
EXPECT_EQ(explicit_paddings_attr->list().i(0), 0);
EXPECT_EQ(explicit_paddings_attr->list().i(1), 0);
EXPECT_EQ(explicit_paddings_attr->list().i(2), 0);
EXPECT_EQ(explicit_paddings_attr->list().i(3), 0);
EXPECT_EQ(explicit_paddings_attr->list().i(4), kPaddingTop);
EXPECT_EQ(explicit_paddings_attr->list().i(5), kPaddingBottom);
EXPECT_EQ(explicit_paddings_attr->list().i(6), kPaddingLeft);
EXPECT_EQ(explicit_paddings_attr->list().i(7), kPaddingRight);
}
TEST_F(TransposerTest, Conv2DBackpropInputTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleConv2DBackpropInput(&item.graph));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
Conv2DBackpropInputTransposer transposer;
auto* conv2d_i = context.graph_view->GetNode("conv2d_backprop_input");
ASSERT_NE(conv2d_i, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, conv2d_i));
auto* input_vec_permute_node = context.graph_view->GetNode(
"conv2d_backprop_input-0-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_vec_permute_node, nullptr);
ASSERT_EQ(input_vec_permute_node->NumRegularFanins(), 1);
const auto* src_format_attr = input_vec_permute_node->GetAttr(kAttrSrcFormat);
ASSERT_NE(src_format_attr, nullptr);
EXPECT_EQ(src_format_attr->s(), kSrcFormat);
const auto* dst_format_attr = input_vec_permute_node->GetAttr(kAttrDstFormat);
ASSERT_NE(dst_format_attr, nullptr);
EXPECT_EQ(dst_format_attr->s(), kDstFormat);
auto* input_transpose_node = context.graph_view->GetNode(
"conv2d_backprop_input-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0, "out_backprop", 0);
auto* conv2d_i_node = context.graph_view->GetNode("conv2d_backprop_input");
ASSERT_NE(conv2d_i_node, nullptr);
ASSERT_EQ(conv2d_i_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(conv2d_i_node, 0, input_vec_permute_node->GetName(),
0);
VerifyRegularFaninMatch(conv2d_i_node, 1, "filter", 0);
VerifyRegularFaninMatch(conv2d_i_node, 2, input_transpose_node->GetName(), 0);
VerifyDataFormatAttributeMatch(conv2d_i_node, kDstFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"conv2d_backprop_input-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, conv2d_i_node->GetName(),
0);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
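// With is_training=true, FusedBatchNormGrad should have its two 4-D inputs
// (y_backprop and x) transposed, its 1-D inputs left alone, and only the
// first output (x_backprop) transposed back.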
TEST_F(TransposerTest, FusedBatchNormGradTransposerIsTrainingTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleFusedBatchNormGrad(&item.graph, true));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
FusedBatchNormGradTransposer transposer;
auto* fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, fbng));
auto* input_transpose_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0, "y_backprop", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "x", 0);
auto* fbng_node = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng_node, nullptr);
ASSERT_EQ(fbng_node->NumRegularFanins(), 5);
VerifyRegularFaninMatch(fbng_node, 0, input_transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(fbng_node, 1, input_transpose_node2->GetName(), 0);
VerifyRegularFaninMatch(fbng_node, 2, "scale", 0);
VerifyRegularFaninMatch(fbng_node, 3, "reserve_space_1", 0);
VerifyRegularFaninMatch(fbng_node, 4, "reserve_space_2", 0);
VerifyDataFormatAttributeMatch(fbng_node, kDstFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"fused_batch_norm_grad-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, fbng_node->GetName(), 0);
auto* x_backprop = context.graph_view->GetNode("x_backprop");
ASSERT_NE(x_backprop, nullptr);
ASSERT_EQ(x_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(x_backprop, 0, output_transpose_node->GetName(), 0);
auto* scale_backprop = context.graph_view->GetNode("scale_backprop");
ASSERT_NE(scale_backprop, nullptr);
ASSERT_EQ(scale_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(scale_backprop, 0, fbng_node->GetName(), 1);
auto* offset_backprop = context.graph_view->GetNode("offset_backprop");
ASSERT_NE(offset_backprop, nullptr);
ASSERT_EQ(offset_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(offset_backprop, 0, fbng_node->GetName(), 2);
auto* reserve_space_3 = context.graph_view->GetNode("reserve_space_3");
ASSERT_NE(reserve_space_3, nullptr);
ASSERT_EQ(reserve_space_3->NumRegularFanins(), 1);
VerifyRegularFaninMatch(reserve_space_3, 0, fbng_node->GetName(), 3);
auto* reserve_space_4 = context.graph_view->GetNode("reserve_space_4");
ASSERT_NE(reserve_space_4, nullptr);
ASSERT_EQ(reserve_space_4->NumRegularFanins(), 1);
VerifyRegularFaninMatch(reserve_space_4, 0, fbng_node->GetName(), 4);
}
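// With is_training=false, FusedBatchNormGrad should be left in the source
// format with no transpose nodes inserted.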
TEST_F(TransposerTest, FusedBatchNormGradTransposerNotTrainingTest) {
GrapplerItem item;
TransposeContext context;
TF_ASSERT_OK(CreateSimpleFusedBatchNormGrad(&item.graph, false));
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
FusedBatchNormGradTransposer transposer;
auto* fbng = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, fbng));
auto* input_transpose_node1 = context.graph_view->GetNode(
"fused_batch_norm_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"fused_batch_norm_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node2, nullptr);
auto* fbng_node = context.graph_view->GetNode("fused_batch_norm_grad");
ASSERT_NE(fbng_node, nullptr);
ASSERT_EQ(fbng_node->NumRegularFanins(), 5);
VerifyRegularFaninMatch(fbng_node, 0, "y_backprop", 0);
VerifyRegularFaninMatch(fbng_node, 1, "x", 0);
VerifyRegularFaninMatch(fbng_node, 2, "scale", 0);
VerifyRegularFaninMatch(fbng_node, 3, "reserve_space_1", 0);
VerifyRegularFaninMatch(fbng_node, 4, "reserve_space_2", 0);
VerifyDataFormatAttributeMatch(fbng_node, kSrcFormat);
auto* output_transpose_node = context.graph_view->GetNode(
"fused_batch_norm_grad-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* x_backprop = context.graph_view->GetNode("x_backprop");
ASSERT_NE(x_backprop, nullptr);
ASSERT_EQ(x_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(x_backprop, 0, fbng_node->GetName(), 0);
auto* scale_backprop = context.graph_view->GetNode("scale_backprop");
ASSERT_NE(scale_backprop, nullptr);
ASSERT_EQ(scale_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(scale_backprop, 0, fbng_node->GetName(), 1);
auto* offset_backprop = context.graph_view->GetNode("offset_backprop");
ASSERT_NE(offset_backprop, nullptr);
ASSERT_EQ(offset_backprop->NumRegularFanins(), 1);
VerifyRegularFaninMatch(offset_backprop, 0, fbng_node->GetName(), 2);
auto* reserve_space_3 = context.graph_view->GetNode("reserve_space_3");
ASSERT_NE(reserve_space_3, nullptr);
ASSERT_EQ(reserve_space_3->NumRegularFanins(), 1);
VerifyRegularFaninMatch(reserve_space_3, 0, fbng_node->GetName(), 3);
auto* reserve_space_4 = context.graph_view->GetNode("reserve_space_4");
ASSERT_NE(reserve_space_4, nullptr);
ASSERT_EQ(reserve_space_4->NumRegularFanins(), 1);
VerifyRegularFaninMatch(reserve_space_4, 0, fbng_node->GetName(), 4);
}
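// A layout-agnostic Identity that follows a transposed Conv2D should be
// wrapped in an input/output transpose pair so that it operates in the
// destination format.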
TEST_F(TransposerTest, DefaultLayoutAgnosticOpTransposerIdentityTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto identity = ops::Identity(
scope.WithOpName("identity").WithDevice("/device:GPU:0"), conv2d);
auto output = ops::Identity(scope.WithOpName("output"), identity);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
DefaultLayoutAgnosticOpTransposer transposer;
auto* i = context.graph_view->GetNode("identity");
ASSERT_NE(i, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, i));
auto* input_transpose_node = context.graph_view->GetNode(
"identity-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* i_node = context.graph_view->GetNode("identity");
ASSERT_NE(i_node, nullptr);
ASSERT_EQ(i_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(i_node, 0, input_transpose_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"identity-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, i_node->GetName(), 0);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
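// If the fanin of the layout-agnostic op does not come from a transformed
// 4-D tensor (here a rank-reducing Sum), no transpose pair should be added.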
TEST_F(TransposerTest, DefaultLayoutAgnosticOpTransposerIdentityBadInputTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto sum = ops::Sum(scope.WithOpName("sum"), conv2d, {0, 1});
auto identity = ops::Identity(
scope.WithOpName("identity").WithDevice("/device:GPU:0"), sum);
auto output = ops::Identity(scope.WithOpName("output"), identity);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
DefaultLayoutAgnosticOpTransposer transposer;
auto* i = context.graph_view->GetNode("identity");
ASSERT_NE(i, nullptr);
TF_ASSERT_OK(transposer.TransposeNode(&context, i));
auto* input_transpose_node = context.graph_view->GetNode(
"identity-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node, nullptr);
auto* i_node = context.graph_view->GetNode("identity");
ASSERT_NE(i_node, nullptr);
ASSERT_EQ(i_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(i_node, 0, "sum", 0);
auto* output_transpose_node = context.graph_view->GetNode(
"identity-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, i_node->GetName(), 0);
}
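// AddN is layout-agnostic with a variadic input list: after one of its
// fanins has been transformed by the Conv2D transposer, every fanin should
// get its own NHWC-to-NCHW transpose and the output should be restored.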
TEST_F(TransposerTest, AddNTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TF_ASSERT_OK(CreateSimpleAddN(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* conv2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(conv2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, conv2d));
AddNTransposer addn_transposer;
auto* an = context.graph_view->GetNode("add_n");
ASSERT_NE(an, nullptr);
TF_ASSERT_OK(addn_transposer.TransposeNode(&context, an));
auto* input_transpose_node1 = context.graph_view->GetNode(
"add_n-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0, "a", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"add_n-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "b", 0);
auto* input_transpose_node3 = context.graph_view->GetNode(
"add_n-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node3, nullptr);
ASSERT_EQ(input_transpose_node3->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node3, 0, "c", 0);
auto* input_transpose_node4 = context.graph_view->GetNode(
"add_n-3-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node4, nullptr);
ASSERT_EQ(input_transpose_node4->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node4, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* an_node = context.graph_view->GetNode("add_n");
ASSERT_NE(an_node, nullptr);
ASSERT_EQ(an_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(an_node, 0, input_transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(an_node, 1, input_transpose_node2->GetName(), 0);
VerifyRegularFaninMatch(an_node, 2, input_transpose_node3->GetName(), 0);
VerifyRegularFaninMatch(an_node, 3, input_transpose_node4->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"add_n-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, an_node->GetName(), 0);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
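// If no fanin of AddN has been transformed yet, the transposer should leave
// the node untouched.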
TEST_F(TransposerTest, AddNTransposerNotAfterTransformTest) {
GrapplerItem item;
TF_ASSERT_OK(CreateSimpleAddN(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
AddNTransposer addn_transposer;
auto* an = context.graph_view->GetNode("add_n");
ASSERT_NE(an, nullptr);
TF_ASSERT_OK(addn_transposer.TransposeNode(&context, an));
auto* input_transpose_node1 = context.graph_view->GetNode(
"add_n-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"add_n-1-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node2, nullptr);
auto* input_transpose_node3 = context.graph_view->GetNode(
"add_n-2-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node3, nullptr);
auto* input_transpose_node4 = context.graph_view->GetNode(
"add_n-3-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node4, nullptr);
auto* an_node = context.graph_view->GetNode("add_n");
ASSERT_NE(an_node, nullptr);
ASSERT_EQ(an_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(an_node, 0, "a", 0);
VerifyRegularFaninMatch(an_node, 1, "b", 0);
VerifyRegularFaninMatch(an_node, 2, "c", 0);
VerifyRegularFaninMatch(an_node, 3, "conv2d", 0);
auto* output_transpose_node = context.graph_view->GetNode(
"add_n-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("output");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, an_node->GetName(), 0);
}
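// IdentityN is handled per position: only the slot fed by the transformed
// conv2d_2 receives an input/output transpose pair; the other slots,
// including conv2d_1 and the lower-rank inputs a and b, keep their original
// fanins.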
TEST_F(TransposerTest, IdentityNTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
TF_ASSERT_OK(CreateSimpleIdentityN(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* conv2d_1 = context.graph_view->GetNode("conv2d_1");
ASSERT_NE(conv2d_1, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, conv2d_1));
auto* conv2d_2 = context.graph_view->GetNode("conv2d_2");
ASSERT_NE(conv2d_2, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, conv2d_2));
IdentityNTransposer identityn_transposer;
auto* in = context.graph_view->GetNode("identity_n");
ASSERT_NE(in, nullptr);
TF_ASSERT_OK(identityn_transposer.TransposeNode(&context, in));
auto* input_transpose_node1 = context.graph_view->GetNode(
"identity_n-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"identity_n-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0,
"conv2d_2-0-0-TransposeNCHWToNHWC-LayoutOptimizer",
0);
auto* input_transpose_node3 = context.graph_view->GetNode(
"identity_n-2-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node3, nullptr);
auto* input_transpose_node4 = context.graph_view->GetNode(
"identity_n-3-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node4, nullptr);
auto* in_node = context.graph_view->GetNode("identity_n");
ASSERT_NE(in_node, nullptr);
ASSERT_EQ(in_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(in_node, 0, "conv2d_1", 0);
VerifyRegularFaninMatch(in_node, 1, input_transpose_node2->GetName(), 0);
VerifyRegularFaninMatch(in_node, 2, "a", 0);
VerifyRegularFaninMatch(in_node, 3, "b", 0);
auto* output_transpose_node1 = context.graph_view->GetNode(
"identity_n-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node1, nullptr);
auto* output_transpose_node2 = context.graph_view->GetNode(
"identity_n-1-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node2, nullptr);
ASSERT_EQ(output_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node2, 0, in_node->GetName(), 1);
auto* output_transpose_node3 = context.graph_view->GetNode(
"identity_n-2-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node3, nullptr);
auto* output_transpose_node4 = context.graph_view->GetNode(
"identity_n-3-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node4, nullptr);
auto* conv2d_1_output_node = context.graph_view->GetNode("conv2d_1_output");
ASSERT_NE(conv2d_1_output_node, nullptr);
ASSERT_EQ(conv2d_1_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(conv2d_1_output_node, 0, in_node->GetName(), 0);
auto* conv2d_2_output_node = context.graph_view->GetNode("conv2d_2_output");
ASSERT_NE(conv2d_2_output_node, nullptr);
ASSERT_EQ(conv2d_2_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(conv2d_2_output_node, 0,
output_transpose_node2->GetName(), 0);
auto* a_output_node = context.graph_view->GetNode("a_output");
ASSERT_NE(a_output_node, nullptr);
ASSERT_EQ(a_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(a_output_node, 0, in_node->GetName(), 2);
auto* b_output_node = context.graph_view->GetNode("b_output");
ASSERT_NE(b_output_node, nullptr);
ASSERT_EQ(b_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(b_output_node, 0, in_node->GetName(), 3);
}
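// Merge is transposed only when every data input is convertible; here both
// fanins trace back to the transformed Conv2D, so each gets a transpose and
// the merged output is transposed back.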
TEST_F(TransposerTest, MergeTransposerTestMergeBothInputsConvertible) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
Output i1 = ops::Identity(scope.WithOpName("i1"), conv2d);
auto merge = ops::Merge(scope.WithOpName("merge").WithDevice("/device:GPU:0"),
{conv2d, i1});
auto i2 = ops::Identity(scope.WithOpName("i2"), merge.output);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
MergeTransposer merge_transposer;
auto* m = context.graph_view->GetNode("merge");
ASSERT_NE(m, nullptr);
TF_ASSERT_OK(merge_transposer.TransposeNode(&context, m));
auto* input_transpose_node1 = context.graph_view->GetNode(
"merge-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-1-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"merge-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "i1", 0);
auto* m_node = context.graph_view->GetNode("merge");
ASSERT_NE(m_node, nullptr);
ASSERT_EQ(m_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(m_node, 0, input_transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(m_node, 1, input_transpose_node2->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"merge-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, m_node->GetName(), 0);
auto* output_node = context.graph_view->GetNode("i2");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
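// If any Merge input is not convertible (here a 4-D constant that was not
// produced by a transformed node), the node must be left untouched.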
TEST_F(TransposerTest, MergeTransposerTestMergeOneInputNotConvertible) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto tensor_4d =
ops::Const(scope.WithOpName("tensor_4d"), 3.0f, {1, 1, 1, 3});
auto merge = ops::Merge(scope.WithOpName("merge").WithDevice("/device:GPU:0"),
{conv2d, tensor_4d});
auto i2 = ops::Identity(scope.WithOpName("i2"), merge.output);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
MergeTransposer merge_transposer;
auto* m = context.graph_view->GetNode("merge");
ASSERT_NE(m, nullptr);
TF_ASSERT_OK(merge_transposer.TransposeNode(&context, m));
auto* input_transpose_node1 = context.graph_view->GetNode(
"merge-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"merge-1-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node2, nullptr);
auto* m_node = context.graph_view->GetNode("merge");
ASSERT_NE(m_node, nullptr);
ASSERT_EQ(m_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(m_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
VerifyRegularFaninMatch(m_node, 1, "tensor_4d", 0);
auto* output_transpose_node = context.graph_view->GetNode(
"merge-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* output_node = context.graph_view->GetNode("i2");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, m_node->GetName(), 0);
}
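// Pad carries a {4, 2} paddings tensor: the data input gets a regular
// transpose while the paddings get a DataFormatVecPermute, and the output is
// transposed back for the consumer.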
TEST_F(TransposerTest, PadTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto c = ops::Const(scope.WithOpName("c"), {1, 2, 3, 4, 5, 6, 7, 8}, {4, 2});
auto p =
ops::Pad(scope.WithOpName("p").WithDevice("/device:GPU:0"), conv2d, c);
auto o = ops::Identity(scope.WithOpName("o"), p);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
PadTransposer pad_transposer;
auto* pad = context.graph_view->GetNode("p");
ASSERT_NE(pad, nullptr);
TF_ASSERT_OK(pad_transposer.TransposeNode(&context, pad));
auto* input_transpose_node =
context.graph_view->GetNode("p-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* padding_transpose_node = context.graph_view->GetNode(
"p-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(padding_transpose_node, nullptr);
ASSERT_EQ(padding_transpose_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(padding_transpose_node, 0, "c", 0);
auto* pad_node = context.graph_view->GetNode("p");
ASSERT_NE(pad_node, nullptr);
ASSERT_EQ(pad_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(pad_node, 0, input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(pad_node, 1, padding_transpose_node->GetName(), 0);
auto* output_transpose_node =
context.graph_view->GetNode("p-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, pad_node->GetName(), 0);
auto* output_node = context.graph_view->GetNode("o");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
}
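// Switch forwards its data input to two output ports; the transposer should
// transpose the data fanin once, keep the boolean predicate untouched, and
// add one output transpose per fanned-out port.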
TEST_F(TransposerTest, SwitchTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
ops::Variable ctrl(scope.WithOpName("ctrl"), {}, DT_BOOL);
auto sw = ops::Switch(scope.WithOpName("switch").WithDevice("/device:GPU:0"),
conv2d, ctrl);
auto i1 = ops::Identity(scope.WithOpName("i1"), sw.output_false);
auto i2 = ops::Identity(scope.WithOpName("i2"), sw.output_true);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SwitchTransposer switch_transposer;
auto* sw_node = context.graph_view->GetNode("switch");
ASSERT_NE(sw_node, nullptr);
TF_ASSERT_OK(switch_transposer.TransposeNode(&context, sw_node));
auto* input_transpose_node = context.graph_view->GetNode(
"switch-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* switch_node = context.graph_view->GetNode("switch");
ASSERT_NE(switch_node, nullptr);
ASSERT_EQ(switch_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(switch_node, 0, input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(switch_node, 1, "ctrl", 0);
auto* output_transpose_node1 = context.graph_view->GetNode(
"switch-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node1, nullptr);
ASSERT_EQ(output_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node1, 0, switch_node->GetName(), 0);
auto* output_transpose_node2 = context.graph_view->GetNode(
"switch-1-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node2, nullptr);
ASSERT_EQ(output_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node2, 0, switch_node->GetName(), 1);
auto* i1_node = context.graph_view->GetNode("i1");
ASSERT_NE(i1_node, nullptr);
ASSERT_EQ(i1_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(i1_node, 0, output_transpose_node1->GetName(), 0);
auto* i2_node = context.graph_view->GetNode("i2");
ASSERT_NE(i2_node, nullptr);
ASSERT_EQ(i2_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(i2_node, 0, output_transpose_node2->GetName(), 0);
}
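// Betainc has three 4-D inputs of identical shape; all three should be
// transposed, including the two RandomUniform fanins that were never
// processed by another transposer.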
TEST_F(TransposerTest, TernaryOpTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto b = ops::RandomUniform(scope.WithOpName("b"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto beta_inc = ops::Betainc(
scope.WithOpName("beta_inc").WithDevice("/device:GPU:0"), a, b, conv2d);
auto z = ops::Identity(scope.WithOpName("z"), beta_inc);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
TernaryOpTransposer ternary_op_transposer;
auto* bi = context.graph_view->GetNode("beta_inc");
ASSERT_NE(bi, nullptr);
TF_ASSERT_OK(ternary_op_transposer.TransposeNode(&context, bi));
auto* input_transpose_node1 = context.graph_view->GetNode(
"beta_inc-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0, "a", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"beta_inc-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "b", 0);
auto* input_transpose_node3 = context.graph_view->GetNode(
"beta_inc-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node3, nullptr);
ASSERT_EQ(input_transpose_node3->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node3, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* bi_node = context.graph_view->GetNode("beta_inc");
ASSERT_NE(bi_node, nullptr);
ASSERT_EQ(bi_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(bi_node, 0, input_transpose_node1->GetName(), 0);
VerifyRegularFaninMatch(bi_node, 1, input_transpose_node2->GetName(), 0);
VerifyRegularFaninMatch(bi_node, 2, input_transpose_node3->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"beta_inc-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, bi_node->GetName(), 0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
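// TanhGrad takes (y, dy); both 4-D fanins should be transposed and the
// gradient output transposed back.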
TEST_F(TransposerTest, UnaryGradTransposerTestTanhGrad) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto tanh_grad_op = ops::internal::TanhGrad(
scope.WithOpName("tanh_grad").WithDevice("/device:GPU:0"), conv2d, a);
auto z = ops::Identity(scope.WithOpName("z"), tanh_grad_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
UnaryGradTransposer unary_grad_transposer;
auto* tanh_grad = context.graph_view->GetNode("tanh_grad");
ASSERT_NE(tanh_grad, nullptr);
TF_ASSERT_OK(unary_grad_transposer.TransposeNode(&context, tanh_grad));
auto* input_transpose_node1 = context.graph_view->GetNode(
"tanh_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"tanh_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "a", 0);
auto* tanh_grad_node = context.graph_view->GetNode("tanh_grad");
ASSERT_NE(tanh_grad_node, nullptr);
ASSERT_EQ(tanh_grad_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(tanh_grad_node, 0, input_transpose_node1->GetName(),
0);
VerifyRegularFaninMatch(tanh_grad_node, 1, input_transpose_node2->GetName(),
0);
auto* output_transpose_node = context.graph_view->GetNode(
"tanh_grad-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, tanh_grad_node->GetName(),
0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
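// Same flow as the TanhGrad case. Note that despite the node name
// "relu6_grad", the graph actually builds ops::internal::SigmoidGrad; the
// UnaryGradTransposer is applied to the node directly, so the exact unary
// gradient op does not affect what this test verifies.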
TEST_F(TransposerTest, UnaryGradTransposerTestRelu6Grad) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto conv2d = SimpleConv2D(&scope);
auto a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto relu6_grad_op = ops::internal::SigmoidGrad(
scope.WithOpName("relu6_grad").WithDevice("/device:GPU:0"), conv2d, a);
auto z = ops::Identity(scope.WithOpName("z"), relu6_grad_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
UnaryGradTransposer unary_grad_transposer;
auto* relu6_grad = context.graph_view->GetNode("relu6_grad");
ASSERT_NE(relu6_grad, nullptr);
TF_ASSERT_OK(unary_grad_transposer.TransposeNode(&context, relu6_grad));
auto* input_transpose_node1 = context.graph_view->GetNode(
"relu6_grad-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* input_transpose_node2 = context.graph_view->GetNode(
"relu6_grad-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
ASSERT_EQ(input_transpose_node2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node2, 0, "a", 0);
auto* relu6_grad_node = context.graph_view->GetNode("relu6_grad");
ASSERT_NE(relu6_grad_node, nullptr);
ASSERT_EQ(relu6_grad_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(relu6_grad_node, 0, input_transpose_node1->GetName(),
0);
VerifyRegularFaninMatch(relu6_grad_node, 1, input_transpose_node2->GetName(),
0);
auto* output_transpose_node = context.graph_view->GetNode(
"relu6_grad-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_transpose_node, 0, relu6_grad_node->GetName(),
0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
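// Squeezing the unit H and W dimensions of an NHWC tensor yields a 2-D
// result, so only the input needs a transpose; no output transpose is
// expected.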
TEST_F(TransposerTest, SqueezeTransposerTest) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {32, 1, 1, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 16}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op = ops::Squeeze(
scope.WithOpName("squeeze").WithDevice("/device:GPU:0"), conv2d);
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* squeeze_node = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze_node, nullptr);
ASSERT_EQ(squeeze_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(squeeze_node, 0, input_transpose_node1->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"squeeze-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, squeeze_node->GetName(), 0);
}
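// A Squeeze whose input does not have unit H and W dimensions is
// unsupported, so no transpose should be inserted.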
TEST_F(TransposerTest, SqueezeTransposerTestUnsupportedInputShape) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {32, 5, 5, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {5, 5, 8, 16}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op = ops::Squeeze(
scope.WithOpName("squeeze").WithDevice("/device:GPU:0"), conv2d);
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
}
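// Squeezing only one spatial axis ({1}) is unsupported; the node must be
// left untouched.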
TEST_F(TransposerTest, SqueezeTransposerTestInvalidHWAxis) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {32, 1, 1, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 16}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op =
ops::Squeeze(scope.WithOpName("squeeze").WithDevice("/device:GPU:0"),
conv2d, ops::Squeeze::Attrs().Axis({1}));
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
}
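// An axis list beyond the supported H and W pair ({1, 2, 3}) is likewise
// unsupported; the node must be left untouched.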
TEST_F(TransposerTest, SqueezeTransposerTestInvalidNHWAxis) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {32, 1, 1, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 1}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op =
ops::Squeeze(scope.WithOpName("squeeze").WithDevice("/device:GPU:0"),
conv2d, ops::Squeeze::Attrs().Axis({1, 2, 3}));
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(input_transpose_node1, nullptr);
}
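// When the squeeze is valid, the explicit axis list must be remapped from
// NHWC positions {1, 2} to NCHW positions {2, 3}.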
TEST_F(TransposerTest, SqueezeTransposerTestSqueezeDimsUpdated) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {1, 1, 1, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 1}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op =
ops::Squeeze(scope.WithOpName("squeeze").WithDevice("/device:GPU:0"),
conv2d, ops::Squeeze::Attrs().Axis({1, 2}));
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* squeeze_node = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze_node, nullptr);
ASSERT_EQ(squeeze_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(squeeze_node, 0, input_transpose_node1->GetName(), 0);
  const auto* squeeze_dims_attr = squeeze_node->GetAttr("squeeze_dims");
  ASSERT_NE(squeeze_dims_attr, nullptr);
  const auto& list = squeeze_dims_attr->list();
ASSERT_EQ(list.i_size(), 2);
EXPECT_EQ(list.i(0), 2);
EXPECT_EQ(list.i(1), 3);
auto* output_transpose_node = context.graph_view->GetNode(
"squeeze-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, squeeze_node->GetName(), 0);
}
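// Negative axes ({-3, -2}) denote the same H and W dimensions and should be
// normalized to the positive NCHW positions {2, 3}.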
TEST_F(TransposerTest, SqueezeTransposerTestNegativeSqueezeDimsUpdated) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {1, 1, 1, 8}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 1}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto squeeze_op =
ops::Squeeze(scope.WithOpName("squeeze").WithDevice("/device:GPU:0"),
conv2d, ops::Squeeze::Attrs().Axis({-3, -2}));
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* squeeze_node = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze_node, nullptr);
ASSERT_EQ(squeeze_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(squeeze_node, 0, input_transpose_node1->GetName(), 0);
  const auto* squeeze_dims_attr = squeeze_node->GetAttr("squeeze_dims");
  ASSERT_NE(squeeze_dims_attr, nullptr);
  const auto& list = squeeze_dims_attr->list();
ASSERT_EQ(list.i_size(), 2);
EXPECT_EQ(list.i(0), 2);
EXPECT_EQ(list.i(1), 3);
auto* output_transpose_node = context.graph_view->GetNode(
"squeeze-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, squeeze_node->GetName(), 0);
}
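// The reverse direction: with an NCHW source format, axes {2, 3} should be
// remapped to NHWC positions {1, 2}.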
TEST_F(TransposerTest, SqueezeTransposerTestNCHWToNHWCSqueezeDimsUpdated) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"), {1, 8, 1, 1}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"), {1, 1, 8, 1}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 1, 1, 1}, "SAME", ops::Conv2D::DataFormat(kDstFormat));
auto squeeze_op =
ops::Squeeze(scope.WithOpName("squeeze").WithDevice("/device:GPU:0"),
conv2d, ops::Squeeze::Attrs().Axis({2, 3}));
auto z = ops::Identity(scope.WithOpName("z"), squeeze_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kDstFormat, kSrcFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SqueezeTransposer squeeze_transposer;
auto* squeeze = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze, nullptr);
TF_ASSERT_OK(squeeze_transposer.TransposeNode(&context, squeeze));
auto* input_transpose_node1 = context.graph_view->GetNode(
"squeeze-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNHWCToNCHW-LayoutOptimizer", 0);
auto* squeeze_node = context.graph_view->GetNode("squeeze");
ASSERT_NE(squeeze_node, nullptr);
ASSERT_EQ(squeeze_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(squeeze_node, 0, input_transpose_node1->GetName(), 0);
  const auto* squeeze_dims_attr = squeeze_node->GetAttr("squeeze_dims");
  ASSERT_NE(squeeze_dims_attr, nullptr);
  const auto& list = squeeze_dims_attr->list();
ASSERT_EQ(list.i_size(), 2);
EXPECT_EQ(list.i(0), 1);
EXPECT_EQ(list.i(1), 2);
auto* output_transpose_node = context.graph_view->GetNode(
"squeeze-0-0-TransposeNHWCToNCHW-LayoutOptimizer");
EXPECT_EQ(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, squeeze_node->GetName(), 0);
}
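// MaxPoolV2 takes ksize and strides as tensors: the data input gets a
// regular transpose while both vector inputs get a DataFormatVecPermute.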
TEST_F(TransposerTest, MaxPoolV2Transposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kWidth, kHeight, kDepthIn}, DT_FLOAT);
auto ksize = ops::Const(scope.WithOpName("ksize"), {1, kKernel, kKernel, 1});
auto strides =
ops::Const(scope.WithOpName("strides"), {1, kKernel, kKernel, 1});
auto maxpool_op =
ops::MaxPoolV2(scope.WithOpName("maxpoolv2").WithDevice("/device:GPU:0"),
input, ksize, strides, "VALID");
auto z = ops::Identity(scope.WithOpName("z"), maxpool_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
MaxPoolV2Transposer maxpool_transposer;
auto* maxpool = context.graph_view->GetNode("maxpoolv2");
ASSERT_NE(maxpool, nullptr);
TF_ASSERT_OK(maxpool_transposer.TransposeNode(&context, maxpool));
auto* input_transpose_node1 = context.graph_view->GetNode(
"maxpoolv2-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
auto* input_transpose_node2 = context.graph_view->GetNode(
"maxpoolv2-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node2, nullptr);
auto* input_transpose_node3 = context.graph_view->GetNode(
"maxpoolv2-2-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node3, nullptr);
auto* updated_maxpool = context.graph_view->GetNode("maxpoolv2");
ASSERT_NE(updated_maxpool, nullptr);
ASSERT_EQ(updated_maxpool->NumRegularFanins(), 3);
VerifyRegularFaninMatch(updated_maxpool, 0, input_transpose_node1->GetName(),
0);
VerifyRegularFaninMatch(updated_maxpool, 1, input_transpose_node2->GetName(),
0);
VerifyRegularFaninMatch(updated_maxpool, 2, input_transpose_node3->GetName(),
0);
auto* output_transpose_node = context.graph_view->GetNode(
"maxpoolv2-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
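// MaxPoolGradV2 and MaxPoolGradGradV2 share one transposer: in both
// variants the three 4-D inputs are transposed and the ksize/strides
// vectors are permuted.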
TEST_F(TransposerTest, MaxPoolGradV2Transposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
for (bool use_grad_grad : {false, true}) {
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto orig_input =
ops::RandomUniform(scope.WithOpName("orig_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto orig_output =
ops::RandomUniform(scope.WithOpName("orig_output"),
{kBatchSize, use_grad_grad ? kOutHeight : kHeight,
use_grad_grad ? kOutWidth : kWidth, kDepthIn},
DT_FLOAT);
auto grad =
ops::RandomUniform(scope.WithOpName("grad_input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto ksize =
ops::Const(scope.WithOpName("ksize"), {1, kKernel, kKernel, 1});
auto strides =
ops::Const(scope.WithOpName("strides"), {1, kKernel, kKernel, 1});
Output maxpoolgrad_op;
if (use_grad_grad) {
maxpoolgrad_op = ops::MaxPoolGradGradV2(
scope.WithOpName("maxpoolgradv2").WithDevice("/device:GPU:0"),
orig_input, orig_output, grad, ksize, strides, "VALID");
} else {
maxpoolgrad_op = ops::MaxPoolGradV2(
scope.WithOpName("maxpoolgradv2").WithDevice("/device:GPU:0"),
orig_input, orig_output, grad, ksize, strides, "VALID");
}
auto z = ops::Identity(scope.WithOpName("z"), maxpoolgrad_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
MaxPoolGradV2Transposer maxpoolgrad_transposer;
auto* maxpoolgrad = context.graph_view->GetNode("maxpoolgradv2");
ASSERT_NE(maxpoolgrad, nullptr);
TF_ASSERT_OK(maxpoolgrad_transposer.TransposeNode(&context, maxpoolgrad));
auto* orig_input_transpose_node = context.graph_view->GetNode(
"maxpoolgradv2-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(orig_input_transpose_node, nullptr);
auto* orig_output_transpose_node = context.graph_view->GetNode(
"maxpoolgradv2-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(orig_output_transpose_node, nullptr);
auto* grad_input_transpose_node = context.graph_view->GetNode(
"maxpoolgradv2-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(grad_input_transpose_node, nullptr);
auto* size_node = context.graph_view->GetNode(
"maxpoolgradv2-3-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(size_node, nullptr);
auto* stride_node = context.graph_view->GetNode(
"maxpoolgradv2-4-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(stride_node, nullptr);
auto* updated_maxpoolgrad = context.graph_view->GetNode("maxpoolgradv2");
ASSERT_NE(updated_maxpoolgrad, nullptr);
ASSERT_EQ(updated_maxpoolgrad->NumRegularFanins(), 5);
VerifyRegularFaninMatch(updated_maxpoolgrad, 0,
orig_input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpoolgrad, 1,
orig_output_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpoolgrad, 2,
grad_input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpoolgrad, 3, size_node->GetName(), 0);
VerifyRegularFaninMatch(updated_maxpoolgrad, 4, stride_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"maxpoolgradv2-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
}
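// For a broadcasting binary op, the rank-1 operand cannot simply be
// transposed: it is expanded through a generated shape constant and a
// Reshape, while the 4-D operand gets a regular transpose.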
TEST_F(TransposerTest, BinaryOpTransposerAdd) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto a = ops::RandomUniform(scope.WithOpName("a"), {1}, DT_FLOAT);
auto add =
ops::Add(scope.WithOpName("Add").WithDevice("/device:GPU:0"), a, conv2d);
auto z = ops::Identity(scope.WithOpName("z"), add);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
auto* addop = context.graph_view->GetNode("Add");
ASSERT_NE(addop, nullptr);
BinaryOpTransposer binaryop_transposer;
TF_ASSERT_OK(binaryop_transposer.TransposeNode(&context, addop));
auto* input_const_node =
context.graph_view->GetNode("Add-0-ReshapeConst-LayoutOptimizer");
ASSERT_NE(input_const_node, nullptr);
EXPECT_EQ(input_const_node->NumRegularFanins(), 0);
auto* input_reshape_node =
context.graph_view->GetNode("Add-0-ReshapeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_reshape_node, nullptr);
ASSERT_EQ(input_reshape_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_reshape_node, 0, "a", 0);
VerifyRegularFaninMatch(input_reshape_node, 1, input_const_node->GetName(),
0);
auto* input_transpose_node =
context.graph_view->GetNode("Add-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* updated_add = context.graph_view->GetNode("Add");
ASSERT_NE(updated_add, nullptr);
ASSERT_EQ(updated_add->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_add, 0, input_reshape_node->GetName(), 0);
VerifyRegularFaninMatch(updated_add, 1, input_transpose_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"Add-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
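// Same scenario as the Add test above with the operands swapped: the rank-1
// operand is now fanin 1, so the Reshape lands on port 1 and the Transpose on
// port 0.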
TEST_F(TransposerTest, BinaryOpTransposerMul) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto a = ops::RandomUniform(scope.WithOpName("a"), {1}, DT_FLOAT);
auto mul =
ops::Mul(scope.WithOpName("Mul").WithDevice("/device:GPU:0"), conv2d, a);
auto z = ops::Identity(scope.WithOpName("z"), mul);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
auto* mulop = context.graph_view->GetNode("Mul");
ASSERT_NE(mulop, nullptr);
BinaryOpTransposer binaryop_transposer;
TF_ASSERT_OK(binaryop_transposer.TransposeNode(&context, mulop));
auto* input_const_node =
context.graph_view->GetNode("Mul-1-ReshapeConst-LayoutOptimizer");
ASSERT_NE(input_const_node, nullptr);
EXPECT_EQ(input_const_node->NumRegularFanins(), 0);
auto* input_reshape_node =
context.graph_view->GetNode("Mul-1-ReshapeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_reshape_node, nullptr);
ASSERT_EQ(input_reshape_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_reshape_node, 0, "a", 0);
VerifyRegularFaninMatch(input_reshape_node, 1, input_const_node->GetName(),
0);
auto* input_transpose_node =
context.graph_view->GetNode("Mul-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* updated_mul = context.graph_view->GetNode("Mul");
ASSERT_NE(updated_mul, nullptr);
ASSERT_EQ(updated_mul->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_mul, 1, input_reshape_node->GetName(), 0);
VerifyRegularFaninMatch(updated_mul, 0, input_transpose_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"Mul-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
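// Polygamma takes two 4D operands of the same rank, so no Reshape is needed;
// the test checks the Transpose on the first fanin and the output Transpose.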
TEST_F(TransposerTest, BinaryOpTransposerPolygamma) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
auto conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto a = ops::RandomUniform(scope.WithOpName("a"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto polygamma = ops::Polygamma(
scope.WithOpName("polygamma").WithDevice("/device:GPU:0"), conv2d, a);
auto z = ops::Identity(scope.WithOpName("z"), polygamma);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
BinaryOpTransposer binaryop_transposer;
auto* polygamma_op = context.graph_view->GetNode("polygamma");
ASSERT_NE(polygamma_op, nullptr);
TF_ASSERT_OK(binaryop_transposer.TransposeNode(&context, polygamma_op));
auto* input_transpose_node1 = context.graph_view->GetNode(
"polygamma-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node1, nullptr);
ASSERT_EQ(input_transpose_node1->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node1, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* updated_polygamma = context.graph_view->GetNode("polygamma");
ASSERT_NE(updated_polygamma, nullptr);
ASSERT_EQ(updated_polygamma->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_polygamma, 0,
input_transpose_node1->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"polygamma-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
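// ops::Concat builds a ConcatV2 node (values first, axis last), so exercising
// the original "Concat" op (axis first) requires assembling the node manually
// with NodeBuilder.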
bool CreateConcatV1Op(const Scope& scope, const InputList& tensors,
const Input& concat_axis, Output* output) {
if (!scope.ok()) {
return false;
}
auto values = ops::AsNodeOutList(scope, tensors);
if (!scope.ok()) {
return false;
}
auto axis = ops::AsNodeOut(scope, concat_axis);
if (!scope.ok()) {
return false;
}
Node* ret;
const auto unique_name = scope.GetUniqueNameForOp("Concat");
auto builder = NodeBuilder(unique_name, "Concat").Input(axis).Input(values);
scope.UpdateBuilder(&builder);
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) {
return false;
}
scope.UpdateStatus(scope.DoShapeInference(ret));
*output = Output(ret, 0);
return true;
}
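// Concat (V1): the axis is fanin 0, so the DataFormatDimMap lands on port 0
// and the transposed tensor inputs follow on ports 1-3.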
TEST_F(TransposerTest, ConcatOpTransposerConcat) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
Output input_1 = ops::RandomUniform(scope.WithOpName("input_1"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output input_2 = ops::RandomUniform(scope.WithOpName("input_2"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const(scope.WithOpName("axis"), 2, {});
Output concat_op;
ASSERT_TRUE(
CreateConcatV1Op(scope.WithOpName("concat").WithDevice("/device:GPU:0"),
{input_1, input_2, conv2d}, axis, &concat_op));
auto z = ops::Identity(scope.WithOpName("z"), concat_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ConcatOpTransposer concat_transposer;
auto* concat = context.graph_view->GetNode("concat");
ASSERT_NE(concat, nullptr);
TF_ASSERT_OK(concat_transposer.TransposeNode(&context, concat));
auto* conv2d_transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(conv2d_transpose_node, nullptr);
auto* conv2d_concat_input_node = context.graph_view->GetNode(
"concat-3-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(conv2d_concat_input_node, nullptr);
ASSERT_EQ(conv2d_concat_input_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv2d_concat_input_node, 0,
conv2d_transpose_node->GetName(), 0);
auto* axis_dim_node = context.graph_view->GetNode(
"concat-0-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_dim_node, nullptr);
auto* updated_concat = context.graph_view->GetNode("concat");
ASSERT_NE(updated_concat, nullptr);
ASSERT_EQ(updated_concat->NumRegularFanins(), 4);
VerifyRegularFaninMatch(updated_concat, 0, axis_dim_node->GetName(), 0);
VerifyRegularFaninMatch(updated_concat, 1,
"concat-1-TransposeNHWCToNCHW-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_concat, 2,
"concat-2-TransposeNHWCToNCHW-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_concat, 3,
conv2d_concat_input_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"concat-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
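// ConcatV2: the tensor inputs come first and the axis is the last fanin
// (port 3 here).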
TEST_F(TransposerTest, ConcatOpTransposerConcatV2) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
Output input_1 = ops::RandomUniform(scope.WithOpName("input_1"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
Output input_2 = ops::RandomUniform(scope.WithOpName("input_2"),
{kBatchSize, 5, 3, kDepthOut}, DT_FLOAT);
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const(scope.WithOpName("axis"), 2, {});
auto concat_op =
ops::Concat(scope.WithOpName("concat").WithDevice("/device:GPU:0"),
{input_1, input_2, conv2d}, axis);
auto z = ops::Identity(scope.WithOpName("z"), concat_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ConcatOpTransposer concat_transposer;
auto* concat = context.graph_view->GetNode("concat");
ASSERT_NE(concat, nullptr);
TF_ASSERT_OK(concat_transposer.TransposeNode(&context, concat));
auto* conv2d_transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(conv2d_transpose_node, nullptr);
auto* conv2d_concat_input_node = context.graph_view->GetNode(
"concat-2-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(conv2d_concat_input_node, nullptr);
ASSERT_EQ(conv2d_concat_input_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(conv2d_concat_input_node, 0,
conv2d_transpose_node->GetName(), 0);
auto* axis_dim_node = context.graph_view->GetNode(
"concat-3-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_dim_node, nullptr);
auto* updated_concat = context.graph_view->GetNode("concat");
ASSERT_NE(updated_concat, nullptr);
ASSERT_EQ(updated_concat->NumRegularFanins(), 4);
VerifyRegularFaninMatch(updated_concat, 0,
"concat-0-TransposeNHWCToNCHW-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_concat, 1,
"concat-1-TransposeNHWCToNCHW-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_concat, 2,
conv2d_concat_input_node->GetName(), 0);
VerifyRegularFaninMatch(updated_concat, 3, axis_dim_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"concat-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
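// ReverseV2's axis operand holds dimension indices, so it is rewritten with
// DataFormatDimMap rather than a vector permutation.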
TEST_F(TransposerTest, ReverseV2Transposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const(scope.WithOpName("axis"), {0, 3}, {2});
auto reverse_op = ops::Reverse(
scope.WithOpName("reverse_v2").WithDevice("/device:GPU:0"), conv2d, axis);
auto z = ops::Identity(scope.WithOpName("z"), reverse_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ReverseV2Transposer reverse_v2_transposer;
auto* reverse_v2 = context.graph_view->GetNode("reverse_v2");
ASSERT_NE(reverse_v2, nullptr);
TF_ASSERT_OK(reverse_v2_transposer.TransposeNode(&context, reverse_v2));
auto* input_transpose_node = context.graph_view->GetNode(
"reverse_v2-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* axis_node = context.graph_view->GetNode(
"reverse_v2-1-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* updated_reverse_v2 = context.graph_view->GetNode("reverse_v2");
ASSERT_NE(updated_reverse_v2, nullptr);
ASSERT_EQ(updated_reverse_v2->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_reverse_v2, 0,
input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_reverse_v2, 1, axis_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"reverse_v2-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
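// Tile's multiples operand is a per-dimension vector, so it is rewritten with
// DataFormatVecPermute.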
TEST_F(TransposerTest, TileTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto multiple = ops::Const(scope.WithOpName("multiple"), {1, 1, 2, 3}, {4});
auto tile_op = ops::Tile(scope.WithOpName("tile").WithDevice("/device:GPU:0"),
conv2d, multiple);
auto z = ops::Identity(scope.WithOpName("z"), tile_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
TileTransposer tile_transposer;
auto* tile = context.graph_view->GetNode("tile");
ASSERT_NE(tile, nullptr);
TF_ASSERT_OK(tile_transposer.TransposeNode(&context, tile));
auto* input_transpose_node =
context.graph_view->GetNode("tile-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* multiple_node = context.graph_view->GetNode(
"tile-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(multiple_node, nullptr);
ASSERT_EQ(multiple_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(multiple_node, 0, "multiple", 0);
auto* updated_tile = context.graph_view->GetNode("tile");
ASSERT_NE(updated_tile, nullptr);
ASSERT_EQ(updated_tile->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_tile, 0, input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_tile, 1, multiple_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"tile-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
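// Shape's input is transposed like any other 4D operand, but its output is a
// dimension vector, so it is fixed up with DataFormatVecPermute instead of an
// output Transpose.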
TEST_F(TransposerTest, ShapeTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto shape =
ops::Shape(scope.WithOpName("shape").WithDevice("/device:GPU:0"), conv2d);
auto z = ops::Identity(scope.WithOpName("z"), shape);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
ShapeTransposer shape_transposer;
auto* shape_node = context.graph_view->GetNode("shape");
ASSERT_NE(shape_node, nullptr);
TF_ASSERT_OK(shape_transposer.TransposeNode(&context, shape_node));
auto* conv2d_transpose_node = context.graph_view->GetNode(
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(conv2d_transpose_node, nullptr);
auto* shape_input_node = context.graph_view->GetNode(
"shape-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(shape_input_node, nullptr);
ASSERT_EQ(shape_input_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(shape_input_node, 0, conv2d_transpose_node->GetName(),
0);
auto* output_vec_perm_node = context.graph_view->GetNode(
"shape-0-0-DataFormatVecPermuteNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_vec_perm_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_vec_perm_node->GetName(), 0);
}
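// ShapeN: only the fanins whose producers were already transposed (conv2d_1
// and conv2d_2) get input Transposes and permuted outputs; conv2d_3 and its
// corresponding output pass through unchanged.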
TEST_F(TransposerTest, ShapeNTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d_1 = ops::Conv2D(
scope.WithOpName("conv2d_1").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
Output conv2d_2 = ops::Conv2D(
scope.WithOpName("conv2d_2").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
Output conv2d_3 = ops::Conv2D(
scope.WithOpName("conv2d_3").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto shape =
ops::ShapeN(scope.WithOpName("shape").WithDevice("/device:GPU:0"),
{conv2d_1, conv2d_2, conv2d_3});
auto z_1 = ops::Identity(scope.WithOpName("z_1"), shape.output[0]);
auto z_2 = ops::Identity(scope.WithOpName("z_2"), shape.output[1]);
auto z_3 = ops::Identity(scope.WithOpName("z_3"), shape.output[2]);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d_1 = context.graph_view->GetNode("conv2d_1");
ASSERT_NE(c2d_1, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d_1));
auto* c2d_2 = context.graph_view->GetNode("conv2d_2");
ASSERT_NE(c2d_2, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d_2));
ShapeNTransposer shape_transposer;
auto* shape_node = context.graph_view->GetNode("shape");
ASSERT_NE(shape_node, nullptr);
TF_ASSERT_OK(shape_transposer.TransposeNode(&context, shape_node));
auto* conv2d_1_transpose_node = context.graph_view->GetNode(
"conv2d_1-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(conv2d_1_transpose_node, nullptr);
auto* conv2d_2_transpose_node = context.graph_view->GetNode(
"conv2d_2-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(conv2d_2_transpose_node, nullptr);
auto* shape_input_1_node = context.graph_view->GetNode(
"shape-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(shape_input_1_node, nullptr);
ASSERT_EQ(shape_input_1_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(shape_input_1_node, 0,
conv2d_1_transpose_node->GetName(), 0);
auto* shape_input_2_node = context.graph_view->GetNode(
"shape-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(shape_input_2_node, nullptr);
ASSERT_EQ(shape_input_2_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(shape_input_2_node, 0,
conv2d_2_transpose_node->GetName(), 0);
auto* updated_shape_node = context.graph_view->GetNode("shape");
ASSERT_NE(updated_shape_node, nullptr);
ASSERT_EQ(updated_shape_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(updated_shape_node, 0, shape_input_1_node->GetName(),
0);
VerifyRegularFaninMatch(updated_shape_node, 1, shape_input_2_node->GetName(),
0);
VerifyRegularFaninMatch(updated_shape_node, 2, "conv2d_3", 0);
auto* output_vec_perm_node_1 = context.graph_view->GetNode(
"shape-0-0-DataFormatVecPermuteNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_vec_perm_node_1, nullptr);
auto* output_vec_perm_node_2 = context.graph_view->GetNode(
"shape-1-0-DataFormatVecPermuteNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_vec_perm_node_2, nullptr);
auto* z_output_node_1 = context.graph_view->GetNode("z_1");
ASSERT_NE(z_output_node_1, nullptr);
ASSERT_EQ(z_output_node_1->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_1, 0, output_vec_perm_node_1->GetName(),
0);
auto* z_output_node_2 = context.graph_view->GetNode("z_2");
ASSERT_NE(z_output_node_2, nullptr);
ASSERT_EQ(z_output_node_2->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_2, 0, output_vec_perm_node_2->GetName(),
0);
auto* z_output_node_3 = context.graph_view->GetNode("z_3");
ASSERT_NE(z_output_node_3, nullptr);
ASSERT_EQ(z_output_node_3->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_3, 0, updated_shape_node->GetName(), 2);
}
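// Fill's first operand is a shape vector, so it is permuted with
// DataFormatVecPermute; the scalar fill value is left untouched.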
TEST_F(TransposerTest, FillOpTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
  auto shape = ops::Shape(scope.WithOpName("shape"), conv2d);
auto value = ops::Const(scope.WithOpName("value"), 0, {});
auto fill = ops::Fill(scope.WithOpName("fill").WithDevice("/device:GPU:0"),
shape, value);
auto z = ops::Identity(scope.WithOpName("z"), fill);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
FillOpTransposer fill_op_transposer;
auto* fill_node = context.graph_view->GetNode("fill");
ASSERT_NE(fill_node, nullptr);
TF_ASSERT_OK(fill_op_transposer.TransposeNode(&context, fill_node));
auto* input_node = context.graph_view->GetNode(
"fill-0-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_node, nullptr);
auto* updated_fill_node = context.graph_view->GetNode("fill");
ASSERT_NE(updated_fill_node, nullptr);
ASSERT_EQ(updated_fill_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_fill_node, 0, input_node->GetName(), 0);
VerifyRegularFaninMatch(updated_fill_node, 1, "value", 0);
auto* output_node = context.graph_view->GetNode(
"fill-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_node, nullptr);
ASSERT_EQ(output_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(output_node, 0, updated_fill_node->GetName(), 0);
auto* z_node = context.graph_view->GetNode("z");
ASSERT_NE(z_node, nullptr);
ASSERT_EQ(z_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_node, 0, output_node->GetName(), 0);
}
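// Slice: begin and size are per-dimension vectors, each rewritten with
// DataFormatVecPermute.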
TEST_F(TransposerTest, SliceTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto begin = ops::Const(scope.WithOpName("begin"), {0, 0, 2, 1}, {4});
auto size = ops::Const(scope.WithOpName("size"), {1, 1, 2, 3}, {4});
auto slice_op =
ops::Slice(scope.WithOpName("slice").WithDevice("/device:GPU:0"), conv2d,
begin, size);
auto z = ops::Identity(scope.WithOpName("z"), slice_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SliceTransposer slice_transposer;
auto* slice = context.graph_view->GetNode("slice");
ASSERT_NE(slice, nullptr);
TF_ASSERT_OK(slice_transposer.TransposeNode(&context, slice));
auto* input_transpose_node = context.graph_view->GetNode(
"slice-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* begin_node = context.graph_view->GetNode(
"slice-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(begin_node, nullptr);
ASSERT_EQ(begin_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(begin_node, 0, "begin", 0);
auto* size_node = context.graph_view->GetNode(
"slice-2-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(size_node, nullptr);
ASSERT_EQ(size_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(size_node, 0, "size", 0);
auto* updated_slice_node = context.graph_view->GetNode("slice");
ASSERT_NE(updated_slice_node, nullptr);
ASSERT_EQ(updated_slice_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(updated_slice_node, 0,
input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_slice_node, 1, begin_node->GetName(), 0);
VerifyRegularFaninMatch(updated_slice_node, 2, size_node->GetName(), 0);
auto* output_transpose_node = context.graph_view->GetNode(
"slice-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
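// Split: the scalar split axis (fanin 0) goes through DataFormatDimMap, and
// each of the three outputs gets its own Transpose back to the source layout.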
TEST_F(TransposerTest, SplitTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const(scope.WithOpName("axis"), 2, {});
auto split_op = ops::Split(
scope.WithOpName("split").WithDevice("/device:GPU:0"), axis, conv2d, 3);
auto z_1 = ops::Identity(scope.WithOpName("z_1"), split_op.output[0]);
auto z_2 = ops::Identity(scope.WithOpName("z_2"), split_op.output[1]);
auto z_3 = ops::Identity(scope.WithOpName("z_3"), split_op.output[2]);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SplitTransposer split_transposer;
auto* split = context.graph_view->GetNode("split");
ASSERT_NE(split, nullptr);
TF_ASSERT_OK(split_transposer.TransposeNode(&context, split));
auto* input_transpose_node = context.graph_view->GetNode(
"split-1-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* axis_node = context.graph_view->GetNode(
"split-0-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* updated_split_node = context.graph_view->GetNode("split");
ASSERT_NE(updated_split_node, nullptr);
ASSERT_EQ(updated_split_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(updated_split_node, 0, axis_node->GetName(), 0);
VerifyRegularFaninMatch(updated_split_node, 1,
input_transpose_node->GetName(), 0);
auto* output_transpose_node_1 = context.graph_view->GetNode(
"split-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_1, nullptr);
auto* output_transpose_node_2 = context.graph_view->GetNode(
"split-1-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_2, nullptr);
auto* output_transpose_node_3 = context.graph_view->GetNode(
"split-2-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_3, nullptr);
auto* z_output_node_1 = context.graph_view->GetNode("z_1");
ASSERT_NE(z_output_node_1, nullptr);
ASSERT_EQ(z_output_node_1->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_1, 0,
output_transpose_node_1->GetName(), 0);
auto* z_output_node_2 = context.graph_view->GetNode("z_2");
ASSERT_NE(z_output_node_2, nullptr);
ASSERT_EQ(z_output_node_2->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_2, 0,
output_transpose_node_2->GetName(), 0);
auto* z_output_node_3 = context.graph_view->GetNode("z_3");
ASSERT_NE(z_output_node_3, nullptr);
ASSERT_EQ(z_output_node_3->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_3, 0,
output_transpose_node_3->GetName(), 0);
}
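// SplitV: like Split, but the axis is fanin 2; size_splits (fanin 1) holds
// per-split sizes, not per-dimension values, so it stays untouched.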
TEST_F(TransposerTest, SplitVTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto axis = ops::Const(scope.WithOpName("axis"), 1, {});
auto size_splits =
ops::Const(scope.WithOpName("size_splits"), {2, 2, 1}, {3});
auto splitv_op =
ops::SplitV(scope.WithOpName("splitv").WithDevice("/device:GPU:0"),
conv2d, size_splits, axis, 3);
auto z_1 = ops::Identity(scope.WithOpName("z_1"), splitv_op.output[0]);
auto z_2 = ops::Identity(scope.WithOpName("z_2"), splitv_op.output[1]);
auto z_3 = ops::Identity(scope.WithOpName("z_3"), splitv_op.output[2]);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
SplitVTransposer splitv_transposer;
auto* splitv = context.graph_view->GetNode("splitv");
ASSERT_NE(splitv, nullptr);
TF_ASSERT_OK(splitv_transposer.TransposeNode(&context, splitv));
auto* input_transpose_node = context.graph_view->GetNode(
"splitv-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* axis_node = context.graph_view->GetNode(
"splitv-2-DataFormatDimMapNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(axis_node, nullptr);
ASSERT_EQ(axis_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(axis_node, 0, "axis", 0);
auto* updated_splitv_node = context.graph_view->GetNode("splitv");
ASSERT_NE(updated_splitv_node, nullptr);
ASSERT_EQ(updated_splitv_node->NumRegularFanins(), 3);
VerifyRegularFaninMatch(updated_splitv_node, 0,
input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_splitv_node, 1, "size_splits", 0);
VerifyRegularFaninMatch(updated_splitv_node, 2, axis_node->GetName(), 0);
auto* output_transpose_node_1 = context.graph_view->GetNode(
"splitv-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_1, nullptr);
auto* output_transpose_node_2 = context.graph_view->GetNode(
"splitv-1-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_2, nullptr);
auto* output_transpose_node_3 = context.graph_view->GetNode(
"splitv-2-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node_3, nullptr);
auto* z_output_node_1 = context.graph_view->GetNode("z_1");
ASSERT_NE(z_output_node_1, nullptr);
ASSERT_EQ(z_output_node_1->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_1, 0,
output_transpose_node_1->GetName(), 0);
auto* z_output_node_2 = context.graph_view->GetNode("z_2");
ASSERT_NE(z_output_node_2, nullptr);
ASSERT_EQ(z_output_node_2->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_2, 0,
output_transpose_node_2->GetName(), 0);
auto* z_output_node_3 = context.graph_view->GetNode("z_3");
ASSERT_NE(z_output_node_3, nullptr);
ASSERT_EQ(z_output_node_3->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node_3, 0,
output_transpose_node_3->GetName(), 0);
}
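// StridedSlice: begin/end/strides are permuted with DataFormatVecPermute, and
// the begin_mask/end_mask bitmasks must be permuted to match (checked below).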
TEST_F(TransposerTest, StridedSliceTransposer) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto attrs = ops::StridedSlice::Attrs().BeginMask(0xB).EndMask(0x7);
auto begin = ops::Const(scope.WithOpName("begin"), {2, 0, 2, 1}, {4});
auto end = ops::Const(scope.WithOpName("end"), {34, 4, 3, 1}, {4});
auto strides = ops::Const(scope.WithOpName("strides"), {7, 2, 1, 1}, {4});
auto strided_slice_op = ops::StridedSlice(
scope.WithOpName("stridedslice").WithDevice("/device:GPU:0"), conv2d,
begin, end, strides, attrs);
auto z = ops::Identity(scope.WithOpName("z"), strided_slice_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
StridedSliceTransposer stridedslice_transposer;
auto* stridedslice = context.graph_view->GetNode("stridedslice");
ASSERT_NE(stridedslice, nullptr);
TF_ASSERT_OK(stridedslice_transposer.TransposeNode(&context, stridedslice));
auto* input_transpose_node = context.graph_view->GetNode(
"stridedslice-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(input_transpose_node, nullptr);
ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
VerifyRegularFaninMatch(input_transpose_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
auto* begin_node = context.graph_view->GetNode(
"stridedslice-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(begin_node, nullptr);
auto* end_node = context.graph_view->GetNode(
"stridedslice-2-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(end_node, nullptr);
auto* strides_node = context.graph_view->GetNode(
"stridedslice-3-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_NE(strides_node, nullptr);
auto* updated_stridedslice_node = context.graph_view->GetNode("stridedslice");
ASSERT_NE(updated_stridedslice_node, nullptr);
ASSERT_EQ(updated_stridedslice_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(updated_stridedslice_node, 0,
input_transpose_node->GetName(), 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 1, begin_node->GetName(),
0);
VerifyRegularFaninMatch(updated_stridedslice_node, 2, end_node->GetName(), 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 3, strides_node->GetName(),
0);
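  // The masks are bitmaps over dimensions and are permuted with the layout:
  // begin_mask 0xB (N, H, C in NHWC) becomes 0x7 (N, C, H in NCHW), and
  // end_mask 0x7 (N, H, W) becomes 0xD.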
const auto* begin_mask_attr =
updated_stridedslice_node->GetAttr("begin_mask");
ASSERT_NE(begin_mask_attr, nullptr);
EXPECT_EQ(begin_mask_attr->i(), 0x7);
const auto* end_mask_attr = updated_stridedslice_node->GetAttr("end_mask");
ASSERT_NE(end_mask_attr, nullptr);
EXPECT_EQ(end_mask_attr->i(), 0xD);
auto* output_transpose_node = context.graph_view->GetNode(
"stridedslice-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_NE(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0, output_transpose_node->GetName(),
0);
}
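// An ellipsis_mask makes the dimension mapping ambiguous, so the transposer
// must leave the node alone; all fanins stay as originally wired.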
TEST_F(TransposerTest, StridedSliceTransposerEllipsisMaskPresent) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto attrs =
ops::StridedSlice::Attrs().BeginMask(0xB).EndMask(0x7).EllipsisMask(0x2);
auto begin = ops::Const(scope.WithOpName("begin"), {2, 0, 2, 1}, {4});
auto end = ops::Const(scope.WithOpName("end"), {34, 4, 3, 1}, {4});
auto strides = ops::Const(scope.WithOpName("strides"), {7, 2, 1, 1}, {4});
auto strided_slice_op = ops::StridedSlice(
scope.WithOpName("stridedslice").WithDevice("/device:GPU:0"), conv2d,
begin, end, strides, attrs);
auto z = ops::Identity(scope.WithOpName("z"), strided_slice_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
StridedSliceTransposer stridedslice_transposer;
auto* stridedslice = context.graph_view->GetNode("stridedslice");
ASSERT_NE(stridedslice, nullptr);
TF_ASSERT_OK(stridedslice_transposer.TransposeNode(&context, stridedslice));
auto* updated_stridedslice_node = context.graph_view->GetNode("stridedslice");
ASSERT_NE(updated_stridedslice_node, nullptr);
ASSERT_EQ(updated_stridedslice_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(updated_stridedslice_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 1, "begin", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 2, "end", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 3, "strides", 0);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0,
updated_stridedslice_node->GetName(), 0);
}
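// Rank-3 begin/end/strides cannot be permuted against a 4D layout, so the
// transposer skips the node; only the conv2d fanin, rewired when conv2d
// itself was transposed, differs from the original graph.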
TEST_F(TransposerTest, StridedSliceTransposerConstFaninBadRank) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
GrapplerItem item;
Scope scope = Scope::NewRootScope();
auto input =
ops::RandomUniform(scope.WithOpName("input"),
{kBatchSize, kHeight, kWidth, kDepthIn}, DT_FLOAT);
auto filter =
ops::RandomUniform(scope.WithOpName("filter"),
{kHeight, kWidth, kDepthIn, kDepthOut}, DT_FLOAT);
Output conv2d = ops::Conv2D(
scope.WithOpName("conv2d").WithDevice("/device:GPU:0"), input, filter,
{1, 2, 4, 1}, "SAME", ops::Conv2D::DataFormat(kSrcFormat));
auto attrs = ops::StridedSlice::Attrs().BeginMask(0xB).EndMask(0x7);
auto begin = ops::Const(scope.WithOpName("begin"), {2, 0, 2}, {3});
auto end = ops::Const(scope.WithOpName("end"), {34, 4, 3}, {3});
auto strides = ops::Const(scope.WithOpName("strides"), {7, 2, 1}, {3});
auto strided_slice_op = ops::StridedSlice(
scope.WithOpName("stridedslice").WithDevice("/device:GPU:0"), conv2d,
begin, end, strides, attrs);
auto z = ops::Identity(scope.WithOpName("z"), strided_slice_op);
TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
TransposeContext context;
TF_ASSERT_OK(TransposeContext::InitializeTransposeContext(
item, virtual_cluster_.get(), &context));
context.AssignDeviceAndDataFormats(kGPU, kSrcFormat, kDstFormat);
DefaultLayoutSensitiveOpTransposer conv2d_transposer;
auto* c2d = context.graph_view->GetNode("conv2d");
ASSERT_NE(c2d, nullptr);
TF_ASSERT_OK(conv2d_transposer.TransposeNode(&context, c2d));
StridedSliceTransposer stridedslice_transposer;
auto* stridedslice = context.graph_view->GetNode("stridedslice");
ASSERT_NE(stridedslice, nullptr);
TF_ASSERT_OK(stridedslice_transposer.TransposeNode(&context, stridedslice));
auto* input_transpose_node = context.graph_view->GetNode(
"stridedslice-0-TransposeNHWCToNCHW-LayoutOptimizer");
ASSERT_EQ(input_transpose_node, nullptr);
auto* begin_node = context.graph_view->GetNode(
"stridedslice-1-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_EQ(begin_node, nullptr);
auto* end_node = context.graph_view->GetNode(
"stridedslice-2-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_EQ(end_node, nullptr);
auto* strides_node = context.graph_view->GetNode(
"stridedslice-3-DataFormatVecPermuteNHWCToNCHW-LayoutOptimizer");
ASSERT_EQ(strides_node, nullptr);
auto* updated_stridedslice_node = context.graph_view->GetNode("stridedslice");
ASSERT_NE(updated_stridedslice_node, nullptr);
ASSERT_EQ(updated_stridedslice_node->NumRegularFanins(), 4);
VerifyRegularFaninMatch(updated_stridedslice_node, 0,
"conv2d-0-0-TransposeNCHWToNHWC-LayoutOptimizer", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 1, "begin", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 2, "end", 0);
VerifyRegularFaninMatch(updated_stridedslice_node, 3, "strides", 0);
const auto* begin_mask_attr =
updated_stridedslice_node->GetAttr("begin_mask");
ASSERT_NE(begin_mask_attr, nullptr);
EXPECT_EQ(begin_mask_attr->i(), 0xB);
const auto* end_mask_attr = updated_stridedslice_node->GetAttr("end_mask");
ASSERT_NE(end_mask_attr, nullptr);
EXPECT_EQ(end_mask_attr->i(), 0x7);
auto* output_transpose_node = context.graph_view->GetNode(
"stridedslice-0-0-TransposeNCHWToNHWC-LayoutOptimizer");
ASSERT_EQ(output_transpose_node, nullptr);
auto* z_output_node = context.graph_view->GetNode("z");
ASSERT_NE(z_output_node, nullptr);
ASSERT_EQ(z_output_node->NumRegularFanins(), 1);
VerifyRegularFaninMatch(z_output_node, 0,
updated_stridedslice_node->GetName(), 0);
}
TEST_F(TransposerTest, ReduceTransposerKeepDims) {
ReduceTransposerKeepDims<int32>();
ReduceTransposerKeepDims<int64_t>();
}
TEST_F(TransposerTest, ReduceTransposerValidAxisNode) {
ReduceTransposerValidAxisNode<int32>();
ReduceTransposerValidAxisNode<int64_t>();
}
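// Standalone tests for the permutation helpers shared by the transposers.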
TEST(PermutationTest, PermutesVector) {
std::vector<int64_t> input{32, 16, 8, 4};
std::vector<int64_t> expected{4, 8, 16, 32};
TF_ASSERT_OK(PermuteSingle("test", {3, 2, 1, 0}, &input));
ASSERT_EQ(input.size(), 4);
for (int i = 0; i < input.size(); ++i) {
EXPECT_EQ(input[i], expected[i]);
}
}
TEST(PermutationTest, PermutesRepeatedField) {
TensorShapeProto input_shape = MakeTensorShapeFromDimensions({1, 2, 3, 4});
TensorShapeProto expected_shape = MakeTensorShapeFromDimensions({1, 4, 2, 3});
TF_ASSERT_OK(PermuteSingle("test", {0, 3, 1, 2}, input_shape.mutable_dim()));
EXPECT_EQ(input_shape.DebugString(), expected_shape.DebugString());
}
TEST(PermutationTest, PermutesDoubleRepeatedField) {
{
TensorShapeProto input =
MakeTensorShapeFromDimensions({1, 2, 3, 4, 5, 6, 7, 8});
TensorShapeProto expected =
MakeTensorShapeFromDimensions({1, 2, 7, 8, 3, 4, 5, 6});
TF_ASSERT_OK(PermuteDouble("test", {0, 3, 1, 2}, input.mutable_dim()));
EXPECT_EQ(input.DebugString(), expected.DebugString());
}
{
TensorShapeProto input =
MakeTensorShapeFromDimensions({1, 2, 3, 4, 5, 6, 7, 8});
TensorShapeProto expected =
MakeTensorShapeFromDimensions({1, 2, 5, 6, 7, 8, 3, 4});
TF_ASSERT_OK(PermuteDouble("test", {0, 2, 3, 1}, input.mutable_dim()));
EXPECT_EQ(input.DebugString(), expected.DebugString());
}
}
TEST(PermutationTest, PermutesDataFormat) {
string input = "NHWC";
string expected = "NCHW";
TF_ASSERT_OK(PermuteSingle("test", {0, 3, 1, 2}, &input));
EXPECT_EQ(input, expected);
}
TEST(PermutationTest, PermutesString) {
string input = "ABCD";
string expected = "ACBD";
TF_ASSERT_OK(PermuteSingle("test", {0, 2, 1, 3}, &input));
EXPECT_EQ(input, expected);
}
TEST(PermutationTest, GetNHWCToNCHWPermutation) {
string src_format = "NHWC";
absl::flat_hash_map<char, int> src_dim_indices =
GetDimensionIndices(src_format);
EXPECT_EQ(src_dim_indices.size(), 4);
EXPECT_EQ(src_dim_indices['N'], 0);
EXPECT_EQ(src_dim_indices['H'], 1);
EXPECT_EQ(src_dim_indices['W'], 2);
EXPECT_EQ(src_dim_indices['C'], 3);
string dst_format = "NCHW";
std::vector<int> permutation = GetPermutation(src_dim_indices, dst_format);
ASSERT_EQ(permutation.size(), 4);
EXPECT_EQ(permutation[0], 0);
EXPECT_EQ(permutation[1], 3);
EXPECT_EQ(permutation[2], 1);
EXPECT_EQ(permutation[3], 2);
}
TEST(PermutationTest, GetNCHWToNHWCPermutation) {
string src_format = "NCHW";
absl::flat_hash_map<char, int> src_dim_indices =
GetDimensionIndices(src_format);
EXPECT_EQ(src_dim_indices.size(), 4);
EXPECT_EQ(src_dim_indices['N'], 0);
EXPECT_EQ(src_dim_indices['C'], 1);
EXPECT_EQ(src_dim_indices['H'], 2);
EXPECT_EQ(src_dim_indices['W'], 3);
string dst_format = "NHWC";
std::vector<int> permutation = GetPermutation(src_dim_indices, dst_format);
ASSERT_EQ(permutation.size(), 4);
EXPECT_EQ(permutation[0], 0);
EXPECT_EQ(permutation[1], 2);
EXPECT_EQ(permutation[2], 3);
EXPECT_EQ(permutation[3], 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0ab4aa0-9e5d-4a2f-8263-c618612be419 | cpp | tensorflow/tensorflow | evaluation_utils | tensorflow/core/grappler/optimizers/evaluation_utils.cc | tensorflow/core/grappler/optimizers/evaluation_utils_test.cc | #include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/denormal.h"
#include "tensorflow/core/platform/setround.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace grappler {
using TensorVector = absl::InlinedVector<TensorValue, 4UL>;
const int kDeviceSimpleThreads = 2;
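// DeviceSimple is a minimal CPU DeviceBase backed by a small Eigen thread
// pool, just enough to run individual kernels outside of a Session.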
DeviceSimple::DeviceSimple() : DeviceBase(Env::Default()) {
eigen_worker_threads_.num_threads = kDeviceSimpleThreads;
eigen_worker_threads_.workers = new thread::ThreadPool(
Env::Default(), "evaluation_utils", eigen_worker_threads_.num_threads);
eigen_device_.reset(new Eigen::ThreadPoolDevice(
eigen_worker_threads_.workers->AsEigenThreadPool(),
eigen_worker_threads_.num_threads));
set_tensorflow_cpu_worker_threads(&eigen_worker_threads_);
set_eigen_cpu_device(eigen_device_.get());
}
DeviceSimple::~DeviceSimple() {
eigen_device_.reset();
delete eigen_worker_threads_.workers;
}
Status DeviceSimple::MakeTensorFromProto(const TensorProto& tensor_proto,
const AllocatorAttributes alloc_attrs,
Tensor* tensor) {
Tensor parsed(tensor_proto.dtype());
if (!parsed.FromProto(cpu_allocator(), tensor_proto)) {
return errors::InvalidArgument("Cannot parse tensor from tensor_proto.");
}
*tensor = parsed;
return absl::OkStatus();
}
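// Instantiates the CPU kernel for `node`, runs it once over `inputs`, and
// appends the produced tensors to `output`. Falls back to a scratch
// DeviceSimple when no cpu_device is supplied.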
Status EvaluateNode(const NodeDef& node, const TensorVector& inputs,
DeviceBase* cpu_device, ResourceMgr* resource_mgr,
TensorVector* output) {
Status status;
std::unique_ptr<DeviceBase> device;
if (cpu_device == nullptr) {
device.reset(new DeviceSimple());
cpu_device = device.get();
}
std::unique_ptr<OpKernel> op_kernel(
CreateOpKernel(DEVICE_CPU, cpu_device, cpu_device->GetAllocator({}), node,
TF_GRAPH_DEF_VERSION, &status));
TF_RETURN_IF_ERROR(status);
OpKernelContext::Params params;
params.device = cpu_device;
params.frame_iter = FrameAndIter(0, 0);
params.inputs = inputs;
params.op_kernel = op_kernel.get();
params.resource_manager = resource_mgr;
absl::InlinedVector<AllocatorAttributes, 4UL> output_attrs;
const int num_outputs = op_kernel->num_outputs();
for (int i = 0; i < num_outputs; i++) {
AllocatorAttributes attr;
attr.set_on_host(true);
output_attrs.push_back(attr);
}
params.output_attr_array = output_attrs.data();
OpKernelContext op_context(¶ms);
op_kernel->Compute(&op_context);
for (int i = 0; i < num_outputs; i++) {
output->push_back(op_context.release_output(i));
}
return op_context.status();
}
}
} | #include "tensorflow/core/platform/cpu_info.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/ThreadPool"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/optimizers/evaluation_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
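// Sanity checks for DeviceSimple: it must expose an Eigen CPU device and
// round-trip tensors through TensorProto.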
TEST(EvaluationUtilsTest, DeviceSimple_BasicProperties) {
DeviceSimple dsimple;
ASSERT_TRUE(dsimple.has_eigen_cpu_device());
const Eigen::ThreadPoolInterface* pool =
dsimple.eigen_cpu_device()->getPool();
ASSERT_NE(pool, nullptr);
}
TEST(EvaluationUtilsTest, DeviceSimple_MakeTensorFromProto) {
DeviceSimple dsimple;
TensorProto proto;
Tensor tensor;
EXPECT_FALSE(dsimple.MakeTensorFromProto(proto, {}, &tensor).ok());
Tensor original(tensorflow::DT_INT16, TensorShape{4, 2});
original.flat<int16>().setRandom();
original.AsProtoTensorContent(&proto);
TF_ASSERT_OK(dsimple.MakeTensorFromProto(proto, {}, &tensor));
ASSERT_EQ(tensor.dtype(), original.dtype());
ASSERT_EQ(tensor.shape(), original.shape());
auto buf0 = original.flat<int16>();
auto buf1 = tensor.flat<int16>();
ASSERT_EQ(buf0.size(), buf1.size());
for (int i = 0; i < buf0.size(); ++i) {
EXPECT_EQ(buf0(i), buf1(i));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/evaluation_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/evaluation_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84795ad2-e755-44a1-8402-5295dc75e04e | cpp | tensorflow/tensorflow | function_optimizer | tensorflow/core/grappler/optimizers/function_optimizer.cc | tensorflow/core/grappler/optimizers/function_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr const char* const kFuncAttr = FunctionLibraryDefinition::kFuncAttr;
constexpr const char* const kNoSpecializeAttr = "_nospecialize";
constexpr const char* const kGrapplerSpecializedFuncAttr =
"_GrapplerSpecializedFunc";
// A "direct" function call uses the function name as the node op; an
// "indirect" call goes through PartitionedCall/StatefulPartitionedCall with
// the function referenced in the "f" attribute.
bool IsDirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
return func_node.op() == func.signature().name();
}
bool IsIndirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
if (!IsPartitionedCall(func_node) && !IsStatefulPartitionedCall(func_node)) {
return false;
}
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return func_attr != nullptr && func_attr->has_func() &&
func_attr->func().name() == func.signature().name();
}
AttrSlice FunctionInstantiationAttributes(const FunctionDef& func,
const NodeDef& func_node) {
if (IsDirectFunctionCall(func, func_node)) {
return AttrSlice(func_node);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return AttrSlice(&func_attr->func().attr());
} else {
LOG(WARNING) << "Can't resolve function instantiation attributes: "
<< SummarizeNodeDef(func_node);
return AttrSlice();
}
}
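// A stub Device that carries only a name and a device type; it exists so the
// Placer can be run over the devices known to the GrapplerItem.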
class FakeDevice : public Device {
public:
FakeDevice(Env* env, const string& device) : Device(env, attr(device)) {}
explicit FakeDevice(const string& device) : FakeDevice(nullptr, device) {}
Status Sync() override { return absl::OkStatus(); }
private:
static DeviceAttributes attr(const string& device) {
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(device, &parsed_name);
DCHECK(parsed) << "Failed to parse full device name: " << device;
DeviceAttributes attr;
attr.set_name(device);
attr.set_device_type(parsed_name.type);
return attr;
}
};
bool MarkedNoSpecialize(const FunctionDef& fdef) {
const auto attr = AttrSlice(&fdef.attr());
bool nospecialize = false;
return TryGetNodeAttr(attr, kNoSpecializeAttr, &nospecialize) && nospecialize;
}
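// Identifies a function call site for specialization: call sites with equal
// signatures can safely share a single specialized function.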
struct FunctionSpecializationSignature {
using InputPort = int;
using OutputPort = int;
string func_name;
bool is_in_fetch_set;
absl::flat_hash_set<OutputPort> active_outputs;
absl::flat_hash_map<string, DataType> type_parameters;
absl::flat_hash_map<string, AttrValue> body_parameters;
absl::flat_hash_map<InputPort, string> const_inputs;
bool operator==(const FunctionSpecializationSignature& other) const {
bool equals = func_name == other.func_name &&
is_in_fetch_set == other.is_in_fetch_set &&
active_outputs == other.active_outputs &&
type_parameters == other.type_parameters &&
const_inputs == other.const_inputs;
if (!equals) return false;
if (body_parameters.size() != other.body_parameters.size()) return false;
for (const auto& lhs : body_parameters) {
auto it = other.body_parameters.find(lhs.first);
if (it == other.body_parameters.end()) return false;
if (!AreAttrValuesEqual(lhs.second, (*it).second,
true)) {
return false;
}
}
return true;
}
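// Combines the per-entry hashes in sorted order, so the result does not
// depend on container iteration order.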
template <typename H>
friend H AbslHashValue(H h, const FunctionSpecializationSignature& s) {
H base = H::combine(std::move(h), s.func_name, s.is_in_fetch_set);
std::vector<uint64> hashes;
hashes.reserve(s.active_outputs.size()
+ s.type_parameters.size() * 2
+ s.body_parameters.size() * 2
+ s.const_inputs.size() * 2);
absl::c_transform(s.active_outputs, std::back_inserter(hashes),
hash<OutputPort>());
using TypeParam = std::pair<const string, DataType>;
absl::c_for_each(s.type_parameters, [&hashes](const TypeParam& type_param) {
AttrValue attr_value;
attr_value.set_type(type_param.second);
hashes.push_back(Hash64(type_param.first));
hashes.push_back(AttrValueHash(attr_value));
});
using BodyParam = std::pair<const string, AttrValue>;
absl::c_for_each(s.body_parameters, [&hashes](const BodyParam& body_param) {
hashes.push_back(Hash64(body_param.first));
hashes.push_back(FastAttrValueHash(body_param.second));
});
using ConstInput = std::pair<const InputPort, string>;
absl::c_for_each(s.const_inputs, [&hashes](const ConstInput& const_input) {
hashes.push_back(hash<InputPort>()(const_input.first));
hashes.push_back(Hash64(const_input.second));
});
absl::c_sort(hashes);
return H::combine_contiguous(std::move(base), hashes.data(), hashes.size());
}
};
struct FunctionSpecialization {
string specialized_func_name;
bool is_in_fetch_set;
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
absl::flat_hash_set<int> active_outputs;
std::vector<std::pair<int, int>> output_mapping;
};
class FunctionOptimizerContext {
public:
explicit FunctionOptimizerContext(const GrapplerItem& item,
RewriterConfig::Toggle opt_level,
const GraphDef& graph)
: item_(&item),
opt_level_(opt_level),
function_library_(OpRegistry::Global(), graph.library()),
truly_const_nodes_(InferTrulyConstNodes(item, graph)),
graph_view_(&graph) {}
const GrapplerItem& item() const { return *item_; }
int graph_version() const { return item_->graph.versions().producer(); }
RewriterConfig::Toggle opt_level() const { return opt_level_; }
const FunctionLibraryDefinition& function_library() const {
return function_library_;
}
FunctionLibraryDefinition& function_library() { return function_library_; }
const absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>&
tensor_mapping() const {
return tensor_mapping_;
}
const GraphView& graph_view() const { return graph_view_; }
bool IsFeedNode(const string& node_name) const {
return absl::c_any_of(
item_->feed, [&](const std::pair<std::string, Tensor>& feed) {
return ParseTensorName(feed.first).node() == node_name;
});
}
bool IsFetchNode(const string& node_name) const {
return absl::c_any_of(item_->fetch, [&](const string& fetch) {
return ParseTensorName(fetch).node() == node_name;
});
}
bool IsTrulyConst(const string& name) const {
return TrulyConstNode(name) != nullptr;
}
const NodeDef* TrulyConstNode(const string& name) const {
return gtl::FindWithDefault(truly_const_nodes_, name, nullptr);
}
const FunctionSpecialization* FindFunctionSpecialization(
const FunctionSpecializationSignature& sig) const {
return gtl::FindOrNull(specialized_functions_, sig);
}
void AddSpecializedFunction(const FunctionSpecializationSignature& sig,
const FunctionSpecialization& specialized_func) {
specialized_functions_.emplace(sig, specialized_func);
}
void AddTensorMapping(const SafeTensorId& from, const SafeTensorId& to) {
DCHECK(from.index() != Graph::kControlSlot)
<< "Tensor mapping must be from regular tensor";
DCHECK(to.index() != Graph::kControlSlot)
<< "Tensor mapping must be to regular tensor";
auto inserted = tensor_mapping_.insert({from, to});
DCHECK(inserted.second)
<< "Failed to insert duplicated tensor mapping: "
<< "from=" << from.ToString() << " to=" << to.ToString();
}
void AddTensorMapping(const string& func_node,
const FunctionSpecialization& specialized_func) {
for (const auto& pair : specialized_func.output_mapping) {
int from_idx = pair.first;
int to_idx = pair.second;
if (from_idx != to_idx) {
SafeTensorId from_tensor(func_node, from_idx);
SafeTensorId to_tensor(func_node, to_idx);
AddTensorMapping(from_tensor, to_tensor);
}
}
}
private:
static absl::flat_hash_map<string, const NodeDef*> InferTrulyConstNodes(
const GrapplerItem& item, const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> feed_nodes;
for (const auto& feed : item.feed) {
feed_nodes.insert(feed.first);
}
absl::flat_hash_map<string, const NodeDef*> const_nodes;
for (const NodeDef& node : graph.node()) {
if (IsConstant(node) && !feed_nodes.contains(node.name())) {
const_nodes[node.name()] = &node;
}
}
return const_nodes;
}
const GrapplerItem* item_;
RewriterConfig::Toggle opt_level_;
FunctionLibraryDefinition function_library_;
absl::flat_hash_map<string, const NodeDef*> truly_const_nodes_;
absl::flat_hash_map<FunctionSpecializationSignature,
const FunctionSpecialization>
specialized_functions_;
absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>
tensor_mapping_;
GraphView graph_view_;
FunctionOptimizerContext(const FunctionOptimizerContext&) = delete;
void operator=(const FunctionOptimizerContext&) = delete;
};
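// Resolves the FunctionDef invoked by a node, handling both direct calls and
// PartitionedCall/StatefulPartitionedCall; returns nullptr if the node does
// not call a library function.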
const FunctionDef* FindFunctionCall(const FunctionOptimizerContext& ctx,
const NodeDef& node) {
if (IsPartitionedCall(node) || IsStatefulPartitionedCall(node)) {
const AttrValue* func_attr = AttrSlice(node).Find("f");
return (func_attr != nullptr && func_attr->has_func())
? ctx.function_library().Find(func_attr->func().name())
: nullptr;
}
return ctx.function_library().Find(node.op());
}
absl::flat_hash_set<int> GetActiveOutputs(const NodeDef& node,
const FunctionOptimizerContext& ctx,
int size_hint = 0) {
absl::flat_hash_set<int> active_outputs;
active_outputs.reserve(static_cast<size_t>(size_hint));
const auto node_fanout_edges =
ctx.graph_view().GetFanoutEdges(node, /*include_controlled_edges=*/false);
for (const GraphView::Edge& edge : node_fanout_edges) {
active_outputs.insert(edge.src.port_id);
}
for (const string& fetch : ctx.item().fetch) {
TensorId fetch_tensor = ParseTensorName(fetch);
if (fetch_tensor.node() == node.name()) {
active_outputs.insert(fetch_tensor.index());
}
}
return active_outputs;
}
bool HasTrulyConstInputs(const NodeDef& node,
const FunctionOptimizerContext& ctx) {
const auto is_truly_const = [&ctx](const string& input) {
return ctx.IsTrulyConst(NodeName(input));
};
return absl::c_any_of(node.input(), is_truly_const);
}
bool HasUnusedOutputs(const NodeDef& func_node, const FunctionDef& func,
const FunctionOptimizerContext& ctx) {
int num_outputs = func.signature().output_arg_size();
const absl::flat_hash_set<int> active_outputs =
GetActiveOutputs(func_node, ctx, num_outputs);
int active_outputs_size = active_outputs.size();
return active_outputs_size != num_outputs;
}
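// Removes function definitions that are no longer reachable from the
// optimized graph.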
FunctionDefLibrary PruneFunctionLibrary(const FunctionLibraryDefinition& flib,
const GraphDef& optimized_graph) {
FunctionLibraryDefinition pruned_flib =
flib.ReachableDefinitions(optimized_graph);
int pruned_functions = static_cast<int>(pruned_flib.num_functions()) -
static_cast<int>(flib.num_functions());
VLOG(3) << "Pruned function library: " << pruned_flib.num_functions()
<< " functions (" << pruned_functions << ")";
return pruned_flib.ToProto();
}
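// Replaces function inputs that are fed by truly-const nodes with Const nodes
// inside the function body, recording which inputs were consumed and the
// control dependencies that must be preserved on the caller.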
Status PushDownConstInputs(const NodeDef& func_node,
const FunctionOptimizerContext& ctx,
GrapplerFunctionItem* item,
absl::flat_hash_set<string>* const_inputs,
absl::flat_hash_set<string>* control_deps) {
const auto record_control_deps = [&](const NodeDef* const_input) {
for (int i = const_input->input_size() - 1; i >= 0; --i) {
const string& input = const_input->input(i);
if (IsControlInput(input))
control_deps->insert(input);
else
break;
}
};
for (int i = func_node.input_size() - 1; i >= 0; --i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) continue;
const string node_name = NodeName(input);
if (ctx.IsTrulyConst(node_name)) {
VLOG(3) << "Push const into function body: input=" << input;
const auto* const_input = CHECK_NOTNULL(ctx.TrulyConstNode(node_name));
const_inputs->insert(input);
record_control_deps(const_input);
TF_RETURN_IF_ERROR(ReplaceInputWithConst(*const_input, i, item));
}
}
return absl::OkStatus();
}
void RemovePushedDownConstInputs(const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
std::vector<string> keep_inputs;
const auto& inputs = specialized_func_node->input();
absl::c_copy_if(inputs, std::back_inserter(keep_inputs),
[&](const string& input) {
return !specialization.const_inputs.contains(input);
});
specialized_func_node->clear_input();
for (const auto& keep : keep_inputs) specialized_func_node->add_input(keep);
if (!specialization.control_deps.empty()) {
absl::flat_hash_set<string> existing_control_deps;
for (const string& input : keep_inputs) {
existing_control_deps.insert(AsControlDependency(NodeName(input)));
}
for (const string& ctrl : specialization.control_deps) {
if (!existing_control_deps.contains(ctrl)) {
VLOG(3) << "Forward control dependency: input=" << ctrl;
specialized_func_node->add_input(ctrl);
}
}
}
}
void RemovePushedDownConstInputTypes(
const FunctionSpecialization& specialization, const NodeDef& func_node,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
const AttrValue* tin = AttrSlice(func_node).Find("Tin");
if (tin == nullptr || !tin->has_list()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tin"].mutable_list()->clear_type();
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (!specialization.const_inputs.contains(input)) {
DataType dt = tin->list().type(i);
(*attr)["Tin"].mutable_list()->add_type(dt);
}
}
}
void RemoveUnusedOutputsTypes(const FunctionSpecialization& specialization,
const NodeDef& func_node,
NodeDef* specialized_func_node) {
const AttrValue* tout = AttrSlice(func_node).Find("Tout");
if (tout == nullptr || !tout->has_list()) return;
int specialization_active_outputs_size = specialization.active_outputs.size();
if (specialization_active_outputs_size == tout->list().type_size()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tout"].mutable_list()->clear_type();
for (int i = 0; i < tout->list().type_size(); ++i) {
if (specialization.active_outputs.contains(i)) {
DataType dt = tout->list().type(i);
(*attr)["Tout"].mutable_list()->add_type(dt);
}
}
}
Status UpdateSpecializedFunctionCallSite(const FunctionDef& func,
const NodeDef& func_node,
const string& specialized_func_name,
NodeDef* specialized_func_node) {
if (IsDirectFunctionCall(func, func_node)) {
specialized_func_node->set_op(specialized_func_name);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* attr = specialized_func_node->mutable_attr();
(*attr)[kFuncAttr].mutable_func()->set_name(specialized_func_name);
} else {
return absl::InvalidArgumentError("Unknown function call site");
}
return absl::OkStatus();
}
Status UpdateSpecializedFunctionNode(
const FunctionDef& func, const NodeDef& func_node,
const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
bool is_indirect_call = IsIndirectFunctionCall(func, func_node);
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionCallSite(
func, func_node, specialization.specialized_func_name,
specialized_func_node));
RemovePushedDownConstInputs(specialization, specialized_func_node);
if (is_indirect_call) {
RemovePushedDownConstInputTypes(specialization, func_node,
specialized_func_node);
}
if (is_indirect_call && !specialization.is_in_fetch_set) {
RemoveUnusedOutputsTypes(specialization, func_node, specialized_func_node);
}
specialized_func_node->mutable_attr()->erase("_gradient_op_type");
return absl::OkStatus();
}
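// Fills in the specialization signature for a call site: function name,
// fetch-set membership, active outputs, type/body parameters, and the set of
// truly-const inputs.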
Status InitializeFunctionSpecializationSignature(
const NodeDef& func_node, const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionOptimizerContext& ctx, FunctionSpecializationSignature* sig) {
DCHECK(sig->const_inputs.empty());
DCHECK(sig->active_outputs.empty());
sig->func_name = func.signature().name();
sig->is_in_fetch_set = ctx.IsFetchNode(func_node.name());
sig->active_outputs = GetActiveOutputs(func_node, ctx);
TF_RETURN_IF_ERROR(InstantiationTypeParameters(func, func_instantiation_attr,
&sig->type_parameters));
TF_RETURN_IF_ERROR(InstantiationBodyParameters(func, func_instantiation_attr,
&sig->body_parameters));
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (ctx.IsTrulyConst(input)) {
sig->const_inputs.emplace(i, input);
}
}
return absl::OkStatus();
}
string SpecializedFunctionName(const FunctionOptimizerContext& ctx,
const FunctionDef& func,
const NodeDef& func_node) {
return absl::Substitute(
"$0_specialized_for_$1_at_$2", func.signature().name(),
absl::StrReplaceAll(func_node.name(), {{"/", "_"}}), ctx.item().id);
}
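// Specializes the function for this call site, or reuses an existing
// specialization with an identical signature: const inputs are pushed into
// the body, unused outputs are pruned (unless the node is fetched), and the
// call node is rewritten to target the specialized function.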
Status SpecializeFunction(const NodeDef& func_node, const FunctionDef& func,
FunctionOptimizerContext* ctx,
GraphDef* optimized_graph) {
VLOG(2) << "Specialize function call: " << SummarizeNodeDef(func_node);
const AttrSlice func_instantiation_attr =
FunctionInstantiationAttributes(func, func_node);
FunctionSpecializationSignature signature;
TF_RETURN_IF_ERROR(InitializeFunctionSpecializationSignature(
func_node, func, func_instantiation_attr, *ctx, &signature));
const FunctionSpecialization* already_specialized =
ctx->FindFunctionSpecialization(signature);
if (already_specialized) {
VLOG(2) << "Function was already specialized in identical context: "
"specialized_name="
<< already_specialized->specialized_func_name;
NodeDef* specialized_func_node = optimized_graph->add_node();
*specialized_func_node = func_node;
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionNode(
func, func_node, *already_specialized, specialized_func_node));
ctx->AddTensorMapping(specialized_func_node->name(), *already_specialized);
return absl::OkStatus();
}
const auto& flib = ctx->function_library();
GrapplerFunctionItem item;
TF_RETURN_IF_ERROR(MakeGrapplerFunctionItem(
func, func_instantiation_attr, flib, ctx->graph_version(), &item));
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
TF_RETURN_IF_ERROR(PushDownConstInputs(func_node, *ctx, &item, &const_inputs,
&control_deps));
std::vector<std::pair<int, int>> output_mapping;
if (!signature.is_in_fetch_set) {
int num_func_outputs = item.output_size();
absl::flat_hash_set<int> remove;
for (int i = 0; i < num_func_outputs; ++i) {
if (!signature.active_outputs.count(i)) remove.insert(i);
}
TF_RETURN_IF_ERROR(RemoveFunctionOutputs(remove, &item, &output_mapping));
}
FunctionDef specialized_func;
TF_RETURN_IF_ERROR(MakeFunctionDef(item, flib, &specialized_func));
const string specialized_func_name =
SpecializedFunctionName(*ctx, func, func_node);
if (flib.Contains(specialized_func_name)) {
return absl::InternalError("Created duplicate function specialization");
}
specialized_func.mutable_signature()->set_name(specialized_func_name);
auto* specialized_attr = specialized_func.mutable_attr();
(*specialized_attr)[kGrapplerSpecializedFuncAttr].set_b(true);
TF_RETURN_IF_ERROR(ctx->function_library().AddFunctionDef(specialized_func));
NodeDef* specialized_func_node = optimized_graph->add_node();
*specialized_func_node = func_node;
FunctionSpecialization func_specialization = {
specialized_func_name, signature.is_in_fetch_set, const_inputs,
control_deps, signature.active_outputs, output_mapping};
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionNode(
func, func_node, func_specialization, specialized_func_node));
ctx->AddSpecializedFunction(signature, func_specialization);
ctx->AddTensorMapping(specialized_func_node->name(), func_specialization);
return absl::OkStatus();
}
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsPass::kLowerAsMultiDeviceFunctionAttr;
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
using OutputControlSource = InlineFunctionBodyOptions::OutputControlSource;
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
bool match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && match;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
const string& value = GetNodeAttrString(n->attrs(), attr_name);
return !value.empty();
}
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
bool MarkedForXlaCompilation(const NodeDef& n) {
auto is_enabled = [&](std::string attr_name) -> bool {
auto it = n.attr().find(attr_name);
return it != n.attr().end() && (!it->second.s().empty() || it->second.b());
};
return is_enabled("_xla_compile_id") || is_enabled("_tpu_replicate") ||
is_enabled(kXlaMustCompileAttr);
}
// Stateful ops that are exempt from the side-effects execution validation
// below (their execution order does not need to be enforced with control
// dependencies after inlining).
bool IsExemptFromSideEffectsExecutionValidation(const string& op) {
static const auto* exemption = new absl::flat_hash_set<string>(
{
// Collective and cross-device communication ops.
"CollectiveGather", "CollectiveReduce", "CollectiveBcastSend",
"CollectiveBcastRecv", "CollectiveBcastSendV2", "CollectiveBcastRecvV2",
"NcclAllReduce", "Send", "Recv", "CollectiveAssignGroupsV2",
"CollectiveInitializeCommunicator",
// Random number generation ops.
"RandomUniform", "RandomUniformInt", "RandomStandardNormal",
"ParameterizedTruncatedNormal", "TruncatedNormal", "RandomShuffle",
"Multinomial", "RandomGamma", "RandomGammaGrad", "RandomPoisson",
"RandomPoissonV2",
// Stateful variable reads.
"ReadVariableOp",
// CudnnRNN ops.
"CudnnRNN", "CudnnRNNBackprop", "CudnnRNNV2", "CudnnRNNV3",
"CudnnRNNBackpropV2", "CudnnRNNBackpropV3",
// TPU embedding enqueue ops.
"EnqueueTPUEmbeddingSparseBatch", "EnqueueTPUEmbeddingIntegerBatch",
"EnqueueTPUEmbeddingSparseTensorBatch",
"EnqueueTPUEmbeddingRaggedTensorBatch",
"EnqueueTPUEmbeddingArbitraryTensorBatch",
"DynamicEnqueueTPUEmbeddingArbitraryTensorBatch",
// Checkpointing ops.
"SaveV2", "RestoreV2",
// TPU infeed ops.
"InfeedEnqueue", "InfeedEnqueueTuple"});
return exemption->contains(op);
}
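// Checks that every stateful node in the function body is on a path to a
// control source (the data outputs or the control outputs), so that its side
// effects are still guaranteed to execute after inlining.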
Status ValidateSideEffectsExecution(
const FunctionBody& fbody, OutputControlSource output_control_source,
bool has_outgoing_control_edges,
bool validate_outgoing_control_edge = true) {
std::vector<const Node*> fbody_side_effects;
absl::c_copy_if(
fbody.graph->nodes(), std::back_inserter(fbody_side_effects),
[](const Node* n) {
return n->op_def().is_stateful() && !n->IsArg() && !n->IsRetval() &&
!IsExemptFromSideEffectsExecutionValidation(n->type_string());
});
if (!fbody_side_effects.empty() && !has_outgoing_control_edges) {
const string error_message =
"Can't guarantee execution of function side-effects after inlining. "
"Function call node has no outgoing control edges.";
if (validate_outgoing_control_edge) {
return absl::InternalError(error_message);
} else {
VLOG(3) << error_message;
}
}
absl::flat_hash_set<const Node*> control_sources;
if (output_control_source == OutputControlSource::kDataOutputs) {
control_sources = {fbody.ret_nodes.begin(), fbody.ret_nodes.end()};
} else if (output_control_source == OutputControlSource::kControlOutputs) {
control_sources = {fbody.control_ret_nodes.begin(),
fbody.control_ret_nodes.end()};
}
for (const Node* side_effect : fbody_side_effects) {
VLOG(4) << "Check that node " << side_effect->name()
<< " will execute after inlining.";
bool will_execute = false;
const auto is_control_source = [&](const Node* n) -> void {
const auto it = control_sources.find(n);
if (it != control_sources.end()) {
VLOG(4) << "Found a path to control source: " << side_effect->name()
<< " ---> " << (*it)->name();
will_execute = true;
}
};
DFSFrom(*fbody.graph, {side_effect}, /*enter=*/is_control_source,
/*leave=*/{}, NodeComparatorName{});
if (!will_execute) {
return absl::InternalError(absl::StrCat(
"Can't guarantee execution of a side-effectful node that is not "
"reachable from a function control source. Function body node: ",
SummarizeNode(*side_effect)));
}
}
return absl::OkStatus();
}
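// Rejects inlining when a dead tensor produced by a Switch (or by a nested
// function call that may itself produce dead tensors) can reach a function
// output; only Merge nodes stop dead-tensor propagation.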
Status ValidateNoDeadOutputs(const FunctionLibraryDefinition& flib_def,
const FunctionBody& fbody) {
absl::flat_hash_set<const Node*> output_nodes = {fbody.ret_nodes.begin(),
fbody.ret_nodes.end()};
std::vector<const Node*> dead_tensor_sources;
for (const Node* n : fbody.graph->nodes()) {
if (n->IsSwitch()) {
VLOG(4) << "Add dead tensors source. Switch node: " << n->name();
dead_tensor_sources.push_back(n);
continue;
}
const FunctionDef* fdef = flib_def.Find(n->type_string());
if (fdef != nullptr) {
std::unique_ptr<FunctionBody> nested_fbody;
NameAttrList func;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(n->def(), &func));
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, &nested_fbody));
if (!ValidateNoDeadOutputs(flib_def, *nested_fbody).ok()) {
VLOG(4) << "Add dead tensors source. Function call: " << func.name()
<< " node=" << n->name();
dead_tensor_sources.push_back(n);
}
}
}
for (const Node* dead_tensor_source : dead_tensor_sources) {
bool has_dead_output = false;
const auto is_output_node = [&](const Node* n) -> void {
const auto it = output_nodes.find(n);
if (it != output_nodes.end()) {
VLOG(4) << "Found a path to output node from dead tensor source: "
<< dead_tensor_source->name() << " ---> " << (*it)->name();
has_dead_output = true;
}
};
const auto stop_traversal = [&has_dead_output](const Edge& edge) -> bool {
return !edge.src()->IsMerge() || has_dead_output;
};
DFSFrom(*fbody.graph, {dead_tensor_source}, /*enter=*/is_output_node,
/*leave=*/{}, NodeComparatorName{}, stop_traversal);
if (has_dead_output) {
return absl::InternalError(absl::StrCat(
"Can't inline a function with dead outputs. Dead tensor source: ",
SummarizeNode(*dead_tensor_source)));
}
}
return absl::OkStatus();
}
Status MakeFunctionBodyForInlining(const Node& node,
const FunctionLibraryDefinition& flib_def,
std::unique_ptr<FunctionBody>* fbody) {
VLOG(3) << "Make function body for inlining: " << SummarizeNode(node);
const auto find_fdef = [&flib_def, &node](
const string& name,
const FunctionDef** fdef) -> Status {
if ((*fdef = flib_def.Find(name)) == nullptr) {
return absl::InternalError(absl::StrCat(
"Was not able to find a function definition (name=", name,
") for a function call: ", SummarizeNode(node)));
}
return absl::OkStatus();
};
if (node.type_string() == FunctionLibraryDefinition::kGradientOp) {
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), kFuncAttr, &func));
const string grad = flib_def.FindGradient(func.name());
if (!grad.empty()) {
const FunctionDef* grad_fdef;
TF_RETURN_IF_ERROR(find_fdef(grad, &grad_fdef));
VLOG(4) << "Instantiate a custom SymbolicGradient: gradient=" << grad
<< " (function=" << func.name() << ")";
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*grad_fdef, AttrSlice(&func.attr()), &flib_def, fbody));
} else if (flib_def.Find(func.name()) == nullptr) {
gradient::Creator creator;
TF_RETURN_IF_ERROR(gradient::GetOpGradientCreator(func.name(), &creator));
if (creator == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("No gradient is defined for ", func.name()));
}
FunctionDef grad_fdef;
TF_RETURN_IF_ERROR(creator(AttrSlice(&func.attr()), &grad_fdef));
VLOG(4) << "Instantiate a SymbolicGradient for a primitive op: "
<< func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
grad_fdef, AttrSlice(&func.attr()), &flib_def, fbody));
} else {
const FunctionDef* fdef;
TF_RETURN_IF_ERROR(find_fdef(func.name(), &fdef));
VLOG(4) << "Instantiate a SymbolicGradient for a function: "
<< func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, fbody));
*fbody = SymbolicGradient(**fbody);
}
} else {
NameAttrList func;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node.def(), &func));
const FunctionDef* fdef;
TF_RETURN_IF_ERROR(find_fdef(func.name(), &fdef));
VLOG(4) << "Instantiate a function call: function=" << func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, fbody));
}
return absl::OkStatus();
}
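// Enforces strict (non-lazy) evaluation of function inputs by adding control
// edges from the data inputs to the caller; this is required for resource
// inputs without incoming control edges and for constant Enter inputs.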
void AddStrictInputSemantics(Node* caller, Graph* g) {
absl::flat_hash_set<const Node*> existing_control_sources;
for (const Edge* edge : caller->in_edges()) {
if (edge->IsControlEdge()) {
existing_control_sources.insert(edge->src());
}
}
const bool has_incoming_control_edges = !existing_control_sources.empty();
const bool has_resource_input =
absl::c_any_of(caller->input_types(),
[](const DataType dtype) { return dtype == DT_RESOURCE; });
const bool has_constant_enter_input =
absl::c_any_of(caller->in_edges(), [](const Edge* edge) {
Node* src = edge->src();
return src->IsEnter() && CheckBoolAttr(src, "is_constant");
});
const bool requires_strict_semantics =
(!has_incoming_control_edges && has_resource_input) ||
(has_constant_enter_input);
if (!requires_strict_semantics) return;
std::set<const Node*> data_inputs;
for (const Edge* edge : caller->in_edges()) {
if (!edge->IsControlEdge() &&
!existing_control_sources.contains(edge->src())) {
data_inputs.insert(edge->src());
}
}
VLOG(3) << "Add control edges from all data inputs to enforce strict "
"semantics with regard to function inputs";
const auto is_placeholder = [](const Node* node) -> bool {
return node->type_string() == "Placeholder";
};
for (const Node* node : data_inputs) {
if (is_placeholder(node)) continue;
g->AddControlEdge(g->FindNodeId(node->id()), caller,
/*allow_duplicates=*/true);
}
}
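// Keeps an inlined function body inside the caller's while-loop frame by
// adding a control edge from the loop's Enter node (or from a Merge fed by a
// non-constant Enter) when the caller has no incoming control edges.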
void AddFrameForwardingControlEdge(const std::vector<ControlFlowInfo>& info,
Node* caller, Graph* g) {
int info_size = info.size();
if (caller->id() >= info_size) return;
const Node* frame = info[caller->id()].frame;
const bool is_in_while_loop = frame->id() != Graph::kSourceId;
if (!is_in_while_loop) return;
const bool has_incoming_control_edges =
absl::c_any_of(caller->in_edges(),
[](const Edge* edge) { return edge->IsControlEdge(); });
if (has_incoming_control_edges) return;
VLOG(3) << "Add a frame forwarding control edge: from=" << frame->name()
<< " to=" << caller->name();
Node* enter = g->FindNodeId(frame->id());
bool is_constant_enter = enter->attrs().Find("is_constant")->b();
if (is_constant_enter) {
g->AddControlEdge(enter, caller);
} else {
auto it = absl::c_find_if(enter->out_edges(), [](const Edge* e) {
return !e->IsControlEdge() && e->dst()->IsMerge();
});
if (it != enter->out_edges().end()) {
g->AddControlEdge((*it)->dst(), caller);
} else {
LOG(WARNING) << "Enter[is_constant=false] node: " << enter->name()
<< " does not have an outgoing edge to a Merge.";
}
}
}
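// Lowers functional control flow (If/Case/While) where requested and inlines
// every function call that passes validation; if any call was inlined and the
// device set is known, the Placer is re-run over the resulting graph.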
Status InlineFunctionCalls(const GrapplerItem& item,
const RewriterConfig::Toggle opt_level,
const bool lower_control_flow,
GraphDef* output_graph) {
bool is_aggressive = opt_level == RewriterConfig::AGGRESSIVE;
VLOG(2) << "Inline function calls: grappler_item_id=" << item.id
<< " (aggressive_mode=" << is_aggressive << ")";
FunctionLibraryDefinition flib_def =
FunctionLibraryDefinition(OpRegistry::Global(), item.graph.library());
std::unique_ptr<Graph> graph = std::make_unique<Graph>(flib_def);
GraphConstructorOptions graph_constructor_options;
graph_constructor_options.allow_internal_ops = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(graph_constructor_options,
item.graph, graph.get()));
using NodeNames = absl::flat_hash_set<absl::string_view>;
NodeNames fetch_nodes;
fetch_nodes.reserve(item.fetch.size());
for (const string& fetch : item.fetch) {
fetch_nodes.insert(ParseTensorName(fetch).node());
}
NodeNames keep_nodes(item.keep_ops.begin(), item.keep_ops.end());
if (item.save_op.size() > 0) {
keep_nodes.insert(item.save_op);
}
if (item.restore_op.size() > 0) {
keep_nodes.insert(item.restore_op);
}
std::vector<string> inlined_function_names;
NodeNames feed_nodes;
feed_nodes.reserve(item.feed.size());
for (const std::pair<std::string, Tensor>& feed : item.feed) {
feed_nodes.insert(ParseTensorName(feed.first).node());
}
std::vector<ControlFlowInfo> control_flow_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph.get(), &control_flow_info));
for (int i = 2; i < graph->num_node_ids(); ++i) {
Node* n = graph->FindNodeId(i);
if (n == nullptr) continue;
if (lower_control_flow && LowerUsingSwitchMergeIsOn(n)) {
VLOG(2) << "Lower functional control flow op: " << SummarizeNode(*n);
AddStrictInputSemantics(n, graph.get());
AddFrameForwardingControlEdge(control_flow_info, n, graph.get());
if (n->IsIfNode()) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, graph.get(), /*keep_node_fetchable=*/false));
} else if (n->IsCaseNode()) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, graph.get(), /*keep_node_fetchable=*/false));
} else if (n->IsWhileNode()) {
TF_RETURN_IF_ERROR(RewriteWhileNode(n, graph.get(), &flib_def, /*keep_node_fetchable=*/false));
}
continue;
}
if (!IsFunctionCall(flib_def, *n)) continue;
if (MarkedForXlaCompilation(n->def())) continue;
if (feed_nodes.contains(n->name())) continue;
if (n->name() == item.restore_op || n->name() == item.save_op) continue;
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(MakeFunctionBodyForInlining(*n, flib_def, &fbody));
InlineFunctionBodyOptions inline_options;
inline_options.ignore_noinline = is_aggressive;
bool force_inline_as_multi_device = LowerAsMultiDeviceFunctionIsOn(n);
if (n->IsPartitionedCall() || force_inline_as_multi_device) {
inline_options.output_control_src = OutputControlSource::kControlOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::MultiDevice();
} else {
inline_options.output_control_src = OutputControlSource::kDataOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::SingleDevice();
}
if (fetch_nodes.contains(n->name())) {
inline_options.keep_caller_node = KeepCallerNode::kFetchable;
} else if (keep_nodes.contains(n->name())) {
inline_options.keep_caller_node = KeepCallerNode::kTargetable;
} else {
inline_options.keep_caller_node = KeepCallerNode::kDoNotKeep;
}
Status can_inline_function_call =
ValidateInlining(n, fbody.get(), inline_options);
if (can_inline_function_call.ok()) {
bool has_outgoing_control_edges = absl::c_any_of(
n->out_edges(),
[](const Edge* edge) { return edge->IsControlEdge(); });
can_inline_function_call = ValidateSideEffectsExecution(
*fbody, inline_options.output_control_src,
has_outgoing_control_edges);
if (!can_inline_function_call.ok() &&
(is_aggressive || force_inline_as_multi_device)) {
VLOG(2) << "Ignore error: " << can_inline_function_call.message();
can_inline_function_call = absl::OkStatus();
}
}
if (can_inline_function_call.ok()) {
can_inline_function_call = ValidateNoDeadOutputs(flib_def, *fbody);
}
if (can_inline_function_call.ok()) {
VLOG(2) << "Inline function call node: " << n->name();
AddStrictInputSemantics(n, graph.get());
AddFrameForwardingControlEdge(control_flow_info, n, graph.get());
TF_RETURN_IF_ERROR(InlineFunctionBody(flib_def, graph.get(), n,
fbody.get(), inline_options));
inlined_function_names.push_back(
fbody->record->fdef().signature().name());
} else {
VLOG(2) << "Failed to inline function call node: "
<< can_inline_function_call.message();
}
}
VLOG(4) << "Inlined " << inlined_function_names.size()
<< " function calls: " << absl::StrJoin(inlined_function_names, ", ");
if (inlined_function_names.empty()) {
VLOG(3) << "Not placing graph after function inlining"
<< " (did not inline any of the function calls).";
} else if (item.devices().empty()) {
VLOG(3) << "Not placing graph after function inlining"
<< " (device set is empty)";
} else {
VLOG(3) << "Run placer for the graph after function inlining. "
<< "Devices: [" << absl::StrJoin(item.devices(), ", ") << "]";
DeviceSet device_set;
std::vector<std::unique_ptr<Device>> fake_devices;
for (const string& name : item.devices()) {
auto device = std::make_unique<FakeDevice>(name);
device_set.AddDevice(device.get());
fake_devices.push_back(std::move(device));
}
Placer placer(graph.get(), item.id, &flib_def, &device_set);
TF_RETURN_IF_ERROR(placer.Run());
}
graph->ToGraphDef(output_graph);
return absl::OkStatus();
}
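// Rewrites node inputs according to the tensor mapping collected during
// specialization, since output ports may have been renumbered after pruning
// unused outputs.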
void RestoreTensorMapping(const FunctionOptimizerContext& ctx,
GraphDef* optimized_graph) {
if (ctx.tensor_mapping().empty()) return;
for (NodeDef& node : *optimized_graph->mutable_node()) {
for (int idx = 0; idx < node.input_size(); ++idx) {
TensorId input_tensor = ParseTensorName(node.input(idx));
if (input_tensor.index() == Graph::kControlSlot) break;
auto mapping = ctx.tensor_mapping().find(input_tensor);
if (mapping != ctx.tensor_mapping().end()) {
node.set_input(idx, TensorIdToString(mapping->second));
}
}
}
}
}
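// Runs the two phases of the optimizer: function call inlining followed by
// specialization of the remaining calls, then prunes the function library.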
Status FunctionOptimizer::RunFunctionOptimizerPass(
const GrapplerItem& item, GraphDef* optimized_graph) const {
VLOG(3) << "Run function optimizer pass: grappler_item_id=" << item.id;
GraphDef graph_after_inlining;
TF_RETURN_IF_ERROR(InlineFunctionCalls(item, opt_level_, lower_control_flow_,
&graph_after_inlining));
FunctionOptimizerContext ctx(item, opt_level_, graph_after_inlining);
for (const NodeDef& node : graph_after_inlining.node()) {
const int num_nodes_before = optimized_graph->node_size();
const auto is_graph_modified = [&]() {
int num_nodes = optimized_graph->node_size();
DCHECK_GE(num_nodes, num_nodes_before) << "Nodes should not be removed";
return num_nodes > num_nodes_before;
};
const auto copy_node = [&]() { *optimized_graph->add_node() = node; };
const FunctionDef* func = FindFunctionCall(ctx, node);
if (func == nullptr) {
copy_node();
continue;
}
const string& func_name = func->signature().name();
const bool specialization_worthy = IsParametrized(*func) ||
HasTrulyConstInputs(node, ctx) ||
HasUnusedOutputs(node, *func, ctx);
const string grad_func = ctx.function_library().FindGradient(func_name);
const bool no_specialize =
!grad_func.empty() || ctx.IsFeedNode(node.name()) ||
MarkedNoSpecialize(*func) || MarkedForXlaCompilation(node);
if (specialization_worthy && !no_specialize) {
Status status = SpecializeFunction(node, *func, &ctx, optimized_graph);
if (!status.ok() && is_graph_modified()) {
return status;
} else if (!status.ok() && !is_graph_modified()) {
VLOG(3) << "Skip specialization error: " << status.message();
copy_node();
}
continue;
} else {
VLOG(2) << "Skip function specialization: " << func->signature().name();
copy_node();
}
}
RestoreTensorMapping(ctx, optimized_graph);
*optimized_graph->mutable_versions() = item.graph.versions();
*optimized_graph->mutable_library() =
PruneFunctionLibrary(ctx.function_library(), *optimized_graph);
return absl::OkStatus();
}
Status FunctionOptimizer::Optimize(Cluster*, const GrapplerItem& item,
GraphDef* optimized_graph) {
if (item.graph.library().function_size() == 0) {
return absl::AbortedError("Nothing to do.");
}
TF_RETURN_IF_ERROR(RunFunctionOptimizerPass(item, optimized_graph));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/job:localhost/replica:0/task:0/device:CPU:0";
}
class FunctionOptimizerTest : public GrapplerTest {};
TEST_F(FunctionOptimizerTest, InlineFunction_SimpleFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
const string arg0 = "Func/y/input/_0";
const string ret0 = "Func/y/output/_1";
const Tensor kTwo = test::AsScalar<int64_t>(2);
GraphDef expected = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef(arg0, "Identity", {"x"}, {{"T", DT_FLOAT}}),
NDef("y/two", "Const", {}, {{"dtype", DT_INT64}, {"value", kTwo}}),
NDef("y/scale", "Cast", {"y/two"},
{{"DstT", DT_FLOAT}, {"SrcT", DT_INT64}}),
NDef("y/y", "Mul", {arg0, "y/scale"}, {{"T", DT_FLOAT}}),
NDef(ret0, "Identity", {"y/y"}, {{"T", DT_FLOAT}}),
NDef("z", "Identity", {ret0}, {{"T", DT_FLOAT}})},
{});
for (NodeDef& node : *expected.mutable_node()) node.set_device(kDevice);
CompareGraphs(expected, output);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FixedTypeFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<float>(2.0f);
FunctionDef x_times_two = FunctionDefHelper::Define(
"XTimesTwo",
{"x: float"},
{"y: float"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_FLOAT}}},
{{"enter"},
"Enter",
{"x"},
{{"T", DT_FLOAT}, {"frame_name", "frame"}}},
{{"y"}, "Mul", {"x", "two"}, {{"T", DT_FLOAT}}},
});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
x_times_two,
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "XTimesTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithOutputMapping) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"Exp_func",
{"in: float"},
{"out: float"},
{},
{{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
{{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
{{"out", "Exp:y:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "Exp_func", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "Exp_func");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithInputForwarding) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"ForwardInputs",
{"in0: float", "in1: float", "arg2: float", "arg3: int32", "arg4: float"},
{"out0: float", "arg2: float", "arg3: int32"},
{},
{},
{{"out0", "in0"}, {"arg2", "arg2"}, {"arg3", "arg3"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x2", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x3", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("x4", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "ForwardInputs", {"x0", "x1", "x2", "x3", "x4"}, {}, kDevice),
NDef("z0", "Identity", {"y:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z1", "Identity", {"y:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z2", "Identity", {"y:2"}, {{"T", DT_INT32}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "ForwardInputs");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z0", "z1", "z2"};
item.feed.emplace_back("x0", test::AsScalar<float>(3.14f));
item.feed.emplace_back("x1", test::AsScalar<float>(2.7f));
item.feed.emplace_back("x2", test::AsScalar<float>(1.0f));
item.feed.emplace_back("x4", test::AsScalar<float>(-1.0f));
item.feed.emplace_back("x3", test::AsScalar<int>(1234));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
test::ExpectTensorEqual<int>(tensors_expected[2], tensors[2]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithoutInput) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"GenerateTwo",
{},
{"o: T"},
{"T: {float, double}"},
{{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("y", "GenerateTwo", {}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "GenerateTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithNestedFunctionCall) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("outputs", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func, square_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "MySquare");
EXPECT_NE(node.op(), "MyMul");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"outputs"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_TestFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"out1", "out2"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out1", "out2"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
test::ExpectTensorEqual<float>(expected[1], optimized[1]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_IdentityFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Create(
"Identity_func",
{"in: float"},
{"out: float"},
{},
{{{"Identity"}, "Identity", {"in"}, {{"T", DT_FLOAT}}}},
{{"out", "Identity:output:0"}});
auto x = ops::Const(scope, 1.0f, {3, 5, 7});
auto z = ops::Const(scope, 3.0f, {3, 5, 7});
NameAttrList fn;
fn.set_name("Identity_func");
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, z},
{DT_FLOAT}, fn);
auto out = ops::Identity(scope.WithOpName("out"), g0.output[0]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected = EvaluateNodes(item.graph, {"out"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradientNoInlineFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
(*func.mutable_attr())["_noinline"].set_b(true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
CompareGraphs(item.graph, output);
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionSimpleFunction) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func} );
Tensor pi = test::AsScalar<float>(3.14f);
item.feed.emplace_back("a", pi);
item.feed.emplace_back("b", pi);
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
{
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
GraphDef optimized_graph;
TF_EXPECT_OK(item.AddDevice(kDevice));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, kDevice),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithControlDependencies) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::ON, true);
const Tensor kOne = test::AsScalar<float>(1.0);
const Tensor kTwo = test::AsScalar<float>(2.0);
const TensorShape scalar = TensorShape({});
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T", "v: resource"}, {"z:T"}, {"T: {float, double}"},
{{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"add"},
"AssignAddVariableOp",
{"v", "one:output:0"},
{{"dtype", DT_FLOAT}}},
{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}},
{{"side_effects", "add"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out_1", "out_2"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}}),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("f1", "PartitionedCall", {"a", "b", "v", "^init_v"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("f2", "PartitionedCall", {"f1", "f1", "v", "^f1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out_1", "Identity", {"f2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_2", "ReadVariableOp", {"v", "^f1", "^f2"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}},
kDevice),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("Func/f1/input_control_node/_0", "NoOp", {"^init_v"}, {}, kDevice),
NDef("Func/f1/input/_1", "Identity",
{"a", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_2", "Identity",
{"b", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_3", "Identity",
{"v", "^Func/f1/input_control_node/_0"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f1/one", "Const", {"^Func/f1/input_control_node/_0"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f1/mul", "Mul", {"Func/f1/input/_1", "Func/f1/input/_2"},
{{"T", DT_FLOAT}}, kDevice),
NDef("f1/add", "AssignAddVariableOp", {"Func/f1/input/_3", "f1/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("Func/f1/output/_4", "Identity", {"f1/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/output_control_node/_5", "NoOp", {"^f1/add"}, {}, kDevice),
NDef("Func/f2/input_control_node/_6", "NoOp",
{"^Func/f1/output_control_node/_5"}, {}, kDevice),
NDef("Func/f2/input/_7", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_8", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_9", "Identity",
{"v", "^Func/f2/input_control_node/_6"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f2/one", "Const", {"^Func/f2/input_control_node/_6"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f2/add", "AssignAddVariableOp", {"Func/f2/input/_9", "f2/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("f2/mul", "Mul", {"Func/f2/input/_7", "Func/f2/input/_8"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/output/_10", "Identity", {"f2/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f2/output_control_node/_11", "NoOp", {"^f2/add"}, {},
kDevice),
NDef("out_1", "Identity", {"Func/f2/output/_10"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out_2", "ReadVariableOp",
{"v", "^Func/f1/output_control_node/_5",
"^Func/f2/output_control_node/_11"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
item.feed.emplace_back("a", kOne);
item.feed.emplace_back("b", kTwo);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 2);
EXPECT_EQ(tensors_expected[0].flat<float>()(0), 4.0);
EXPECT_EQ(tensors_expected[1].flat<float>()(0), 3.0);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
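// Verifies that a device placement recorded inside the function body
// ("mul" pinned to CPU:1) is honored by the inlined nodes.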
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, cpu1),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
CompareGraphs(expected, optimized_graph);
}
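// Same placement check with two chained calls: each inlined body keeps the
// pinned device, and the inter-call edge is routed through the first call's
// output Identity node.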
TEST_F(FunctionOptimizerTest,
InlineMultipleIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"e"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "PartitionedCall", {"a", "c"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("e", "Identity", {"d"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_c_x = "Func/c/input/_0";
const string input_c_y = "Func/c/input/_1";
const string output_c_z = "Func/c/output/_2";
const string input_d_x = "Func/d/input/_3";
const string input_d_y = "Func/d/input/_4";
const string output_d_z = "Func/d/output/_5";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef(input_c_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_c_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, cpu1),
NDef("c/mul", "Mul", {input_c_x, input_c_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_c_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef(input_d_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_d_y, "Identity", {output_c_z}, {{"T", DT_FLOAT}}, cpu1),
NDef("d/mul", "Mul", {input_d_x, input_d_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_d_z, "Identity", {"d/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef("e", "Identity", {output_d_z}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
CompareGraphs(expected, optimized_graph);
}
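// A function with no side effects still gets an output control node (hanging
// off its input control node), so that "^f1"-style control edges on callers
// remain well defined after inlining.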
TEST_F(FunctionOptimizerTest,
InlineIndirectFunctionWithControlDependencyAndNoSideEffects) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
const Tensor kOne = test::AsScalar<float>(1.0);
const Tensor kTwo = test::AsScalar<float>(2.0);
const TensorShape scalar = TensorShape({});
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "NoOp", {}, {}, kDevice),
NDef("f1", "PartitionedCall", {"a", "b", "^c"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("f2", "PartitionedCall", {"f1", "f1", "^f1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out", "Identity", {"f2"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "NoOp", {}, {}, kDevice),
NDef("Func/f1/input_control_node/_0", "NoOp", {"^c"}, {}, kDevice),
NDef("Func/f1/input/_1", "Identity",
{"a", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_2", "Identity",
{"b", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("f1/mul", "Mul", {"Func/f1/input/_1", "Func/f1/input/_2"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f1/output/_3", "Identity", {"f1/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/output_control_node/_4", "NoOp",
{"^Func/f1/input_control_node/_0"}, {}, kDevice),
NDef("Func/f2/input_control_node/_5", "NoOp",
{"^Func/f1/output_control_node/_4"}, {}, kDevice),
NDef("Func/f2/input/_6", "Identity",
{"Func/f1/output/_3", "^Func/f2/input_control_node/_5"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_7", "Identity",
{"Func/f1/output/_3", "^Func/f2/input_control_node/_5"},
{{"T", DT_FLOAT}}, kDevice),
NDef("f2/mul", "Mul", {"Func/f2/input/_6", "Func/f2/input/_7"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/output/_8", "Identity", {"f2/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out", "Identity", {"Func/f2/output/_8"}, {{"T", DT_FLOAT}},
kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
item.feed.emplace_back("a", kOne);
item.feed.emplace_back("b", kTwo);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
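// Functions that may produce dead tensors (a Switch branch that is never
// merged back) must not be inlined, even when hidden behind a proxy function;
// the optimized graph is expected to be unchanged.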
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionDoNotInlineDeadOutputs) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef dead_outputs = FunctionDefHelper::Create(
"DeadOutputs", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{
{{"switch"}, "Switch", {"x", "cond"}, {{"T", "$T"}}},
{{"if_false"}, "Identity", {"switch:output_false:0"}, {{"T", "$T"}}},
{{"if_true"}, "Identity", {"switch:output_true:0"}, {{"T", "$T"}}},
},
{{"z", "if_false:output:0"}});
FunctionDef proxy_func = FunctionDefHelper::Create(
"Proxy", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{{{"dead"}, "DeadOutputs", {"x", "cond"}, {{"T", "$T"}}}},
{{"z", "dead:z:0"}});
GrapplerItem item;
item.fetch = {"out0", "out1"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("fn0", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("DeadOutputs", {{"T", DT_FLOAT}})}},
kDevice),
NDef("fn1", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("Proxy", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out0", "Identity", {"fn0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out1", "Identity", {"fn1"}, {{"T", DT_FLOAT}}, kDevice)},
{dead_outputs, proxy_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = item.graph;
CompareGraphs(expected, optimized_graph);
const Tensor one = test::AsScalar<float>(1.0);
item.feed.emplace_back("a", one);
item.feed.emplace_back("b", test::AsScalar<bool>(false));
auto tensors = EvaluateFetchNodes(item);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors[0], one);
test::ExpectTensorEqual<float>(tensors[1], one);
}
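// When both Switch branches feed a Merge, no dead tensor can escape the
// function, so the body is safe to inline.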
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithMergedDeadTensors) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef no_dead_outputs = FunctionDefHelper::Create(
"NoDeadOutputs", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{
{{"switch"}, "Switch", {"x", "cond"}, {{"T", "$T"}}},
{{"if_false"}, "Identity", {"switch:output_false:0"}, {{"T", "$T"}}},
{{"if_true"}, "Identity", {"switch:output_true:0"}, {{"T", "$T"}}},
{{"merge"},
"Merge",
{"if_false:output:0", "if_true:output:0"},
{{"T", "$T"}, {"N", 2}}},
},
{{"z", "merge:output:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("fn", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("NoDeadOutputs", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out", "Identity", {"fn"}, {{"T", DT_FLOAT}}, kDevice)},
{no_dead_outputs});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("Func/fn/input/_0", "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("Func/fn/input/_1", "Identity", {"b"}, {{"T", DT_BOOL}}, kDevice),
NDef("fn/switch", "Switch", {"Func/fn/input/_0", "Func/fn/input/_1"},
{{"T", DT_FLOAT}}, kDevice),
NDef("fn/if_false", "Identity", {"fn/switch"}, {{"T", DT_FLOAT}},
kDevice),
NDef("fn/if_true", "Identity", {"fn/switch:1"}, {{"T", DT_FLOAT}},
kDevice),
NDef("fn/merge", "Merge", {"fn/if_false", "fn/if_true"},
{{"T", DT_FLOAT}, {"N", 2}}, kDevice),
NDef("Func/fn/output/_2", "Identity", {"fn/merge"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out", "Identity", {"Func/fn/output/_2"}, {{"T", DT_FLOAT}},
kDevice)},
{no_dead_outputs});
CompareGraphs(expected, optimized_graph);
const Tensor one = test::AsScalar<float>(1.0);
item.feed.emplace_back("a", one);
item.feed.emplace_back("b", test::AsScalar<bool>(false));
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
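// Verifies recursive inlining: the PartitionedCall inside MySquare is inlined
// too, flattening the body into nodes like "b/square/mul".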
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithNestedFunctionCall) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"output:T"}, {"T: {float, double}"},
{{{"square"},
"PartitionedCall",
{"x", "x"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}}}},
{{"output", "square:output:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"c"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MySquare", {{"T", DT_FLOAT}})}},
kDevice),
NDef("c", "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func, square_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("Func/b/input/_0", "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/input/_2", "Identity", {"Func/b/input/_0"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/input/_3", "Identity", {"Func/b/input/_0"},
{{"T", DT_FLOAT}}, kDevice),
NDef("b/square/mul", "Mul",
{"Func/b/square/input/_2", "Func/b/square/input/_3"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/output/_4", "Identity", {"b/square/mul"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/output/_1", "Identity", {"Func/b/square/output/_4"},
{{"T", DT_FLOAT}}, kDevice),
NDef("c", "Identity", {"Func/b/output/_1"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
Tensor three = test::AsScalar<float>(3.0f);
item.feed.emplace_back("a", three);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
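// Builds an item whose function library wraps a functional If node (marked
// with _lower_using_switch_merge) that selects between MyAdd and MyMul.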
GrapplerItem ConditionalAdd() {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionDef add_func = FDH::Create(
"MyAdd", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"add"}, "Add", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "add:z:0"}});
FunctionDef mul_func = FDH::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef add_or_mul_func = FDH::Create(
"AddOrMul", {"cond:bool", "x:float", "y:float"}, {"z:float"}, {},
{
{{"if_node"},
"If",
{"cond", "x", "y"},
{
{"Tcond", DT_BOOL},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"then_branch", FDH::FunctionRef("MyAdd", {{"T", DT_FLOAT}})},
{"else_branch", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})},
{"_lower_using_switch_merge", true},
}},
},
{{"z", "if_node:output:0"}}, {{"side_effect", "if_node"}});
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("is_add", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "PartitionedCall", {"is_add", "a", "b"},
{{"Tin", DataTypeSlice{DT_BOOL, DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("AddOrMul")}},
kDevice),
NDef("d", "Identity", {"c", "^c"}, {{"T", DT_FLOAT}}, kDevice)},
{add_or_mul_func, add_func, mul_func});
return item;
}
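// With lower_control_flow=true the inlined If is lowered into Switch/Merge
// nodes and no function call ops survive.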
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithFunctionalControlFlow) {
  FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE,
                              /*lower_control_flow=*/true);
GrapplerItem item = ConditionalAdd();
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const auto count_nodes_with_op = [&](const string& op) {
return absl::c_count_if(optimized_graph.node(), [&](const NodeDef& node) {
return node.op() == op;
});
};
EXPECT_EQ(count_nodes_with_op("PartitionedCall"), 0);
EXPECT_EQ(count_nodes_with_op("If"), 0);
EXPECT_EQ(count_nodes_with_op("Switch"), 3);
EXPECT_EQ(count_nodes_with_op("Merge"), 2);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
Tensor one = test::AsScalar<float>(1.0);
Tensor two = test::AsScalar<float>(2.0);
Tensor three = test::AsScalar<float>(3.0);
const auto feed_args = [&](bool is_add) {
std::vector<std::pair<string, Tensor>> feed;
feed.emplace_back("a", one);
feed.emplace_back("b", two);
feed.emplace_back("is_add", test::AsScalar<bool>(is_add));
return feed;
};
{
item.feed = feed_args(true);
optimized.feed = feed_args(true);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], three);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
item.feed = feed_args(false);
optimized.feed = feed_args(false);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], two);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
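// With lower_control_flow=false the call is still inlined, but the functional
// If node is left intact.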
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionDontLowerControlFlow) {
  FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE,
                              /*lower_control_flow=*/false);
GrapplerItem item = ConditionalAdd();
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const auto count_nodes_with_op = [&](const string& op) {
return absl::c_count_if(optimized_graph.node(), [&](const NodeDef& node) {
return node.op() == op;
});
};
EXPECT_EQ(count_nodes_with_op("PartitionedCall"), 0);
EXPECT_EQ(count_nodes_with_op("If"), 1);
EXPECT_EQ(count_nodes_with_op("Switch"), 0);
EXPECT_EQ(count_nodes_with_op("Merge"), 0);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
Tensor one = test::AsScalar<float>(1.0);
Tensor two = test::AsScalar<float>(2.0);
Tensor three = test::AsScalar<float>(3.0);
const auto feed_args = [&](bool is_add) {
std::vector<std::pair<string, Tensor>> feed;
feed.emplace_back("a", one);
feed.emplace_back("b", two);
feed.emplace_back("is_add", test::AsScalar<bool>(is_add));
return feed;
};
{
item.feed = feed_args(true);
optimized.feed = feed_args(true);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], three);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
item.feed = feed_args(false);
optimized.feed = feed_args(false);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], two);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
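// Functions marked _noinline are specialized rather than inlined: the library
// gains a per-call-site copy named after the calling node and the graph id.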
TEST_F(FunctionOptimizerTest, SpecializeFunctionXTimesTwo) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef x_times_two = test::function::XTimesTwo();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {x_times_two};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(1, output.library().function_size());
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph",
output.library().function(0).signature().name());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph", node.op());
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
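// Indirect-call variant: the PartitionedCall node survives and only its "f"
// attribute is redirected to the specialized function.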
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionXTimesTwo) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef x_times_two = test::function::XTimesTwo();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {x_times_two};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}},
kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(1, output.library().function_size());
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph",
output.library().function(0).signature().name());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("PartitionedCall", node.op());
auto& func = AttrSlice(node).Find("f")->func();
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph", func.name());
auto& tin = AttrSlice(node).Find("Tin")->list();
auto& tout = AttrSlice(node).Find("Tout")->list();
ASSERT_EQ(1, tin.type_size());
ASSERT_EQ(1, tout.type_size());
EXPECT_EQ(DT_FLOAT, tin.type(0));
EXPECT_EQ(DT_FLOAT, tout.type(0));
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
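// Const inputs are pushed down into the specialized body; the call site keeps
// one data input plus the const's hoisted control dependency "^init" (the
// redundant "^x" disappears since "x" is already a data input).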
TEST_F(FunctionOptimizerTest, SpecializeFunctionPushDownConstInput) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("init", "NoOp", {}, {}, kDevice),
NDef("two", "Const", {"^init", "^x"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("y", "MyMul", {"x", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(1, output.library().function_size());
const FunctionDef& specialized = output.library().function(0);
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph",
specialized.signature().name());
EXPECT_EQ(1, specialized.signature().input_arg_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^init", node.input(1));
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
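// Same const push-down, exercised through an indirect PartitionedCall.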
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionPushDownConstInput) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("init", "NoOp", {}, {}, kDevice),
NDef("two", "Const", {"^init", "^x"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("y", "PartitionedCall", {"x", "two"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(1, output.library().function_size());
const FunctionDef& specialized = output.library().function(0);
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph",
specialized.signature().name());
EXPECT_EQ(1, specialized.signature().input_arg_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("PartitionedCall", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^init", node.input(1));
auto& func = AttrSlice(node).Find("f")->func();
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph", func.name());
auto& tin = AttrSlice(node).Find("Tin")->list();
auto& tout = AttrSlice(node).Find("Tout")->list();
ASSERT_EQ(1, tin.type_size());
ASSERT_EQ(1, tout.type_size());
EXPECT_EQ(DT_FLOAT, tin.type(0));
EXPECT_EQ(DT_FLOAT, tout.type(0));
}
}
ASSERT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
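// Specialization is keyed on the call context: identical contexts share one
// specialized copy (mul_1/mul_2 and mul_4/mul_5), while a different dtype
// (mul_3) or a different const input (mul_6) forces a new one.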
TEST_F(FunctionOptimizerTest, SpecializeFunction_OncePerUniqueContext) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, int32}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
const Tensor kThree = test::AsScalar<float>(3.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("xi", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("yi", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("two", "Const", {"^init", "^xf"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("three", "Const", {"^init", "^xf"},
{{"dtype", DT_FLOAT}, {"value", kThree}}, kDevice),
NDef("mul_1", "MyMul", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_2", "MyMul", {"yf", "xf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_3", "MyMul", {"xi", "yi"}, {{"T", DT_INT32}}, kDevice),
NDef("mul_4", "MyMul", {"xf", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_5", "MyMul", {"yf", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_6", "MyMul", {"three", "xf"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
item.fetch = {"mul_1", "mul_2", "mul_3", "mul_4", "mul_5", "mul_6"};
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(4, output.library().function_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "mul_1" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_1_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("yf", node.input(1));
} else if (node.name() == "mul_2" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_1_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("yf", node.input(0));
EXPECT_EQ("xf", node.input(1));
} else if (node.name() == "mul_3" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_3_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xi", node.input(0));
EXPECT_EQ("yi", node.input(1));
} else if (node.name() == "mul_4" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_4_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("^init", node.input(1));
} else if (node.name() == "mul_5" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_4_at_tf_graph", node.op());
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("yf", node.input(0));
gtl::FlatSet<string> expected_ctrl = {"^init", "^xf"};
gtl::FlatSet<string> actual_ctrl = {node.input(1), node.input(2)};
EXPECT_EQ(expected_ctrl, actual_ctrl);
} else if (node.name() == "mul_6" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_6_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("^init", node.input(1));
}
}
EXPECT_EQ(6, count);
Tensor pi = test::AsScalar<float>(3.14f);
Tensor four = test::AsScalar<int32>(4);
item.feed = {{"xf", pi}, {"yf", pi}, {"xi", four}, {"yi", four}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
test::ExpectTensorEqual<int32>(tensors_expected[2], tensors[2]);
test::ExpectTensorEqual<float>(tensors_expected[3], tensors[3]);
test::ExpectTensorEqual<float>(tensors_expected[4], tensors[4]);
test::ExpectTensorEqual<float>(tensors_expected[5], tensors[5]);
}
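// Outputs nobody consumes are pruned from the specialized function, and the
// surviving output ports are renumbered at the call site (fn5:2 becomes
// fn5:1).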
TEST_F(FunctionOptimizerTest, SpecializeFunctionForUsedOutputTensors) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T", "z3:T"}, {"T: {float, int32}"},
{{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output3"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "output1:z:0"}, {"z2", "output2:z:0"}, {"z3", "output3:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_func};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_0", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_1", "Identity", {"fn1:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_2", "Identity", {"fn1:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn2_0", "Identity", {"fn2:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn3", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn3_1", "Identity", {"fn3:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn4", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn4_2", "Identity", {"fn4:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn5", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_0", "Identity", {"fn5:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_2", "Identity", {"fn5:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn6", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(6, output.library().function_size());
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn1_at_tf_graph", node.op());
} else if (node.name() == "fn2" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn2_at_tf_graph", node.op());
} else if (node.name() == "fn3" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn3_at_tf_graph", node.op());
} else if (node.name() == "fn4" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn4_at_tf_graph", node.op());
} else if (node.name() == "fn5" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn5_at_tf_graph", node.op());
} else if (node.name() == "fn6" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn6_at_tf_graph", node.op());
}
if (node.name() == "use_fn3_1" && ++found) {
EXPECT_EQ("fn3", node.input(0));
} else if (node.name() == "use_fn4_2" && ++found) {
EXPECT_EQ("fn4", node.input(0));
} else if (node.name() == "use_fn5_0" && ++found) {
EXPECT_EQ("fn5", node.input(0));
} else if (node.name() == "use_fn5_2" && ++found) {
EXPECT_EQ("fn5:1", node.input(0));
}
}
EXPECT_EQ(10, found);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"use_fn1_0", "use_fn1_1", "use_fn1_2", "use_fn2_0",
"use_fn3_1", "use_fn4_2", "use_fn5_0", "use_fn5_2"};
item.feed = {{"xf", pi}, {"yf", pi}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
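// Indirect-call variant of output pruning: each specialized function's Tout
// list shrinks to exactly the set of consumed outputs.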
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionForUsedOutputTensors) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T", "z3:T"}, {"T: {float, int32}"},
{{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output3"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "output1:z:0"}, {"z2", "output2:z:0"}, {"z3", "output3:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_func};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn1_0", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_1", "Identity", {"fn1:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_2", "Identity", {"fn1:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn2_0", "Identity", {"fn2:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn3", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn3_1", "Identity", {"fn3:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn4", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn4_2", "Identity", {"fn4:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn5", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn5_0", "Identity", {"fn5:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_2", "Identity", {"fn5:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn6", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(6, output.library().function_size());
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn1_at_tf_graph", func.name());
ASSERT_EQ(3, tout.type_size());
} else if (node.name() == "fn2" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn2_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn3" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn3_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn4" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn4_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn5" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn5_at_tf_graph", func.name());
ASSERT_EQ(2, tout.type_size());
} else if (node.name() == "fn6" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn6_at_tf_graph", func.name());
ASSERT_EQ(0, tout.type_size());
}
if (node.name() == "use_fn3_1" && ++found) {
EXPECT_EQ("fn3", node.input(0));
} else if (node.name() == "use_fn4_2" && ++found) {
EXPECT_EQ("fn4", node.input(0));
} else if (node.name() == "use_fn5_0" && ++found) {
EXPECT_EQ("fn5", node.input(0));
} else if (node.name() == "use_fn5_2" && ++found) {
EXPECT_EQ("fn5:1", node.input(0));
}
}
EXPECT_EQ(10, found);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"use_fn1_0", "use_fn1_1", "use_fn1_2", "use_fn2_0",
"use_fn3_1", "use_fn4_2", "use_fn5_0", "use_fn5_2"};
item.feed = {{"xf", pi}, {"yf", pi}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
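// Library functions unreachable from the main graph are deleted after
// optimization.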
TEST_F(FunctionOptimizerTest, PruningUselessLibraryFunctions) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
auto func = test::function::XTimesTwo();
(*func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "test_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, "/device:CPU:0"),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, "/device:CPU:0")},
{
func,
test::function::XTimesTwoInt32(),
test::function::XTimes16(),
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
ASSERT_EQ(output.library().function().size(), 1);
EXPECT_EQ(output.library().function(0).signature().name(),
"XTimesTwo_specialized_for_y_at_test_graph");
}
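// Functions referenced by the item's save and restore ops must survive
// library pruning.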
TEST_F(FunctionOptimizerTest, PreserveSaverDefFunctions) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
auto func = test::function::XTimesTwo();
(*func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "test_graph";
item.graph = test::function::GDef(
{
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, "/device:CPU:0"),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("Restore", "StatefulPartitionedCall", {},
{{"Tin", {}},
{"Tout", {}},
{"f", FDH::FunctionRef("RestoreFn", {})}},
"/device:CPU:0"),
NDef("Save", "StatefulPartitionedCall", {},
{{"Tin", {}},
{"Tout", {}},
{"f", FDH::FunctionRef("SaveFn", {})}},
"/device:CPU:0"),
},
{
func,
test::function::XTimesTwoInt32(),
test::function::XTimes16(),
FDH::Create("RestoreFn", {}, {}, {}, {}, {}),
FDH::Create("SaveFn", {}, {}, {}, {}, {}),
});
item.restore_op = "Restore";
item.save_op = "Save";
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
ASSERT_EQ(output.library().function().size(), 3);
std::vector<std::string> signature_names;
for (const auto& function : output.library().function()) {
signature_names.push_back(function.signature().name());
}
EXPECT_THAT(signature_names, ::testing::UnorderedElementsAre(
"XTimesTwo_specialized_for_y_at_test_graph",
"RestoreFn", "SaveFn"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f1ff1941-455f-40bb-b02a-d14a5a2cd4cb | cpp | tensorflow/tensorflow | scoped_allocator_optimizer | tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc | tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#define LOG_WARNING_AND_RETURN_IF_ERROR(...) \
do { \
const ::tensorflow::Status _status = (__VA_ARGS__); \
if (TF_PREDICT_FALSE(!_status.ok())) { \
LOG(WARNING) << "error: " << _status; \
return _status; \
} \
} while (0)
namespace tensorflow {
namespace grappler {
namespace {
const char kScopedAllocatorAttrName[] = "_scoped_allocator";
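// Returns true if the last path component of node_name, with any trailing
// "_<digits>" uniquifying suffix stripped, equals op_name.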
bool HasOpName(const string& node_name, const string& op_name) {
size_t begin = node_name.rfind('/');
if (begin == string::npos) {
begin = 0;
} else {
++begin;
}
size_t end = node_name.rfind('_');
if (end != string::npos) {
size_t p = end + 1;
while (p < node_name.size()) {
if (!isdigit(node_name[p])) {
end = node_name.size();
break;
}
++p;
}
} else {
end = node_name.size();
}
return node_name.substr(begin, end - begin) == op_name;
}
Status GetOutputDataType(
const std::vector<OpInfo::TensorProperties>& output_props, int output_index,
DataType* dtype) {
int output_props_size = output_props.size();
if (output_index >= output_props_size) {
return errors::Internal("Invalid output index ", output_index,
" size of output_props ", output_props.size());
}
*dtype = output_props[output_index].dtype();
return absl::OkStatus();
}
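// Validates that every op in the group has exactly one output with a fully
// defined shape and a common dtype; collects the shapes in group order.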
Status CheckTypesAndGetShapes(const GraphProperties& graph_properties,
const std::vector<NodeDef*>& ops, DataType* type,
std::vector<TensorShape>* shapes) {
VLOG(1) << "CheckTypesAndGetShapes";
*type = DT_INVALID;
for (NodeDef* n : ops) {
AttrSlice n_attrs = AttrSlice(*n);
DataType dtype;
LOG_WARNING_AND_RETURN_IF_ERROR(GetNodeAttr(n_attrs, "T", &dtype));
VLOG(2) << "op " << n->name() << " has type " << dtype << " shapes.size() "
<< shapes->size();
if (!graph_properties.HasOutputProperties(n->name())) {
LOG(ERROR) << "Node " << n->DebugString() << " lacks output shape.";
return errors::Aborted("Node ", n->name(), " lacks output shape.");
}
const std::vector<OpInfo::TensorProperties>& prop_list =
graph_properties.GetOutputProperties(n->name());
if (prop_list.size() != 1) {
return errors::Aborted("Node ", n->name(),
" does not have exactly one output as expected "
"by ScopedAllocatorOptimizer");
}
const OpInfo::TensorProperties& props = prop_list[0];
if (shapes->empty()) {
*type = props.dtype();
} else if (*type != props.dtype()) {
return errors::Aborted("Group ops don't all have same type");
}
if (*type != dtype) {
return errors::Internal(
"Type mismatch: type in op attr = ", DataTypeString(dtype),
", type in output props = ", DataTypeString(*type));
}
if (!TensorShape::IsValid(props.shape()) || props.shape().unknown_rank()) {
return errors::Aborted("Complete shape not known for ", n->name());
}
VLOG(2) << "Adding shape " << props.shape().DebugString();
shapes->push_back(TensorShape(props.shape()));
}
return absl::OkStatus();
}
struct InputDesc {
NodeDef* from_node_def;
int output_slot;
NodeDef* to_node_def;
InputDesc(NodeDef* f, int os, NodeDef* t)
: from_node_def(f), output_slot(os), to_node_def(t) {}
};
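// Removes nd from the graph and node_map.  Uses swap-with-last, so the node
// ordering of the GraphDef is not preserved.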
void RemoveNode(NodeDef* nd, GraphDef* graph, NodeMap* node_map) {
node_map->RemoveNode(nd->name());
protobuf::RepeatedPtrField<NodeDef>* nodes = graph->mutable_node();
for (int i = 0; i < nodes->size(); ++i) {
if (nd->name() == (*nodes)[i].name()) {
nodes->SwapElements(i, nodes->size() - 1);
nodes->RemoveLast();
return;
}
}
LOG(FATAL) << "Failed to find node " << nd->name() << " in graph";
}
Status RemoveEdge(const string& input_edge_name, const string& from_node_name,
NodeDef* to_node, NodeMap* node_map) {
protobuf::RepeatedPtrField<string>* inputs = to_node->mutable_input();
int edge_index = -1;
for (edge_index = 0; edge_index < inputs->size(); ++edge_index) {
VLOG(2) << " consider edge " << (*inputs)[edge_index];
if ((*inputs)[edge_index] == input_edge_name) {
break;
}
}
if (edge_index >= inputs->size()) {
return errors::Internal("Could not find input name ", input_edge_name,
" at node ", to_node->name());
}
if (node_map) {
node_map->RemoveOutput(from_node_name, to_node->name());
}
inputs->DeleteSubrange(edge_index, 1);
return absl::OkStatus();
}
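// Inputs from Const or Exit nodes, or outputs consumed more than once within
// the group, are not wired directly into the scoped-allocator consumers;
// instead an Identity node is interposed and the consumer reads its output 0.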
Status MaybeRewriteInput(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const DataType& dtype,
NodeDef* input, const string& edge_name,
int output_index, NodeDef* op, NodeDef** new_input,
int* new_output_index, bool* rewrite) {
*rewrite = IsConstant(*input) || IsExit(*input) ||
(sa_opti->repeated_outputs().find(edge_name) !=
sa_opti->repeated_outputs().end());
if (!(*rewrite)) {
*new_input = input;
*new_output_index = output_index;
return absl::OkStatus();
}
int unique_id;
LOG_WARNING_AND_RETURN_IF_ERROR(sa_opti->NewIdentityId(&unique_id));
string identity_name = strings::StrCat("scoped_allocator_identity_",
unique_id, "_", invocation_count);
NodeDefBuilder identity_builder(identity_name, "Identity");
identity_builder.Device(op->device());
identity_builder.Attr("T", dtype);
identity_builder.Input(
NodeDefBuilder::NodeOut(input->name(), output_index, dtype));
NodeDef* identity = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(identity_builder.Finalize(identity));
node_map->AddNode(identity_name, identity);
node_map->AddOutput(input->name(), identity_name);
node_map->UpdateInput(op->name(), input->name(), identity_name);
*op->mutable_input(0) = identity_name;
*new_input = identity;
*new_output_index = 0;
VLOG(1) << "Rewrite input " << edge_name << " op " << op->name()
<< " old output index " << output_index << " with identity "
<< identity_name << " new output index 0";
return absl::OkStatus();
}
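// For each op in the group, finds its single non-control input (rewriting it
// through an Identity when necessary) and checks that its dtype matches the
// group dtype.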
Status GetInputs(ScopedAllocatorOptimizer* sa_opti, int64_t invocation_count,
GraphDef* graph, const GraphProperties& graph_properties,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
DataType dtype, std::vector<InputDesc>* inputs) {
VLOG(1) << "Getinputs";
for (NodeDef* n : ops) {
NodeDef* inode = nullptr;
int output_index = 0;
DataType inode_dtype = DT_INVALID;
VLOG(2) << "for node " << n->name();
for (const auto& input_name : n->input()) {
if (!IsControlInput(input_name)) {
if (inode) {
return errors::Internal("Found more than one input for node ",
n->name());
}
ParseNodeName(input_name, &output_index);
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
bool rewrite;
LOG_WARNING_AND_RETURN_IF_ERROR(MaybeRewriteInput(
sa_opti, invocation_count, graph, node_map, dtype, inode,
input_name, output_index, n, &inode, &output_index, &rewrite));
if (rewrite) {
inode_dtype = dtype;
}
VLOG(2) << "inode after rewrite " << inode->DebugString()
<< " output_index " << output_index;
}
}
if (inode == nullptr) {
return errors::Internal("Did not find node");
}
if (inode_dtype == DT_INVALID) {
if (!graph_properties.HasOutputProperties(inode->name())) {
return errors::Internal("Input node ", inode->name(),
" does not have output properties");
}
const auto& inode_output_props =
graph_properties.GetOutputProperties(inode->name());
LOG_WARNING_AND_RETURN_IF_ERROR(
GetOutputDataType(inode_output_props, output_index, &inode_dtype));
}
if (inode_dtype != dtype) {
return errors::Aborted("ScopedAllocatorOptimizer expected input type ",
dtype, " but found ", inode_dtype);
}
inputs->emplace_back(inode, output_index, n);
}
return absl::OkStatus();
}
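// Collects every non-control input of op, resolved to its producing node and
// output slot.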
Status GetDataInputs(GraphDef* graph, NodeMap* node_map, NodeDef* op,
std::vector<InputDesc>* inputs) {
VLOG(2) << "GetDataInputs for node " << op->name();
NodeDef* inode = nullptr;
int output_index = 0;
for (const auto& input_name : op->input()) {
if (IsControlInput(input_name)) {
continue;
}
ParseNodeName(input_name, &output_index);
inode = nullptr;
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
inputs->emplace_back(inode, output_index, op);
}
return absl::OkStatus();
}
void DumpGraphToVLOG(const GraphDef& graph, int log_level) {
if (VLOG_IS_ON(log_level)) {
for (const auto& line : str_util::Split(graph.DebugString(), "\n\r")) {
VLOG(log_level) << line;
}
}
}
}  // namespace
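// Appends values to node_def's list(i) attr `name`, creating it if absent.
// Used to accumulate (output_slot, scope_id) pairs under the
// "_scoped_allocator" attr; see CheckExistingScopedAllocator below, which
// reads them back.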
void ScopedAllocatorOptimizer::ExtendNodeAttr(StringPiece name,
const std::vector<int32>& values,
NodeDef* node_def) {
if (HasNodeAttr(*node_def, name)) {
VLOG(2) << "extending";
AttrValue* existing = &(*node_def->mutable_attr())[string(name)];
for (int32_t i : values) {
existing->mutable_list()->add_i(i);
}
} else {
VLOG(2) << "setting new attr value";
AddNodeAttr(name, values, node_def);
}
}
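// Rewrites a group of identical unary elementwise ops so that their inputs
// are laid out in one backing tensor provided by a _ScopedAllocator node.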
class UnaryElementwiseRewriter : public ScopedAllocatorOptimizer::Rewriter {
public:
~UnaryElementwiseRewriter() override {}
Status CheckUsesAllocatorAttributes(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (IsConstant(*nd.from_node_def)) {
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(),
" is a Const op which does not use AllocatorAttributes");
}
}
return absl::OkStatus();
}
Status CheckExistingScopedAllocator(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
VLOG(2) << "get attrs for " << nd.from_node_def->name();
AttrSlice n_attrs = AttrSlice(*nd.from_node_def);
std::vector<int32> scope_ids;
Status ss = GetNodeAttr(n_attrs, kScopedAllocatorAttrName, &scope_ids);
if (ss.ok() && scope_ids[0] == nd.output_slot) {
LOG(INFO) << "Abandoning ScopedAllocatorOptimizer because input "
<< nd.from_node_def->name() << " output " << scope_ids[0]
<< " is already assigned to scope_id " << scope_ids[1];
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(), " output ", scope_ids[0], " is already ",
"assigned to scope_id ", scope_ids[1]);
}
}
return absl::OkStatus();
}
Status CheckInternalDataDependency(const std::set<string>& op_set,
const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (op_set.find(nd.from_node_def->name()) != op_set.end()) {
if (nd.output_slot != tensorflow::Graph::kControlSlot) {
return errors::Aborted("Data edge exists between ",
nd.from_node_def->name(),
" and another "
"node in the set");
}
}
}
return absl::OkStatus();
}
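  // Removes control edges whose source and destination both lie inside the
  // op group.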
void ClearInternalControlInputs(const std::set<string>& op_set,
const std::vector<NodeDef*>& ops,
NodeMap* node_map) {
for (NodeDef* n : ops) {
for (const auto& input_name : n->input()) {
if (IsControlInput(input_name)) {
int position = 0;
string input_node_name = ParseNodeName(input_name, &position);
CHECK_EQ(position, -1);
if (op_set.find(input_node_name) != op_set.end()) {
VLOG(1) << "Remove control output from " << input_node_name
<< " via edge " << input_name << " to " << n->name();
TF_CHECK_OK(RemoveEdge(input_name, input_node_name, n, node_map));
}
}
}
}
}
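  // Validates the group (common dtype/shape, usable inputs, no internal data
  // edges, no prior scope assignment), strips intra-group control edges, and
  // computes the flattened backing-tensor shape.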
Status AnalyzeInputs(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
string* device_name, DataType* dtype,
std::vector<TensorShape>* input_shapes,
std::vector<InputDesc>* inputs, TensorShape* sa_shape) {
CHECK(graph_properties_);
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckTypesAndGetShapes(*graph_properties_, ops, dtype, input_shapes));
LOG_WARNING_AND_RETURN_IF_ERROR(
GetInputs(sa_opti, invocation_count, graph, *graph_properties_,
sa_opti->node_map(), ops, *dtype, inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckUsesAllocatorAttributes(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckExistingScopedAllocator(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckInternalDataDependency(op_instance_names, *inputs));
ClearInternalControlInputs(op_instance_names, ops, node_map);
*device_name = ops[0]->device();
CHECK(!device_name->empty());
CHECK(!input_shapes->empty());
CHECK_EQ(0, Allocator::kAllocatorAlignment % DataTypeSize(*dtype))
<< "ScopedAllocatorOptimizer only applies to types that evenly "
<< "divide kAllocatorAlignment";
std::vector<ScopedAllocator::Field> sa_fields;
    int64_t num_bytes = ScopedAllocatorMgr::PopulateFields(
        0 /*scope_id*/, *input_shapes, *dtype, &sa_fields);
int64_t num_elts = num_bytes / DataTypeSize(*dtype);
VLOG(2) << "num_bytes " << num_bytes << " num_elts=" << num_elts;
*sa_shape = TensorShape({num_elts});
return absl::OkStatus();
}
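  // Breadth-first traversal of the fanout of `source_nodes`, stopping at
  // nodes that modify frame info (control-flow frame boundaries).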
Status TransitiveFanoutWithinFrame(
GraphDef* graph, NodeMap* node_map,
const std::vector<const NodeDef*>& source_nodes,
absl::flat_hash_set<const NodeDef*>* fanout) {
std::deque<const NodeDef*> queue(source_nodes.begin(), source_nodes.end());
absl::flat_hash_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.front();
queue.pop_front();
if (!visited.insert(node).second) {
continue;
}
fanout->insert(node);
for (const NodeDef* output : node_map->GetOutputs(node->name())) {
if (!ModifiesFrameInfo(*output)) {
queue.push_back(output);
}
VLOG(2) << "TransitiveFanout parent: " << node->name()
<< " child: " << output->name() << " of type " << output->op();
}
}
return absl::OkStatus();
}
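  // Builds the _ScopedAllocator node, adds a control edge from it to every
  // input producer (tagging each producer with the allocator id), and adds
  // one "delay" control edge into the allocator from a node upstream of the
  // inputs so the allocation is not scheduled too early.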
Status ConstructScopedAllocatorNode(
ScopedAllocatorOptimizer* sa_opti, GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops, const string& device_name,
DataType dtype, int sa_id, const string& sa_name,
const std::vector<TensorShape>& input_shapes,
const std::vector<InputDesc>& inputs, const TensorShape& sa_shape) {
VLOG(2) << "ConstructScopedAllocatorNode " << sa_name;
NodeDefBuilder sa_builder(sa_name, "_ScopedAllocator");
sa_builder.Device(device_name);
sa_builder.Attr("sa_name", sa_name);
sa_builder.Attr("T", dtype);
sa_builder.Attr("id", sa_id);
sa_builder.Attr("shapes", input_shapes);
sa_builder.Attr("shape", sa_shape);
sa_builder.Attr("expected_call_count", static_cast<int64_t>(ops.size()));
NodeDef* sa_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sa_builder.Finalize(sa_node));
node_map->AddNode(sa_name, sa_node);
std::vector<const NodeDef*> fanout_sources;
fanout_sources.reserve(inputs.size());
for (const auto& input : inputs) {
fanout_sources.push_back(input.from_node_def);
}
absl::flat_hash_set<const NodeDef*> fanout;
TF_RETURN_IF_ERROR(
TransitiveFanoutWithinFrame(graph, node_map, fanout_sources, &fanout));
for (int i = 0, end = inputs.size(); i < end; ++i) {
auto& nd = inputs[i];
if (IsArg(*nd.from_node_def)) {
return errors::Aborted(
"ScopedAllocatorOptimizer does not work well when the op inputs "
"are _Arg ops; skipping this optimizer for this function");
}
VLOG(2) << "To input " << i << ": " << nd.from_node_def->name()
<< " add control input "
<< "^" << sa_name;
nd.from_node_def->add_input(strings::StrCat("^", sa_name));
ScopedAllocatorOptimizer::ExtendNodeAttr(kScopedAllocatorAttrName,
{nd.output_slot, sa_id + 1 + i},
nd.from_node_def);
node_map->AddOutput(sa_name, nd.from_node_def->name());
}
bool added_delay_edge = false;
for (auto& nd : inputs) {
std::vector<InputDesc> inputs_to_first;
LOG_WARNING_AND_RETURN_IF_ERROR(GetDataInputs(
graph, sa_opti->node_map(), nd.from_node_def, &inputs_to_first));
for (int i = 0, end = inputs_to_first.size(); i < end; ++i) {
if (fanout.find(inputs_to_first[i].from_node_def) != fanout.end()) {
VLOG(2) << "Found node " << inputs_to_first[i].from_node_def->name()
<< " in the fanout of " << sa_name;
continue;
}
sa_node->add_input(
strings::StrCat("^", inputs_to_first[i].from_node_def->name()));
node_map->AddOutput(inputs_to_first[i].from_node_def->name(), sa_name);
added_delay_edge = true;
VLOG(2) << "Adding control dependency from "
<< inputs_to_first[i].from_node_def->name() << " to "
<< sa_node->name();
break;
}
if (added_delay_edge) {
break;
}
}
if (!added_delay_edge) {
LOG(WARNING) << "Found no node from which a control edge can be added to "
"scoped allocator node. If you run into issues with "
"graphs that contain control flow, turn off the "
"ScopedAllocatorOptimizer and file a bug.";
}
return absl::OkStatus();
}
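  // Builds the _ScopedAllocatorConcat node that gathers all of the ops' data
  // inputs into the backing tensor; control inputs that come from outside
  // the op set are re-attached to the concat node.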
Status BuildSAConcatNode(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
const string& device_name, DataType dtype, int sa_id,
const string& sa_name, const string& sac_name,
const TensorShape& sa_shape,
std::vector<NodeDefBuilder::NodeOut>* sac_inputs) {
VLOG(2) << "BuildSAConcatNode " << sac_name;
absl::flat_hash_map<string, string> sac_ctl_inputs;
for (int i = 0, end = ops.size(); i < end; ++i) {
NodeDef* old_op = ops[i];
for (const string& old_op_input : old_op->input()) {
int position = 0;
string input_name = ParseNodeName(old_op_input, &position);
if (position == -1) {
if (op_instance_names.find(old_op_input) == op_instance_names.end()) {
sac_ctl_inputs.emplace(old_op_input, input_name);
}
} else {
if (op_instance_names.find(old_op_input) != op_instance_names.end()) {
LOG(ERROR) << "Data edge between " << old_op_input << " and "
<< old_op->name() << " cannot build ScopedAllocator.";
return errors::Aborted("Data edge between ", old_op_input, " and ",
old_op->name(),
" cannot build ScopedAllocator.");
}
sac_inputs->push_back(
NodeDefBuilder::NodeOut(old_op_input, 0, dtype));
}
VLOG(3) << "from op " << i << ": " << old_op->name()
<< " sac_inputs append " << old_op_input;
}
}
NodeDefBuilder sac_builder(sac_name, "_ScopedAllocatorConcat");
VLOG(2) << "New sac_name " << sac_name << " shape "
<< sa_shape.DebugString();
sac_builder.Device(device_name);
sac_builder.Attr("sa_name", sa_name);
sac_builder.Attr("id", sa_id);
sac_builder.Attr("T", dtype);
sac_builder.Attr("shape", sa_shape);
sac_builder.Attr("N", static_cast<int>(sac_inputs->size()));
sac_builder.Input(NodeDefBuilder::NodeOut(sa_name, 0, dtype));
sac_builder.Input(*sac_inputs);
NodeDef* sac_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sac_builder.Finalize(sac_node));
node_map->AddNode(sac_name, sac_node);
node_map->AddOutput(sa_name, sac_name);
for (const auto& ctl_input : sac_ctl_inputs) {
const auto& ctl_edge = ctl_input.first;
const auto& input_name = ctl_input.second;
sac_node->add_input(ctl_edge);
node_map->AddOutput(input_name, sac_node->name());
}
return absl::OkStatus();
}
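  // Builds the fused replacement op with the same type and attrs as the
  // originals, consuming the concat output; _forward_input lets the kernel
  // reuse its input buffer for the output.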
Status BuildReplacementOp(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const string& device_name, DataType dtype,
const string& op_name, const string& sac_name,
const string& sa_op_name) {
VLOG(2) << "BuildReplacementOp " << sa_op_name;
NodeDefBuilder op_builder(sa_op_name, op_name);
op_builder.Device(device_name);
AttrSlice first_slice(*ops[0]);
for (auto& it : first_slice) {
op_builder.Attr(it.first, it.second);
}
op_builder.Attr("_forward_input", {0, 0});
op_builder.Input(sac_name, 0, dtype);
NodeDef* sa_op_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(op_builder.Finalize(sa_op_node));
node_map->AddNode(sa_op_name, sa_op_node);
node_map->AddOutput(sac_name, sa_op_name);
return absl::OkStatus();
}
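  // Builds the _ScopedAllocatorSplit node that carves the replacement op's
  // output back into the original per-op shapes.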
Status BuildSplitNode(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::vector<TensorShape>& input_shapes,
const std::vector<NodeDefBuilder::NodeOut>& sac_inputs,
const string& device_name, DataType dtype,
const string& op_name, int sa_id,
const string& sas_name, const string& sa_name,
const string& sa_op_name) {
VLOG(2) << "new ScopedAllocatorSplit " << sas_name;
NodeDefBuilder sas_builder(sas_name, "_ScopedAllocatorSplit");
sas_builder.Device(device_name);
sas_builder.Attr("sa_name", sa_name);
sas_builder.Attr("id", sa_id);
sas_builder.Attr("T", dtype);
sas_builder.Attr("shapes", input_shapes);
std::vector<NodeDefBuilder::NodeOut> sas_inputs = sac_inputs;
sas_builder.Attr("N", static_cast<int>(sas_inputs.size()));
sas_builder.Input(NodeDefBuilder::NodeOut(sa_op_name, 0, dtype));
sas_builder.Input(sas_inputs);
NodeDef* sas_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sas_builder.Finalize(sas_node));
node_map->AddNode(sas_name, sas_node);
node_map->AddOutput(sa_op_name, sas_name);
for (const auto& input : sas_inputs) {
node_map->AddOutput(input.node, sas_name);
}
return absl::OkStatus();
}
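  // Redirects every consumer of the original ops to the corresponding output
  // of the split node, then removes the original ops from the graph.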
Status RewireSubgraph(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
const string& op_name, const string& sas_name) {
VLOG(2) << "RewireSubgraph";
for (int op_idx = 0, idx_limit = ops.size(); op_idx < idx_limit; ++op_idx) {
NodeDef* old_op = ops[op_idx];
auto output_nodes = node_map->GetOutputs(old_op->name());
VLOG(3) << "old_op " << old_op->name() << " had " << output_nodes.size()
<< " outputs. Moving them to the ScopedAllocatorSplit node.";
if (VLOG_IS_ON(2)) {
for (NodeDef* n : output_nodes) {
VLOG(3) << " output: " << n->name();
}
}
for (NodeDef* n : output_nodes) {
VLOG(3) << "really checking old output " << n->name()
<< " for corresponding input.";
if (op_instance_names.find(n->name()) != op_instance_names.end()) {
VLOG(3) << "Dropping control output from " << old_op->name() << " to "
<< n->name();
Status ignore = RemoveEdge(strings::StrCat("^", old_op->name()),
old_op->name(), n, node_map);
continue;
}
bool found = false;
VLOG(3) << "about to iterate over " << n->input_size() << " inputs";
for (int i = 0; i < n->input_size(); ++i) {
VLOG(3) << "input " << n->input(i);
int position = 0;
string input_node = ParseNodeName(n->input(i), &position);
if (input_node == old_op->name()) {
found = true;
VLOG(3) << "match pos=" << position;
if (position == -1) {
*n->mutable_input(i) = strings::StrCat("^", sas_name);
} else {
CHECK_EQ(0, position)
<< "name " << n->input(i) << " pos " << position;
*n->mutable_input(i) = strings::StrCat(sas_name, ":", op_idx);
}
node_map->UpdateInput(n->name(), old_op->name(), sas_name);
VLOG(3) << "breaking on success";
break;
} else {
VLOG(3) << "other input " << n->input(i);
}
}
VLOG(3) << "before HasOp";
if (!HasOpName(n->name(), op_name)) {
CHECK(found) << "old_op " << old_op->name() << " node "
<< " could not find input edge on " << n->DebugString()
<< " to replace."
<< " " << op_name << " not in " << n->name();
}
VLOG(3) << "bottom of for output_nodes";
}
VLOG(3) << "Clearing all inputs of " << old_op->name();
node_map->RemoveInputs(old_op->name());
old_op->clear_input();
node_map->RemoveOutputs(old_op->name());
VLOG(3) << "after clear: " << old_op->DebugString();
RemoveNode(old_op, graph, node_map);
}
return absl::OkStatus();
}
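  // Top-level rewrite: analyze the inputs, then build
  // allocator -> concat -> replacement op -> split, and finally rewire all
  // consumers onto the split outputs.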
Status Rewrite(ScopedAllocatorOptimizer* sa_opti, int64_t invocation_count,
GraphDef* graph, const string& op_name,
const std::vector<NodeDef*>& ops, bool* applied) override {
if (VLOG_IS_ON(1)) {
VLOG(1) << "Rewrite";
string op_names;
for (auto& nd : ops) {
strings::StrAppend(&op_names, nd->name(), ", ");
}
VLOG(1) << "UnaryElementwiseRewriter::Rewrite " << op_name
<< " to: " << op_names;
}
NodeMap* node_map = sa_opti->node_map();
std::set<string> op_instance_names;
for (auto& nd : ops) {
op_instance_names.insert(nd->name());
VLOG(2) << "op_instance_name " << nd->name();
}
DataType dtype;
std::vector<TensorShape> input_shapes;
std::vector<InputDesc> inputs;
TensorShape sa_shape;
string device_name;
TF_RETURN_IF_ERROR(AnalyzeInputs(
sa_opti, invocation_count, graph, node_map, ops, op_instance_names,
&device_name, &dtype, &input_shapes, &inputs, &sa_shape));
int sa_id = sa_opti->NewScopedAllocatorId(input_shapes.size());
string sa_name =
strings::StrCat("scoped_allocator_", sa_id, "_", invocation_count);
TF_RETURN_IF_ERROR(ConstructScopedAllocatorNode(
sa_opti, graph, node_map, ops, device_name, dtype, sa_id, sa_name,
input_shapes, inputs, sa_shape));
std::vector<NodeDefBuilder::NodeOut> sac_inputs;
string sac_name = strings::StrCat("scoped_allocator_concat_", sa_id, "_",
invocation_count);
TF_RETURN_IF_ERROR(BuildSAConcatNode(
graph, node_map, ops, op_instance_names, device_name, dtype, sa_id,
sa_name, sac_name, sa_shape, &sac_inputs));
string sa_op_name = strings::StrCat(sa_name, "_", op_name);
TF_RETURN_IF_ERROR(BuildReplacementOp(graph, node_map, ops, device_name,
dtype, op_name, sac_name,
sa_op_name));
string sas_name = strings::StrCat("scoped_allocator_split_", sa_id, "_",
invocation_count);
TF_RETURN_IF_ERROR(BuildSplitNode(graph, node_map, ops, input_shapes,
sac_inputs, device_name, dtype, op_name,
sa_id, sas_name, sa_name, sa_op_name));
TF_RETURN_IF_ERROR(RewireSubgraph(graph, node_map, ops, op_instance_names,
op_name, sas_name));
*applied = true;
return absl::OkStatus();
}
};
ScopedAllocatorOptimizer::ScopedAllocatorOptimizer(
RewriterConfig::Toggle opt_level, const ScopedAllocatorOptions& opts)
: opt_level_(opt_level) {
VLOG(1) << "ScopedAllocatorOptimizer::ScopedAllocatorOptimizer";
Rewriter* r = new UnaryElementwiseRewriter();
to_delete_.push_back(r);
if (opts.enable_op_size() == 0) {
for (const auto& op_name : {"CollectiveReduce"}) {
op_name_set_.insert(op_name);
rewriters_[op_name] = r;
}
} else {
for (const auto& op_name : opts.enable_op()) {
op_name_set_.insert(op_name);
rewriters_[op_name] = r;
}
}
}
Status ScopedAllocatorOptimizer::Optimize(Cluster* /*cluster*/,
const GrapplerItem& item,
GraphDef* optimized_graph) {
VLOG(3) << "Input graph:";
DumpGraphToVLOG(item.graph, 3);
nodes_to_preserve_ = item.NodesToPreserve();
GraphProperties graph_properties(item);
const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
  LOG_WARNING_AND_RETURN_IF_ERROR(graph_properties.InferStatically(
      assume_valid_feeds, /*aggressive_shape_inference=*/false,
      /*include_tensor_values=*/false));
*optimized_graph = item.graph;
node_map_ = std::make_unique<NodeMap>(optimized_graph);
LOG_WARNING_AND_RETURN_IF_ERROR(ScopedAllocatorOptimizer::ProcessGraphDef(
optimized_graph, graph_properties));
VLOG(1) << "ScopedAllocatorOptimizer::Optimize() done";
VLOG(3) << "Optimized graph:";
DumpGraphToVLOG(*optimized_graph, 3);
return absl::OkStatus();
}
ScopedAllocatorOptimizer::Rewriter* ScopedAllocatorOptimizer::GetRewriter(
const string& op_name) {
auto it = rewriters_.find(op_name);
if (it != rewriters_.end()) {
return it->second;
}
return nullptr;
}
int ScopedAllocatorOptimizer::NewScopedAllocatorId(int num_fields) {
CHECK_GT(num_fields, 0);
int id = next_sa_id_;
next_sa_id_ += (num_fields + 1);
CHECK_GT(next_sa_id_, 0);
return id;
}
Status ScopedAllocatorOptimizer::NewIdentityId(int* id) {
*id = next_identity_id_++;
if (next_identity_id_ < 0) {
return errors::Aborted("NewIdentityId overflow");
}
return absl::OkStatus();
}
ScopedAllocatorOptimizer::~ScopedAllocatorOptimizer() {
for (auto ptr : to_delete_) {
delete ptr;
}
}
void ScopedAllocatorOptimizer::FindOpOccurrences(GraphDef* graph,
const OpNameSet& op_names,
GraphOpOccurrences* occs) {
VLOG(1) << "FindOpOccurrences ";
for (const auto& it : op_names) {
VLOG(1) << "search target " << it;
}
for (int ni = 0; ni < graph->node_size(); ++ni) {
NodeDef* node = graph->mutable_node(ni);
const string& op_name = node->op();
if (op_names.find(op_name) != op_names.end()) {
VLOG(1) << "found " << op_name << " on dev " << node->device();
(*occs)[node->device()][op_name].push_back(node);
}
}
}
namespace {
struct OpNameOrder {
  // Must be a strict weak ordering: use '<', not '<=', or std::sort may
  // misbehave.
  bool operator()(const NodeDef* a, const NodeDef* b) {
    return a->name() < b->name();
  }
};
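// Trie over the "/"-separated name scopes of candidate nodes; each node is
// inserted at the subtree for its enclosing scope.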
class Tree {
public:
Tree(const string& edge, int depth) : edge_(edge), depth_(depth) {}
~Tree() {
for (const auto& it : subtrees_) delete it.second;
}
Tree* GetSubTree(const string& edge) {
auto it = subtrees_.find(edge);
if (it != subtrees_.end()) {
return it->second;
}
Tree* t = new Tree(edge, depth_ + 1);
subtrees_[edge] = t;
return t;
}
void InsertNode(NodeDef* n) { nodes_.push_back(n); }
string edge_;
int depth_;
std::vector<NodeDef*> nodes_;
absl::flat_hash_map<string, Tree*> subtrees_;
};
Status ApplyToAll(Tree* tree, const std::function<Status(Tree*)>& func) {
Status s;
for (const auto& it : tree->subtrees_) {
s = ApplyToAll(it.second, func);
if (!s.ok()) return s;
}
s = func(tree);
return s;
}
Tree* ComputeScopeTree(const string& op_name,
const std::vector<NodeDef*>& node_vec) {
Tree* root = new Tree("", 0);
for (NodeDef* n : node_vec) {
std::vector<string> pieces = str_util::Split(n->name(), "/");
int depth = pieces.size() - 1;
Tree* subtree = root;
for (int i = 0; i < depth; ++i) {
subtree = subtree->GetSubTree(pieces[i]);
}
subtree->InsertNode(n);
}
return root;
}
void PartitionByLoopStructure(const FrameView& frame_view,
std::vector<NodeDef*> nodes,
std::vector<std::vector<NodeDef*>>* loop_groups) {
absl::flat_hash_map<uint64, std::vector<NodeDef*>> loop_sets;
for (NodeDef* nd : nodes) {
uint64 hash = 0;
const std::vector<int>& loop_ids = frame_view.Frames(*nd);
for (int id : loop_ids) {
hash = Hash64Combine(hash, static_cast<uint64>(id));
}
loop_sets[hash].push_back(nd);
}
for (auto it : loop_sets) {
loop_groups->push_back(std::move(it.second));
}
}
void IdentifyRepeatedInputs(const std::vector<NodeDef*>& nodes,
absl::flat_hash_set<string>* seen_outputs,
absl::flat_hash_set<string>* repeated_outputs) {
for (NodeDef* node : nodes) {
for (const auto& input_name : node->input()) {
if (!seen_outputs->insert(input_name).second) {
repeated_outputs->insert(input_name);
}
}
}
}
}
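// For each device and each target op type, groups candidate nodes by name
// scope and by loop structure, then applies the registered Rewriter to every
// group with more than one node.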
Status ScopedAllocatorOptimizer::ProcessGraphDef(
GraphDef* graph, const GraphProperties& graph_properties) {
static std::atomic<int64_t> invocation_counter(1);
const int64_t invocation_count =
invocation_counter.fetch_add(1, std::memory_order_seq_cst);
VLOG(1) << "ProcessGraphDef " << invocation_count;
Status status;
GraphOpOccurrences occ;
FindOpOccurrences(graph, op_name_set_, &occ);
if (!occ.empty()) {
FrameView frame_view;
LOG_WARNING_AND_RETURN_IF_ERROR(frame_view.InferFromGraph(*graph));
for (auto& dt : occ) {
VLOG(2) << "Processing device " << dt.first;
const DevOpOccurrences& dev_occ = dt.second;
for (auto& it : dev_occ) {
string op_name = it.first;
VLOG(1) << "Processing " << op_name << " set size " << it.second.size();
Rewriter* rewriter = GetRewriter(op_name);
if (!rewriter) {
LOG(ERROR) << "Failed to find Rewriter in ScopedAllocatorOptimizer "
<< "for op_name " << op_name;
continue;
}
rewriter->SetGraphProperties(graph_properties);
std::unique_ptr<Tree> root(ComputeScopeTree(it.first, it.second));
absl::flat_hash_set<string> seen_outputs;
status = ApplyToAll(root.get(), [this, &seen_outputs](Tree* t) {
IdentifyRepeatedInputs(t->nodes_, &seen_outputs, &repeated_outputs_);
return absl::OkStatus();
});
if (!status.ok()) {
break;
}
status = ApplyToAll(root.get(), [this, rewriter, graph, &frame_view,
&op_name, invocation_count](Tree* t) {
VLOG(2) << "applied to tree node " << t->edge_ << " at depth "
<< t->depth_ << " of size " << t->nodes_.size();
if (t->nodes_.size() > 1) {
std::vector<std::vector<NodeDef*>> loop_groups;
PartitionByLoopStructure(frame_view, t->nodes_, &loop_groups);
for (auto& lg : loop_groups) {
if (lg.size() > 1) {
bool applied = false;
Status s = OrderNodeSet(&lg);
TF_RETURN_IF_ERROR(s);
VLOG(1) << "Applying Rewriter for " << op_name;
s = rewriter->Rewrite(this, invocation_count, graph, op_name,
lg, &applied);
LOG_WARNING_AND_RETURN_IF_ERROR(s);
}
}
}
return absl::OkStatus();
});
if (!status.ok()) {
break;
}
}
if (!status.ok()) {
break;
}
}
}
VLOG(1) << "ScopedAllocatorOptimizer returning " << status;
if (!status.ok()) {
LOG(ERROR) << "ScopedAllocatorOptimizer: " << status;
}
return status;
}
namespace {
struct InstanceKeyLess {
bool operator()(const NodeDef* a, const NodeDef* b) const {
AttrSlice a_attrs = AttrSlice(*a);
AttrSlice b_attrs = AttrSlice(*b);
int32_t a_key = -1;
int32_t b_key = -1;
Status s = GetNodeAttr(a_attrs, "instance_key", &a_key);
CHECK(s.ok());
s = GetNodeAttr(b_attrs, "instance_key", &b_key);
CHECK(s.ok());
return a_key < b_key;
}
};
struct NameLess {
bool operator()(const NodeDef* a, const NodeDef* b) const {
return a->name() < b->name();
}
};
bool IsCollectiveNode(const NodeDef& n) {
AttrSlice attrs = AttrSlice(n);
int key = -1;
if (!IsCollective(n)) return false;
Status s = GetNodeAttr(attrs, "instance_key", &key);
if (s.ok() && key >= 0) {
return true;
}
return false;
}
}
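// Collective nodes are ordered by their instance_key attribute, which is
// presumably stable across equivalent graphs; all other nodes fall back to
// name order.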
Status ScopedAllocatorOptimizer::OrderNodeSet(
std::vector<NodeDef*>* nodes) const {
if (nodes->size() <= 1) return absl::OkStatus();
if (IsCollectiveNode(*nodes->at(0))) {
std::sort(nodes->begin(), nodes->end(), InstanceKeyLess());
} else {
std::sort(nodes->begin(), nodes->end(), NameLess());
}
return absl::OkStatus();
}
}
}
#undef LOG_WARNING_AND_RETURN_IF_ERROR | #include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include <unordered_set>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
class ScopedAllocatorOptimizerTest : public ::testing::Test {
public:
std::unique_ptr<Session> CreateSession(const GraphDef& graph,
const ConfigProto& config) {
SessionOptions options;
options.config = config;
(*options.config.mutable_device_count())["CPU"] = 2;
Session* session = NewSession(options);
TF_CHECK_OK(session->Create(graph));
return std::unique_ptr<Session>(session);
}
std::vector<Tensor> EvaluateNodes(const GraphDef& graph,
const std::vector<string>& fetch) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph));
RunOptions run_options;
std::vector<Tensor> output_tensors;
TF_CHECK_OK(
session->Run(run_options, {}, fetch, fetch, &output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
void BuildAbsGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a =
ops::Const<float>(s.WithOpName("a"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b =
ops::Const<float>(s.WithOpName("b"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c =
ops::Const<float>(s.WithOpName("c"), {-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(s.WithOpName("s1"), a, b);
Output s2 = ops::Add(s.WithOpName("s2"), b, c);
Output int1, int2;
if (forward) {
int1 = ops::Identity(s.WithOpName("i1"), s1);
int2 = ops::Identity(s.WithOpName("i2"), s2);
} else {
int1 = s1;
int2 = s2;
}
Output a1 = ops::Abs(s.WithOpName("a1"), int1);
Output a2 = ops::Abs(s.WithOpName("a2"), int2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildAbsGraphWithInputDependencies(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output c = ops::Placeholder(s.WithOpName("c"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output s1 = ops::Add(s.WithOpName("s1"), b, c);
Output a1 = ops::Abs(s.WithOpName("a1"), a);
Output a2 = ops::Abs(s.WithOpName("a2"), b);
Output a3 = ops::Abs(s.WithOpName("a3"), s1);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
Output r3 = ops::Reshape(s.WithOpName("r3"), a3, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildAbsGraphWithInputAndOutputControlEdges(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl1 = ops::Placeholder(s.WithOpName("ctl1"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl2 = ops::Placeholder(s.WithOpName("ctl2"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output a1 = ops::Abs(s.WithOpName("a1").WithControlDependencies({ctl1}), a);
Output a2 = ops::Abs(s.WithOpName("a2").WithControlDependencies({ctl2}), b);
Output o1 = ops::Reshape(s.WithOpName("o1"), a1, {1, 4});
Output o2 = ops::Reshape(s.WithOpName("o2"), a2, {4, 1});
Output ctl3 =
ops::Const<float>(s.WithOpName("ctl3").WithControlDependencies({a1}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
Output ctl4 =
ops::Const<float>(s.WithOpName("ctl4").WithControlDependencies({a2}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildGraphWithMultipleScopes(GraphDef* graph_def) {
Scope root_scope = Scope::NewRootScope();
root_scope =
root_scope.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const<float>(root_scope.WithOpName("a"),
{1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b = ops::Const<float>(root_scope.WithOpName("b"),
{1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c = ops::Const<float>(root_scope.WithOpName("c"),
{-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(root_scope.WithOpName("s1"), a, b);
Output s2 = ops::Add(root_scope.WithOpName("s2"), b, c);
Output a1 = ops::Abs(root_scope.WithOpName("a1"), s1);
Output a2 = ops::Abs(root_scope.WithOpName("a2"), s2);
Output r1 = ops::Reshape(root_scope.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(root_scope.WithOpName("r2"), a2, {4, 1});
Scope sub_scope = root_scope.NewSubScope("sub");
Output s3 = ops::Add(sub_scope.WithOpName("s3"), a, b);
Output a3 = ops::Abs(sub_scope.WithOpName("a3"), s3);
Output a4 = ops::Abs(sub_scope.WithOpName("a4"), s2);
Output r3 = ops::Reshape(sub_scope.WithOpName("r3"), a3, {1, 4});
Output r4 = ops::Reshape(sub_scope.WithOpName("r4"), a4, {4, 1});
TF_CHECK_OK(root_scope.ToGraphDef(graph_def));
}
void BuildConstGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output c1 =
ops::Const<float>(s.WithOpName("c1"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output c2 =
ops::Const<float>(s.WithOpName("c2"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output a1 = ops::Abs(s.WithOpName("a1"), c1);
Output a2 = ops::Abs(s.WithOpName("a2"), c2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void SetShapes(GraphDef* graph_def) {
TensorShapeProto shape_proto;
shape_proto.add_dim()->set_size(2);
shape_proto.add_dim()->set_size(2);
for (NodeDef& n : *graph_def->mutable_node()) {
if (n.op() == "Add" || n.op() == "Abs") {
AddNodeAttr("_output_shapes", {shape_proto}, &n);
}
}
}
void ExecuteGraph(const GraphDef& graph_def,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
ConfigProto config;
GraphOptions* gopt = config.mutable_graph_options();
OptimizerOptions* opts = gopt->mutable_optimizer_options();
opts->set_do_common_subexpression_elimination(false);
opts->set_do_constant_folding(false);
opts->set_do_function_inlining(false);
opts->set_opt_level(OptimizerOptions::L0);
RewriterConfig* rwcfg = gopt->mutable_rewrite_options();
rwcfg->clear_optimizers();
(*rwcfg->add_optimizers()) = "scoped_allocator";
rwcfg->mutable_scoped_allocator_opts()->add_enable_op("Abs");
std::unique_ptr<Session> session(CreateSession(graph_def, config));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> target_nodes = {};
Status s = session->Run(inputs, output_names, target_nodes, outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(outputs->size(), output_names.size());
}
void ValidateValues(const std::vector<Tensor>& outputs,
const std::vector<std::vector<float>>& expected) {
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(expected[i].size(), outputs[i].NumElements());
for (int j = 0; j < expected[i].size(); ++j) {
EXPECT_EQ(expected[i][j], outputs[i].flat<float>()(j));
}
}
}
void GetNode(NodeMap* node_map, const string& node_name, NodeDef** node_def) {
*node_def = node_map->GetNode(node_name);
ASSERT_TRUE(*node_def);
}
NodeDef* ValidateSAControlInput(GraphDef* graph, NodeMap* node_map,
const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
string control_input_name;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
control_input_name = input;
}
}
EXPECT_EQ(num_control_inputs, 1);
NodeDef* control_input_node = nullptr;
GetNode(node_map, control_input_name, &control_input_node);
EXPECT_EQ(control_input_node->op(), "_ScopedAllocator");
return control_input_node;
}
int NumControlInputs(NodeMap* node_map, const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
}
}
return num_control_inputs;
}
};
#ifndef ENABLE_MKL
TEST_F(ScopedAllocatorOptimizerTest, UnaryRewriteOnly) {
GrapplerItem item;
BuildAbsGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(/*cluster=*/nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* nd = nullptr;
GetNode(&node_map, "scoped_allocator_1_1", &nd);
{
auto& nd_set = node_map.GetOutputs(nd->name());
ASSERT_EQ(3, nd_set.size());
std::unordered_set<string> expected = {"scoped_allocator_concat_1_1", "s1",
"s2"};
for (auto it : nd_set) {
ASSERT_NE(expected.find(it->name()), expected.end())
<< "Failed to find " << it->name();
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_concat_1_1");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_1_1_Abs", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_1_1_Abs");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_split_1_1", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_split_1_1");
ASSERT_EQ(2, nd_set.size());
std::unordered_set<string> name_set;
for (auto it : nd_set) {
name_set.insert(it->name());
}
ASSERT_TRUE(name_set.find("r1") != name_set.end());
ASSERT_TRUE(name_set.find("r2") != name_set.end());
}
}
TEST_F(ScopedAllocatorOptimizerTest, UnaryExecute) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, false);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
  ExecuteGraph(graph_def, {"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, MultipleScopes) {
GraphDef graph_def;
BuildGraphWithMultipleScopes(&graph_def);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def,
{"r1:0", "r2:0", "sub/r3:0", "sub/r4:0"},
&outputs);
ValidateValues(
outputs,
{{2, 2, 3, 3}, {4, 4, 3, 2}, {2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, Extend) {
NodeDef nd;
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {0, 2}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {6, 7}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {2, 3}, &nd);
VLOG(0) << "nd: " << nd.DebugString();
std::vector<int> scoped_allocator_attrs;
AttrSlice slice(nd);
Status sa_status =
GetNodeAttr(slice, "_scoped_allocator", &scoped_allocator_attrs);
for (int i : scoped_allocator_attrs) {
VLOG(0) << "extracted: " << i;
}
NodeDef nd2;
AddNodeAttr("_scoped_allocator", {0, 2}, &nd2);
AddNodeAttr("_scoped_allocator", {6, 7}, &nd2);
AddNodeAttr("_scoped_allocator", {2, 3}, &nd2);
VLOG(0) << "nd2: " << nd2.DebugString();
}
TEST_F(ScopedAllocatorOptimizerTest, ForwardInputToOutput) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, true);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def, {"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, InputDependencies) {
GrapplerItem item;
BuildAbsGraphWithInputDependencies(&item.graph);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Add");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* scoped_allocator_node =
ValidateSAControlInput(&optimized_graph, &node_map, "a");
VLOG(1) << scoped_allocator_node->DebugString();
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "b"));
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "s1"));
EXPECT_EQ(scoped_allocator_node->input_size(), 1);
EXPECT_EQ(scoped_allocator_node->input(0), "^c");
}
TEST_F(ScopedAllocatorOptimizerTest, ControlEdgeRewire) {
GrapplerItem item;
BuildAbsGraphWithInputAndOutputControlEdges(&item.graph);
SetShapes(&item.graph);
LOG(INFO) << item.graph.DebugString();
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Const");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
TF_ASSERT_OK(TopologicalSort(&optimized_graph));
NodeMap node_map(&optimized_graph);
LOG(INFO) << optimized_graph.DebugString();
NodeDef* ctl1 = nullptr;
GetNode(&node_map, "ctl1", &ctl1);
const auto& ctl1_outputs = node_map.GetOutputs("ctl1");
EXPECT_EQ(ctl1_outputs.size(), 1);
NodeDef* sa_concat = *ctl1_outputs.begin();
EXPECT_EQ(sa_concat->op(), "_ScopedAllocatorConcat");
NodeDef* ctl2 = nullptr;
GetNode(&node_map, "ctl2", &ctl2);
const auto& ctl2_outputs = node_map.GetOutputs("ctl2");
EXPECT_EQ(ctl2_outputs.size(), 1);
EXPECT_EQ(*ctl2_outputs.begin(), sa_concat);
EXPECT_EQ(NumControlInputs(&node_map, sa_concat->name()), 2);
const auto& sa_concat_outputs = node_map.GetOutputs(sa_concat->name());
EXPECT_EQ(sa_concat_outputs.size(), 1);
NodeDef* fused_abs = *sa_concat_outputs.begin();
EXPECT_EQ(NumControlInputs(&node_map, fused_abs->name()), 0);
const auto& fused_abs_outputs = node_map.GetOutputs(fused_abs->name());
EXPECT_EQ(fused_abs_outputs.size(), 1);
NodeDef* sa_split = *fused_abs_outputs.begin();
EXPECT_EQ(NumControlOutputs(*sa_split, node_map), 2);
EXPECT_EQ(NumControlInputs(&node_map, "ctl3"), 1);
EXPECT_EQ(NumControlInputs(&node_map, "ctl4"), 1);
}
TEST_F(ScopedAllocatorOptimizerTest, ConstInput) {
GrapplerItem item;
BuildConstGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(/*cluster=*/nullptr, item, &optimized_graph));
const NodeDef* sa_node = nullptr;
for (const NodeDef& node : optimized_graph.node()) {
if (node.op() == "_ScopedAllocator") {
sa_node = &node;
break;
}
}
ASSERT_NE(sa_node, nullptr);
int num_identity_ops = 0;
NodeMap node_map(&optimized_graph);
for (NodeDef* sa_output : node_map.GetOutputs(sa_node->name())) {
EXPECT_FALSE(IsConstant(*sa_output));
if (IsIdentity(*sa_output)) {
++num_identity_ops;
}
}
EXPECT_EQ(num_identity_ops, 2);
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cba17e35-f5cf-4369-abac-57976f000bab | cpp | tensorflow/tensorflow | pin_to_host_optimizer | tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc | tensorflow/core/grappler/optimizers/pin_to_host_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace internal {
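// Upper bound on the number of elements for a tensor to be considered small
// enough to produce on the host.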
constexpr int64_t kTensorMaxSize = 64;
// Ops that must never be swapped onto the host: collectives, control flow,
// and NoOp.
bool IsDenylisted(const NodeDef& node) {
  return IsCollective(node) || IsControlFlow(node) || IsNoOp(node);
}
bool IsTensorSmall(const OpInfo::TensorProperties& prop) {
if (prop.dtype() == DataType::DT_STRING) {
return true;
}
if (prop.dtype() != DataType::DT_INT32 &&
prop.dtype() != DataType::DT_INT64 &&
prop.dtype() != DataType::DT_FLOAT) {
return false;
}
const int64_t size = NumCoefficients(prop.shape());
if (size < 0 || size > kTensorMaxSize) {
return false;
}
return true;
}
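// Returns the first KernelDef registered for `node` among `devices`, tried
// in order; NotFound if no listed device has a kernel for the op.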
Status TryFindKernelDef(const std::vector<DeviceType>& devices,
const NodeDef& node, const KernelDef** kdef) {
for (const DeviceType& device : devices) {
const KernelDef* kernel = nullptr;
Status s = FindKernelDef(device, node, &kernel, nullptr);
if (s.ok()) {
if (kdef) {
*kdef = kernel;
}
return absl::OkStatus();
}
}
return errors::NotFound("Could not find KernelDef for op: ", node.op());
}
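// Determines whether output `port_id` of `node` yields a small tensor in
// host memory: true for CPU-placed producers and for outputs listed in the
// kernel's host_memory_arg; Identity chains are traced back to their real
// producers.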
Status IsNodeOutputPortHostFriendly(const GraphView& graph,
GraphProperties* properties,
const NodeDef& node, int port_id,
bool* is_candidate) {
*is_candidate = false;
if (IsDenylisted(node)) {
return absl::OkStatus();
}
if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(
        /*assume_valid_feeds=*/false, /*aggressive_shape_inference=*/false,
        /*include_tensor_values=*/false));
}
const auto& output_properties = properties->GetOutputProperties(node.name());
int output_properties_size = output_properties.size();
if (port_id >= output_properties_size) {
LOG(WARNING) << "port_id=" << port_id
<< " but output_properties.size()=" << output_properties.size()
<< "\n"
<< node.DebugString();
return absl::OkStatus();
}
if (!IsTensorSmall(output_properties[port_id])) {
return absl::OkStatus();
}
if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
    for (const auto& fanin :
         graph.GetFanins(node, /*include_controlling_nodes=*/false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return absl::OkStatus();
}
const int output_arg_id = OpOutputPortIdToArgId(node, *op, port_id);
if (output_arg_id < 0) {
LOG(WARNING) << "Invalid port: " << port_id << "!\n"
<< node.DebugString() << "\n"
<< op->DebugString();
return absl::OkStatus();
}
const KernelDef* kernel = nullptr;
s = TryFindKernelDef({node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node,
&kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return absl::OkStatus();
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->output_arg(output_arg_id).name() == host_memory_arg) {
*is_candidate = true;
break;
}
}
return absl::OkStatus();
}
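// Determines whether the consumer expects input `port_id` in host memory,
// i.e. it runs on the CPU or its KernelDef lists that input arg in
// host_memory_arg.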
bool IsNodeInputPortHostFriendly(const NodeDef& node, int port_id) {
if (absl::StrContains(node.device(), DEVICE_CPU)) {
return true;
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return false;
}
const int input_arg_id = OpInputPortIdToArgId(node, *op, port_id);
const KernelDef* kernel = nullptr;
s = internal::TryFindKernelDef(
{node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node, &kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return false;
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->input_arg(input_arg_id).name() == host_memory_arg) {
return true;
}
}
return false;
}
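// A node may be pinned to the host if it is not denylisted, has a registered
// CPU kernel, every fanin is host-friendly, and all of its outputs are
// small.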
Status IsNodeHostCandidate(const GraphView& graph, GraphProperties* properties,
const NodeDef& node, bool* is_candidate) {
*is_candidate = false;
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
if (IsDenylisted(node)) {
return absl::OkStatus();
}
Status s = TryFindKernelDef({DEVICE_CPU}, node, nullptr);
if (!s.ok()) {
return absl::OkStatus();
}
  for (const GraphView::OutputPort& fanin :
       graph.GetFanins(node, /*include_controlling_nodes=*/false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(
        /*assume_valid_feeds=*/false, /*aggressive_shape_inference=*/false,
        /*include_tensor_values=*/false));
}
for (const auto& prop : properties->GetOutputProperties(node.name())) {
if (!IsTensorSmall(prop)) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
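// Maps `device` to a host device from `devices` if one exists: an empty
// device string maps to "/device:CPU:0" when a CPU is present, and GPU-like
// names such as "/device:XLA_GPU:0" map to the corresponding CPU device.
// Returns "" when no suitable host device is found.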
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
bool has_device_cpu, const string& device) {
if (device.empty() && has_device_cpu) {
return "/device:CPU:0";
} else if (absl::StrContains(device, DEVICE_GPU)) {
for (const auto& device_match :
{std::pair<string, string>("GPU", "CPU:0"),
std::pair<string, string>("/device", "/device:CPU:0")}) {
const string device_host =
strings::StrCat(device.substr(0, device.rfind(device_match.first)),
device_match.second);
if (devices.find(device_host) != devices.end()) {
return device_host;
}
}
}
return "";
}
}
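// Two passes: first move every candidate node onto a host device, then swap
// Const nodes back to their original device if any consumer expects the
// value in device memory.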
Status PinToHostOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
*optimized_graph = item.graph;
if (IsLegacyTPUBridgeGraphDef(*optimized_graph)) {
return absl::OkStatus();
}
GraphProperties properties(item);
GraphView graph(optimized_graph);
gtl::FlatSet<string> devices;
if (cluster) {
const std::vector<string> device_names = cluster->GetDeviceNames();
devices.insert(device_names.begin(), device_names.end());
} else {
devices = {"/device:CPU:0"};
}
const bool has_device_cpu = devices.find("/device:CPU:0") != devices.end();
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
std::vector<std::pair<NodeDef*, string>> const_nodes;
for (auto& node : *optimized_graph->mutable_node()) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
bool is_candidate = false;
TF_RETURN_IF_ERROR(
internal::IsNodeHostCandidate(graph, &properties, node, &is_candidate));
if (!is_candidate) {
continue;
}
string device =
internal::TryFindHostDevice(devices, has_device_cpu, node.device());
if (!device.empty()) {
if (IsConstant(node)) {
const_nodes.emplace_back(&node, node.device());
}
VLOG(2) << "Moving node " << node.name() << " to device " << device;
*node.mutable_device() = std::move(device);
}
}
for (auto& it : const_nodes) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
NodeDef* node = it.first;
const string& device = it.second;
    for (const GraphView::InputPort& fanout :
         graph.GetFanouts(*node, /*include_controlled_nodes=*/false)) {
if (!internal::IsNodeInputPortHostFriendly(*fanout.node,
fanout.port_id)) {
VLOG(2) << "Swapping node " << node->name() << " back to device "
<< device;
node->set_device(device);
break;
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class PinToHostOptimizerTest : public GrapplerTest {};
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceNoDevices) {
gtl::FlatSet<string> devices = {};
EXPECT_EQ(internal::TryFindHostDevice(devices, false, "ABC"), "");
}
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceCpuXlaGpu) {
gtl::FlatSet<string> devices = {"/device:CPU:0", "/device:XLA_GPU:0"};
EXPECT_EQ(internal::TryFindHostDevice(devices, true, ""), "/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:0"),
"/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:*"),
"/device:CPU:0");
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
  int num_int32 = 4;  // Fetches "a", "c", "d", "e" are int32; "f" is a string.
Output f = ops::Const(s.WithOpName("f"), {"test"});
GrapplerItem item;
item.fetch = {"a", "c", "d", "e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
if (i < num_int32) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
} else {
test::ExpectTensorEqual<tstring>(tensors[i], tensors_expected[i]);
}
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e" || node.name() == "f") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallFloatOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {1024, 1024});
Output input_min = ops::Const(s.WithOpName("input_min"), 0.0f);
Output input_max = ops::Const(s.WithOpName("input_max"), 6.0f);
Output b =
ops::QuantizeAndDequantizeV2(s.WithOpName("b"), a, input_min, input_max);
GrapplerItem item;
item.fetch = {"b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
}
for (const NodeDef& node : output.node()) {
if (node.name() == "input_min" || node.name() == "input_max") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
}
}
}
TEST_F(PinToHostOptimizerTest, TopologicalSort) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"a", "c", "d", "e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
std::reverse(item.graph.mutable_node()->begin(),
item.graph.mutable_node()->end());
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 4);
}
TEST_F(PinToHostOptimizerTest, NoSwap) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 1});
Output b = ops::Const(s.WithOpName("b"), 1, {1, 1024 * 1024});
Output c = ops::MatMul(s.WithOpName("c"), a, b);
GrapplerItem item;
item.fetch = {"a", "b", "c"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_TRUE(node.device().empty());
++found;
}
EXPECT_EQ(found, 3);
}
TEST_F(PinToHostOptimizerTest, Identity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Const(s.WithOpName("a").WithDevice("/device:GPU:0"), 1, {64, 64});
Output b = ops::Const(s.WithOpName("b"), {0, 1}, {2});
Output c =
ops::ReduceProd(s.WithOpName("c").WithDevice("/device:GPU:0"), a, b);
Output d = ops::Identity(s.WithDevice("/device:CPU:0").WithOpName("d"), c);
Output e = ops::Multiply(s.WithOpName("e"), d, d);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_EQ(node.device(), "/device:GPU:0");
} else if (node.name() == "b") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
} else if (node.name() == "d") {
EXPECT_EQ(node.device(), "/device:CPU:0");
} else if (node.name() == "e") {
EXPECT_TRUE(node.device().empty());
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, PortIdToArgId) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 2, 3});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GrapplerItem item;
item.fetch = {"a", "b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_EQ(node.device(), "/device:CPU:0");
++found;
}
EXPECT_EQ(found, 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/pin_to_host_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9b10400e-2167-4933-8c90-81af979ac416 | cpp | tensorflow/tensorflow | arithmetic_optimizer | tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc | tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/strided_slice_op.h"
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAddOpsRewriteTag[] =
"_grappler_ArithmeticOptimizer_AddOpsRewriteStage";
constexpr char kMinimizeBroadcastsTag[] =
"_grappler_ArithmeticOptimizer_MinimizeBroadcasts";
template <typename T>
bool ValuesFromConstNode(const NodeDef& node, std::vector<T>* values) {
if (node.op() != "Const") {
return false;
}
if (node.attr().count("dtype") == 0 || node.attr().count("value") == 0 ||
node.attr().at("dtype").type() != DataTypeToEnum<T>::value) {
return false;
}
const TensorProto& tensor = node.attr().at("value").tensor();
typename checkpoint::SaveTypeTraits<T>::RepeatedField* tensor_values =
checkpoint::MutableTensorProtoData<T>(const_cast<TensorProto*>(&tensor));
if (!tensor_values->empty() && tensor.has_tensor_shape()) {
const TensorShapeProto& shape = tensor.tensor_shape();
if (shape.dim_size() == 1 && shape.dim(0).size() == tensor_values->size()) {
values->insert(values->end(), tensor_values->begin(),
tensor_values->end());
return true;
}
}
const auto tensor_content_size = tensor.tensor_content().size();
if (tensor_content_size > 0) {
CHECK_EQ(0, tensor_content_size % sizeof(T))
<< "tensor_content_size (" << tensor_content_size
<< ") is not a multiple of " << sizeof(T);
values->resize(tensor_content_size / sizeof(T));
port::CopyToArray(tensor.tensor_content(),
reinterpret_cast<char*>(values->data()));
return true;
}
return false;
}
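// Adds a control dependency on `new_input` to `node` unless an equivalent
// data or control edge already exists; returns true if an edge was added.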
bool MaybeAddControlInput(const string& new_input, NodeDef* node,
GraphDef* graph, NodeMap* node_map) {
bool already_exists = false;
for (const string& input : node->input()) {
if (input == new_input || AsControlDependency(input) == new_input) {
already_exists = true;
break;
}
}
if (!already_exists) {
const string ctrl_dep =
ConstantFolding::AddControlDependency(new_input, graph, node_map);
node->add_input(ctrl_dep);
node_map->AddOutput(NodeName(new_input), node->name());
}
return !already_exists;
}
void SetDataTypeToAttr(DataType dtype, const string& attr_name, NodeDef* node) {
(*node->mutable_attr())[attr_name].set_type(dtype);
}
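// The two helpers below return the tail of the chain of nodes reachable from
// `node` through nodes that are value-preserving (resp. idempotent), are not
// in `nodes_to_preserve`, and have exactly one non-control consumer.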
NodeDef* GetTailOfValuePreservingChain(
const NodeDef& node, const NodeMap& node_map,
const std::unordered_set<string>& nodes_to_preserve) {
auto is_value_preserving_non_branching = [&](const NodeDef& node) {
return nodes_to_preserve.find(node.name()) == nodes_to_preserve.end() &&
IsValuePreserving(node) && NumNonControlOutputs(node, node_map) == 1;
};
return GetTailOfChain(node, node_map, false,
is_value_preserving_non_branching);
}
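// Same as above, but the chain may only consist of idempotent ops.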
NodeDef* GetTailOfIdempotentChain(
const NodeDef& node, const NodeMap& node_map,
const std::unordered_set<string>& nodes_to_preserve) {
auto is_idempotent_non_branching = [&](const NodeDef& node) {
return nodes_to_preserve.find(node.name()) == nodes_to_preserve.end() &&
IsIdempotent(node) && NumNonControlOutputs(node, node_map) == 1;
};
return GetTailOfChain(node, node_map, false,
is_idempotent_non_branching);
}
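// Reads element `i` of tensor `t` as a complex128, provided t's dtype is one
// of `dtypes`. Returns false for unsupported dtypes.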
bool GetElementUnexhaustive(const Tensor& t, int i, const std::set<int>& dtypes,
complex128* element) {
if (dtypes.find(t.dtype()) == dtypes.end()) return false;
switch (t.dtype()) {
case DT_BFLOAT16:
*element = complex128(t.flat<bfloat16>()(i));
return true;
case DT_HALF:
*element = complex128(static_cast<double>(t.flat<Eigen::half>()(i)), 0);
return true;
case DT_INT32:
*element = complex128(t.flat<int32>()(i));
return true;
case DT_INT64:
*element = complex128(t.flat<int64_t>()(i));
return true;
case DT_FLOAT:
*element = complex128(t.flat<float>()(i));
return true;
case DT_DOUBLE:
*element = complex128(t.flat<double>()(i));
return true;
case DT_COMPLEX64:
*element = complex128(t.flat<complex64>()(i));
return true;
case DT_COMPLEX128:
*element = t.flat<complex128>()(i);
return true;
default:
return false;
}
}
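// Returns true if `node` is placed on a CPU device.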
bool NodeIsOnCpu(const NodeDef& node) {
string task;
string device;
return DeviceNameUtils::SplitDeviceName(node.device(), &task, &device) &&
absl::StrContains(device, DEVICE_CPU);
}
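// Returns true if all regular (non-control) inputs of `node` refer to the
// same tensor.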
bool AllRegularInputsEqual(const NodeDef& node) {
if (!HasRegularInputs(node)) return true;
for (int i = 1; i < node.input_size(); ++i) {
if (IsControlInput(node.input(i))) {
break;
}
if (node.input(0) != node.input(i)) {
return false;
}
}
return true;
}
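// Turns `node` into an input-less NoOp and drops its inferred shape
// properties, which are no longer valid.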
void ReplaceWithNoOp(NodeDef* node, const GraphOptimizerContext& ctx) {
ctx.node_map->RemoveInputs(node->name());
ctx.graph_properties->ClearInputProperties(node->name());
ctx.graph_properties->ClearOutputProperties(node->name());
ChangeToNoOp(node);
EraseRegularNodeAttributes(node);
node->clear_input();
}
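// Shared state passed to every stage: the queue of nodes scheduled for
// (re-)simplification.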
struct ArithmeticOptimizerContext {
explicit ArithmeticOptimizerContext(SetVector<NodeDef*>* nodes_to_simplify)
: nodes_to_simplify(nodes_to_simplify) {}
SetVector<NodeDef*>* nodes_to_simplify;
};
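// Base class for a single rewrite used by the arithmetic optimizer.
// Subclasses implement IsSupported()/TrySimplify(); the helpers below cover
// the shared bookkeeping: re-queueing nodes for simplification, rewiring
// consumers to a new input, forwarding control dependencies, and checking
// the feed/preserve sets.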
class ArithmeticOptimizerStage : public GraphOptimizerStage<string> {
public:
explicit ArithmeticOptimizerStage(const string& name,
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext ctx_ext)
: GraphOptimizerStage("ArithmeticOptimizer", name, ctx),
ctx_ext_(ctx_ext) {}
~ArithmeticOptimizerStage() override = default;
protected:
void AddToOptimizationQueue(NodeDef* node) {
ctx_ext_.nodes_to_simplify->PushBack(node);
}
Status UpdateConsumers(NodeDef* node, const string& new_input) {
const auto consumers = ctx().node_map->GetOutputs(node->name());
if (consumers.empty()) return absl::OkStatus();
const TensorId new_tensor = ParseTensorName(new_input);
for (NodeDef* consumer : consumers) {
if (consumer->name() == new_tensor.node()) continue;
bool updated = false;
for (int i = 0; i < consumer->input_size(); ++i) {
const TensorId input_tensor = ParseTensorName(consumer->input(i));
if (input_tensor.node() == node->name()) {
if (new_tensor.index() < 0 && input_tensor.index() >= 0) {
return errors::InvalidArgument(
"Cannot override data input ", input_tensor.ToString(),
" with control input ", new_tensor.ToString());
}
consumer->set_input(i, input_tensor.index() < 0
? absl::StrCat("^", new_tensor.node())
: new_input);
ctx().node_map->UpdateInput(consumer->name(), node->name(),
new_input);
updated = true;
}
}
if (updated) {
DedupControlInputs(consumer);
AddToOptimizationQueue(consumer);
}
}
return absl::OkStatus();
}
void ForwardControlDependencies(
NodeDef* target_node, const std::vector<const NodeDef*>& src_nodes) {
for (const auto& src : src_nodes) {
for (int i = src->input_size() - 1; i >= 0; --i) {
if (IsControlInput(src->input(i))) {
*target_node->add_input() = src->input(i);
ctx().node_map->AddOutput(NodeName(src->input(i)),
target_node->name());
} else {
break;
}
}
}
DedupControlInputs(target_node);
}
bool IsReallyConstant(const NodeDef& node) const {
if (!IsConstant(node)) {
return false;
}
return ctx().feed_nodes->find(node.name()) == ctx().feed_nodes->end();
}
bool IsInPreserveSet(const NodeDef& node) const {
return ctx().nodes_to_preserve->find(node.name()) !=
ctx().nodes_to_preserve->end();
}
bool IsDrivenByControlDependency(const NodeDef& node) const {
return std::any_of(
node.input().begin(), node.input().end(),
[](const string& input) { return IsControlInput(input); });
}
bool DrivesControlDependency(const NodeDef& node) const {
for (const NodeDef* output : ctx().node_map->GetOutputs(node.name())) {
for (int i = 0; i < output->input_size(); ++i) {
const TensorId tensor = ParseTensorName(output->input(i));
if (tensor.node() == node.name() && tensor.index() < 0) {
return true;
}
}
}
return false;
}
bool GetTensorFromConstNode(const string& node_name_or_input,
Tensor* tensor) {
const NodeDef* node = ctx().node_map->GetNode(node_name_or_input);
return node != nullptr && IsReallyConstant(*node) &&
CheckAttrExists(*node, "value").ok() &&
tensor->FromProto(node->attr().at("value").tensor());
}
private:
const ArithmeticOptimizerContext ctx_ext_;
};
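// Base class for rewrites that collapse a whole group of connected nodes at
// once (e.g. a tree of adds). Starting from a root node, absorbable inputs
// are followed transitively; the inputs that cannot be absorbed become the
// inputs of the rewritten group.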
class ArithmeticNodesGroupOptimizerStage : public ArithmeticOptimizerStage {
public:
explicit ArithmeticNodesGroupOptimizerStage(
const string& name, const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext ctx_ext)
: ArithmeticOptimizerStage(name, ctx, ctx_ext) {}
~ArithmeticNodesGroupOptimizerStage() override = default;
struct InputAndShape {
InputAndShape(const string& input, const TensorShapeProto& shape)
: input(input), shape(shape) {}
string input;
TensorShapeProto shape;
};
struct OptimizedNodesGroup {
NodeDef* root_node;
TensorShapeProto root_shape;
std::vector<NodeDef*> optimized_nodes;
std::vector<InputAndShape> inputs;
};
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
OptimizedNodesGroup group;
TF_RETURN_IF_ERROR(CreateOptimizedNodesGroup(node, &group));
if (!group.optimized_nodes.empty()) {
*simplified_node_name = RewriteOptimizedNodesGroup(group);
}
return absl::OkStatus();
}
protected:
virtual string RewriteOptimizedNodesGroup(
const OptimizedNodesGroup& group) = 0;
virtual bool IsAbsorbableByOptimizedNodesGroup(
const OptimizedNodesGroup& group, const NodeDef& node) const = 0;
Status AbsorbInputByOptimizedNodesGroup(const string& input,
OptimizedNodesGroup* group) const {
std::deque<const string*> input_tensors;
input_tensors.push_front(&input);
while (!input_tensors.empty()) {
const string* input_tensor = input_tensors.front();
input_tensors.pop_front();
NodeDef* input_node;
TF_RETURN_IF_ERROR(GetInputNode(*input_tensor, &input_node));
if (IsAbsorbableByOptimizedNodesGroup(*group, *input_node)) {
group->optimized_nodes.push_back(input_node);
for (int i = input_node->input_size() - 1; i >= 0; --i) {
const string& absorbed_node_input = input_node->input(i);
if (IsControlInput(absorbed_node_input)) continue;
input_tensors.push_front(&absorbed_node_input);
}
} else {
const OpInfo::TensorProperties* properties;
TF_RETURN_IF_ERROR(GetTensorProperties(*input_tensor, &properties));
group->inputs.emplace_back(*input_tensor, properties->shape());
}
}
return absl::OkStatus();
}
Status CreateOptimizedNodesGroup(NodeDef* root_node,
OptimizedNodesGroup* group) const {
const OpInfo::TensorProperties* root_node_output_properties;
TF_RETURN_IF_ERROR(
GetTensorProperties(root_node->name(), &root_node_output_properties));
group->root_node = root_node;
group->root_shape = root_node_output_properties->shape();
group->optimized_nodes.reserve(root_node->input_size());
for (int i = 0; i < root_node->input_size(); ++i) {
const string& input_i = root_node->input(i);
if (IsControlInput(input_i)) continue;
TF_RETURN_IF_ERROR(AbsorbInputByOptimizedNodesGroup(input_i, group));
}
return absl::OkStatus();
}
bool HasAllInputsBroadcastableToShape(
const NodeDef& node, const OpInfo::TensorProperties& properties) const {
auto is_broadcastable = [this, &properties](const string& input) {
const OpInfo::TensorProperties* input_props;
Status has_input_properties = GetTensorProperties(input, &input_props);
return has_input_properties.ok() &&
ShapesBroadcastable(properties, *input_props);
};
return std::all_of(node.input().begin(), node.input().end(),
is_broadcastable);
}
string ShapeSignature(const TensorShapeProto& shape) const {
string signature = strings::StrCat("rank:", shape.dim_size(), ":dim");
for (int i = 0; i < shape.dim_size(); ++i)
strings::StrAppend(&signature, ":", shape.dim(i).size());
return signature;
}
void MarkWithTag(const StringPiece tag, NodeDef* node) {
AddNodeAttr(tag, true, node);
}
void MarkAllMembersWithTag(const OptimizedNodesGroup& group,
const StringPiece tag) const {
AddNodeAttr(tag, true, group.root_node);
for (NodeDef* optimized_node : group.optimized_nodes) {
AddNodeAttr(tag, true, optimized_node);
}
}
bool IsOnTheSameDevice(const OptimizedNodesGroup& group,
const NodeDef& node) const {
return group.root_node->device() == node.device();
}
bool IsInPreserveSet(const NodeDef& node) const {
return ctx().nodes_to_preserve->find(node.name()) !=
ctx().nodes_to_preserve->end();
}
bool IsMarkedWithTag(const NodeDef& node, const StringPiece tag) const {
return HasNodeAttr(node, tag);
}
bool IsMarkedWithAnyTag(const NodeDef& node, const StringPiece tag1,
const StringPiece tag2) const {
return IsMarkedWithTag(node, tag1) || IsMarkedWithTag(node, tag2);
}
};
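// Rewrites a tree of Add/AddN ops with symbolically defined shapes into AddN
// ops: inputs are grouped by shape signature, each group is summed with a
// single AddN, and the per-group sums are combined with binary adds starting
// from the smallest shape, so broadcasts happen as late as possible. E.g.
//   AddN(x1, y1, AddN(x2, y2))  =>  Add(AddN(x1, x2), AddN(y1, y2))
// when the x's and y's have two different shapes.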
class AddOpsRewriteStage : public ArithmeticNodesGroupOptimizerStage {
public:
explicit AddOpsRewriteStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticNodesGroupOptimizerStage("AddOpsRewrite", ctx, ctx_ext) {}
~AddOpsRewriteStage() override = default;
bool IsSupported(const NodeDef* node) const override {
if (!CanOptimize(*node)) return false;
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node->name(), &properties);
return has_properties.ok() && ShapeIsSymbolicallyDefined(*properties) &&
HasAllInputsBroadcastableToShape(*node, *properties);
}
protected:
bool IsAbsorbableByOptimizedNodesGroup(const OptimizedNodesGroup& group,
const NodeDef& node) const override {
if (!CanOptimize(node)) return false;
if (!IsOnTheSameDevice(group, node)) {
return false;
}
if (NumNonControlDataOutputs(node, *ctx().node_map) != 1) {
return false;
}
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node.name(), &properties);
return has_properties.ok() &&
HasAllInputsBroadcastableToShape(node, *properties);
}
bool CanOptimize(const NodeDef& node) const {
if (!IsAdd(node) && !IsAddN(node)) {
return false;
}
if (IsInPreserveSet(node) || IsMarkedWithTag(node, kAddOpsRewriteTag)) {
return false;
}
return !(IsDrivenByControlDependency(node) ||
DrivesControlDependency(node));
}
string RewriteOptimizedNodesGroup(const OptimizedNodesGroup& group) override {
VLOG(2) << "Collapse Add/AddN: root=" << group.root_node->name()
<< " op=" << group.root_node->op()
<< " num_optimized_nodes=" << group.optimized_nodes.size()
<< " num_inputs=" << group.inputs.size();
MarkAllMembersWithTag(group, kAddOpsRewriteTag);
auto root_scope_and_name = ParseNodeScopeAndName(group.root_node->name());
std::unordered_map<string, std::vector<InputAndShape>> shape_sig_to_inputs;
for (const auto& input : group.inputs) {
shape_sig_to_inputs[ShapeSignature(input.shape)].push_back(input);
}
using SigKV = decltype(shape_sig_to_inputs)::value_type;
VLOG(3) << "Add/AddN group has " << shape_sig_to_inputs.size()
<< " unique shapes: "
<< absl::StrJoin(shape_sig_to_inputs, ", ",
[](string* out, SigKV p) {
strings::StrAppend(out, p.first);
});
std::vector<TensorShapeProto> shapes;
shapes.reserve(shape_sig_to_inputs.size());
for (const auto& el : shape_sig_to_inputs)
shapes.push_back(el.second[0].shape);
if (shapes.size() == 1) {
string node_name = UniqueOptimizedNodeName(root_scope_and_name);
AddInputsOfSymbolicallyEqualShape(*group.root_node, node_name,
group.inputs);
return node_name;
}
std::sort(shapes.begin(), shapes.end(),
[](const TensorShapeProto& left, const TensorShapeProto& right) {
return CompareSymbolicallyShapedTensorSizes(left, right);
});
auto leaf_node_name = [&root_scope_and_name, this](int i) {
return UniqueOptimizedNodeName(root_scope_and_name,
strings::StrCat("Leaf_", i));
};
auto internal_node_name = [&root_scope_and_name, this](int i) {
return UniqueOptimizedNodeName(root_scope_and_name,
strings::StrCat("Internal_", i));
};
std::deque<InputAndShape> add_ops;
for (int i = 0, end = shapes.size(); i < end; ++i) {
const auto node_name = leaf_node_name(i);
const auto& inputs = shape_sig_to_inputs[ShapeSignature(shapes[i])];
add_ops.push_back(AddInputsOfSymbolicallyEqualShape(*group.root_node,
node_name, inputs));
}
int internal_nodes = 0;
do {
const InputAndShape lhs = add_ops.front();
add_ops.pop_front();
const InputAndShape rhs = add_ops.front();
add_ops.pop_front();
string name = add_ops.empty()
? UniqueOptimizedNodeName(root_scope_and_name)
: internal_node_name(internal_nodes++);
InputAndShape add = AddAggregatedInputs(*group.root_node, name, lhs, rhs);
add_ops.push_front(add);
} while (add_ops.size() > 1);
InputAndShape optimized_root_node = add_ops.front();
return optimized_root_node.input;
}
InputAndShape AddInputsOfSymbolicallyEqualShape(
const NodeDef& root_node, const string& node_name,
const std::vector<InputAndShape>& inputs) {
CHECK(!inputs.empty()) << "Inputs must be non-empty";
if (inputs.size() == 1 || root_node.attr().count("T") == 0) {
return inputs[0];
}
auto shape = inputs[0].shape;
DataType dtype = root_node.attr().at("T").type();
NodeDef* node = AddEmptyNode(node_name);
node->set_op("AddN");
node->set_device(root_node.device());
(*node->mutable_attr())["T"].set_type(dtype);
(*node->mutable_attr())["N"].set_i(inputs.size());
for (const auto& inputAndShape : inputs) {
ctx().node_map->AddOutput(inputAndShape.input, node_name);
node->add_input(inputAndShape.input);
}
MarkWithTag(kAddOpsRewriteTag, node);
return InputAndShape(node_name, shape);
}
InputAndShape AddAggregatedInputs(const NodeDef& root_node,
const string& node_name,
const InputAndShape& left,
const InputAndShape& right) {
DataType dtype = root_node.attr().at("T").type();
NodeDef* node = AddEmptyNode(node_name);
node->set_op((dtype == DT_STRING || dtype == DT_STRING_REF) ? "Add"
: "AddV2");
node->set_device(root_node.device());
(*node->mutable_attr())["T"].set_type(dtype);
node->add_input(left.input);
node->add_input(right.input);
ctx().node_map->AddOutput(left.input, node_name);
ctx().node_map->AddOutput(right.input, node_name);
MarkWithTag(kAddOpsRewriteTag, node);
return InputAndShape(
node_name, TensorShapeProto());
}
};
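// Hoists a factor shared by all terms out of an aggregation:
//   AddN(x * a, x * b, x * c)  =>  x * AddN(a, b, c)
//   AddN(a / x, b / x)         =>  AddN(a, b) / x   (float/double only)
// Applied only when the remaining unique factors have compatible shapes, so
// the inner aggregation stays well defined.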
class HoistCommonFactorOutOfAggregation : public ArithmeticOptimizerStage {
public:
explicit HoistCommonFactorOutOfAggregation(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("HoistCommonFactor", ctx, ctx_ext) {}
~HoistCommonFactorOutOfAggregation() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsAggregate(*node) && NumNonControlInputs(*node) > 1 &&
!IsRewritten(node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
bool common_factor_is_denominator = false;
std::set<string> common_factors;
std::vector<string> ctrl_deps;
TF_RETURN_IF_ERROR(GetCommonFactors(
node, &common_factors, &common_factor_is_denominator, &ctrl_deps));
if (common_factors.size() == 1) {
const string& common_factor = *common_factors.begin();
bool shapes_match = true;
std::vector<string> unique_factors;
TF_RETURN_IF_ERROR(GetUniqueFactors(node, common_factor,
common_factor_is_denominator,
&shapes_match, &unique_factors));
if (shapes_match) {
NodeDef* input_0;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input_0));
NodeDef* new_outer_node = AddCopyNode(
OuterNodeName(node, common_factor_is_denominator), input_0);
NodeDef* new_add_node = AddCopyNode(InnerAddNodeName(node), node);
new_outer_node->set_device(node->device());
if (common_factor_is_denominator) {
new_outer_node->set_input(0, new_add_node->name());
new_outer_node->set_input(1, common_factor);
} else {
new_outer_node->set_input(0, common_factor);
new_outer_node->set_input(1, new_add_node->name());
}
ctx().node_map->AddOutput(common_factor, new_outer_node->name());
ctx().node_map->AddOutput(new_add_node->name(), new_outer_node->name());
for (int i = 0, end = unique_factors.size(); i < end; ++i) {
const string& unique_factor_i = unique_factors[i];
new_add_node->set_input(i, unique_factor_i);
ctx().node_map->AddOutput(unique_factor_i, new_add_node->name());
}
for (const string& ctrl_dep : ctrl_deps) {
*new_add_node->add_input() = ctrl_dep;
ctx().node_map->AddOutput(NodeName(ctrl_dep), new_add_node->name());
}
AddToOptimizationQueue(new_add_node);
rewritten_nodes_.insert(node->name());
*simplified_node_name = new_outer_node->name();
}
}
return absl::OkStatus();
}
private:
string OuterNodeName(const NodeDef* node, bool is_div) const {
auto scope_and_name = ParseNodeScopeAndName(node->name());
return is_div ? OptimizedNodeName(scope_and_name, "Div")
: OptimizedNodeName(scope_and_name, "Mul");
}
string InnerAddNodeName(const NodeDef* node) const {
auto scope_and_name = ParseNodeScopeAndName(node->name());
return OptimizedNodeName(scope_and_name, "AddV2");
}
Status GetCommonFactors(const NodeDef* node, std::set<string>* common_factors,
bool* common_factor_is_denominator,
std::vector<string>* ctrl_deps) const {
CHECK(common_factors->empty());
CHECK_NOTNULL(common_factor_is_denominator);
*common_factor_is_denominator = false;
bool has_mul = false;
bool has_div = false;
for (int i = 0; i < node->input_size(); ++i) {
if (i > 0 && common_factors->empty()) break;
if (IsControlInput(node->input(i))) {
ctrl_deps->push_back(node->input(i));
continue;
}
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(i), &input));
if ((!IsMul(*input) && !IsAnyDiv(*input)) || (IsMul(*input) && has_div) ||
(IsAnyDiv(*input) && has_mul)) {
common_factors->clear();
break;
} else if (IsAnyDiv(*input)) {
has_div = true;
const OpInfo::TensorProperties* properties0;
const OpInfo::TensorProperties* properties1;
TF_RETURN_IF_ERROR(GetTensorProperties(input->input(0), &properties0));
TF_RETURN_IF_ERROR(GetTensorProperties(input->input(1), &properties1));
if (properties0->dtype() != DT_FLOAT &&
properties0->dtype() != DT_DOUBLE &&
properties1->dtype() != DT_FLOAT &&
properties1->dtype() != DT_DOUBLE) {
common_factors->clear();
break;
}
} else if (IsMul(*input)) {
has_mul = true;
}
std::set<string> factors_i =
has_mul ? std::set<string>{input->input(0), input->input(1)}
: std::set<string>{input->input(1)};
if (i == 0) {
std::swap(*common_factors, factors_i);
} else {
std::set<string> intersection;
std::set_intersection(
factors_i.begin(), factors_i.end(), common_factors->begin(),
common_factors->end(),
std::inserter(intersection, intersection.begin()));
std::swap(*common_factors, intersection);
}
for (int i = 2; i < input->input_size(); ++i) {
ctrl_deps->push_back(input->input(i));
}
}
*common_factor_is_denominator = has_div;
return absl::OkStatus();
}
Status GetUniqueFactors(const NodeDef* node, const string& common_factor,
const bool common_factor_is_denominator,
bool* shapes_match,
std::vector<string>* unique_factors) const {
*shapes_match = true;
unique_factors->reserve(node->input_size());
for (int i = 0; i < node->input_size() && *shapes_match; ++i) {
const string& input = node->input(i);
if (IsControlInput(input)) {
break;
}
NodeDef* inner_node;
TF_RETURN_IF_ERROR(GetInputNode(input, &inner_node));
const int unique_factor_index =
common_factor_is_denominator
? 0
: (inner_node->input(0) == common_factor ? 1 : 0);
unique_factors->push_back(inner_node->input(unique_factor_index));
if (i > 0 && !IsAdd(*node)) {
const OpInfo::TensorProperties* lhs;
const OpInfo::TensorProperties* rhs;
TF_RETURN_IF_ERROR(GetTensorProperties(unique_factors->front(), &lhs));
TF_RETURN_IF_ERROR(GetTensorProperties(unique_factors->back(), &rhs));
*shapes_match = ShapesSymbolicallyEqual(*lhs, *rhs);
}
}
return absl::OkStatus();
}
bool IsRewritten(const NodeDef* node) const {
return rewritten_nodes_.find(node->name()) != rewritten_nodes_.end() ||
ctx().node_map->NodeExists(OuterNodeName(node, false)) ||
ctx().node_map->NodeExists(OuterNodeName(node, true)) ||
ctx().node_map->NodeExists(InnerAddNodeName(node));
}
std::unordered_set<string> rewritten_nodes_;
};
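// Reorders the operands of a tree of associative binary ops (Add or Mul) so
// that tensors with small, equal shapes are combined first and broadcasting
// to the largest shape happens as late as possible:
//   Mul(Mul(big, small1), small2)  =>  Mul(Mul(small1, small2), big)
// The existing nodes are reused; only their inputs are rewired.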
class MinimizeBroadcasts : public ArithmeticNodesGroupOptimizerStage {
public:
explicit MinimizeBroadcasts(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticNodesGroupOptimizerStage("MinimizeBroadcasts", ctx, ctx_ext) {
}
~MinimizeBroadcasts() override = default;
bool IsSupported(const NodeDef* node) const override {
if (!IsBinaryAssociative(*node)) return false;
if (IsMarkedWithAnyTag(*node, kMinimizeBroadcastsTag, kAddOpsRewriteTag))
return false;
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node->name(), &properties);
return has_properties.ok() && ShapeIsSymbolicallyDefined(*properties) &&
HasAllInputsBroadcastableToShape(*node, *properties);
}
protected:
bool IsBinaryAssociative(const NodeDef& node) const {
return IsMul(node) || IsAdd(node);
}
bool IsSameOp(const OptimizedNodesGroup& group, const NodeDef& node) const {
return group.root_node->op() == node.op();
}
bool IsAbsorbableByOptimizedNodesGroup(const OptimizedNodesGroup& group,
const NodeDef& node) const override {
if (!IsSameOp(group, node)) {
return false;
}
if (IsInPreserveSet(node)) {
return false;
}
if (IsMarkedWithAnyTag(node, kMinimizeBroadcastsTag, kAddOpsRewriteTag)) {
return false;
}
if (IsDrivenByControlDependency(node) || DrivesControlDependency(node)) {
return false;
}
if (!IsOnTheSameDevice(group, node)) {
return false;
}
if (NumNonControlOutputs(node, *ctx().node_map) != 1) {
return false;
}
const OpInfo::TensorProperties* properties;
Status has_properties = GetTensorProperties(node.name(), &properties);
return has_properties.ok() &&
HasAllInputsBroadcastableToShape(node, *properties);
}
std::size_t CountUniqueShapes(const std::vector<InputAndShape>& inputs) {
std::set<string> sigs;
for (const auto& ias : inputs) {
sigs.insert(ShapeSignature(ias.shape));
}
return sigs.size();
}
string RewriteOptimizedNodesGroup(const OptimizedNodesGroup& group) override {
VLOG(2) << "Minimize broadcast: root=" << group.root_node->name()
<< " op=" << group.root_node->op()
<< " num_optimized_nodes=" << group.optimized_nodes.size();
MarkAllMembersWithTag(group, kMinimizeBroadcastsTag);
if (CountUniqueShapes(group.inputs) <= 1) {
VLOG(3) << "Skip min-bcast group with single unique shape";
return group.root_node->name();
}
auto num_nodes = 1 + group.optimized_nodes.size();
auto num_inputs = group.inputs.size();
CHECK_EQ(num_nodes, num_inputs - 1)
<< "Can't build a tree with " << num_inputs << " inputs, using "
<< num_nodes << "binary op nodes.";
std::deque<InputAndShape> add_ops(group.inputs.begin(), group.inputs.end());
std::deque<NodeDef*> optimized_nodes(group.optimized_nodes.begin(),
group.optimized_nodes.end());
std::stable_sort(add_ops.begin(), add_ops.end(),
[](const InputAndShape& lhs, const InputAndShape& rhs) {
return CompareSymbolicallyShapedTensorSizes(lhs.shape,
rhs.shape);
});
std::deque<InputAndShape> add_ops_leftover;
if (add_ops.size() % 2 != 0) {
add_ops_leftover.push_back(add_ops.back());
add_ops.pop_back();
}
do {
const InputAndShape lhs = add_ops.front();
add_ops.pop_front();
const InputAndShape rhs = add_ops.front();
add_ops.pop_front();
NodeDef* node;
if (!optimized_nodes.empty()) {
node = optimized_nodes.back();
optimized_nodes.pop_back();
} else {
node = group.root_node;
}
InputAndShape updated_node = UpdateInputs(lhs.input, rhs.input, node);
if (add_ops.size() >= 2 &&
CompareSymbolicallyShapedTensorSizes(add_ops.at(0).shape,
add_ops.at(1).shape)) {
add_ops.push_front(updated_node);
} else {
add_ops.push_back(updated_node);
}
} while (add_ops.size() > 1);
CHECK_EQ(1, add_ops.size());
if (!add_ops_leftover.empty()) {
const InputAndShape lhs = add_ops.front();
add_ops.pop_front();
const InputAndShape rhs = add_ops_leftover.front();
InputAndShape updated_node =
UpdateInputs(lhs.input, rhs.input, group.root_node);
add_ops.push_back(updated_node);
}
return add_ops.front().input;
}
InputAndShape UpdateInputs(const string& input_0, const string& input_1,
NodeDef* node) {
string old_input_0 = node->input(0);
string old_input_1 = node->input(1);
if (old_input_0 != input_0 || old_input_1 != input_1) {
node->set_input(0, input_0);
node->set_input(1, input_1);
ctx().graph_properties->ClearOutputProperties(node->name());
ctx().graph_properties->ClearInputProperties(node->name());
ctx().node_map->RemoveOutput(NodeName(old_input_0), node->name());
ctx().node_map->RemoveOutput(NodeName(old_input_1), node->name());
ctx().node_map->AddOutput(NodeName(input_0), node->name());
ctx().node_map->AddOutput(NodeName(input_1), node->name());
AddToOptimizationQueue(node);
}
TensorShapeProto shape;
return InputAndShape(node->name(), shape);
}
};
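// Removes transposes that do nothing: two (Conjugate)Transposes with inverse
// permutations cancel out (possibly across a chain of idempotent ops), and a
// transpose with the identity permutation is a no-op (a ConjugateTranspose
// with the identity permutation degenerates to a Conj).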
class RemoveIdentityTranspose : public ArithmeticOptimizerStage {
public:
explicit RemoveIdentityTranspose(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveIdentityTranspose", ctx, ctx_ext) {}
~RemoveIdentityTranspose() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsTranspose(*node) || IsConjugateTranspose(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
NodeDef* tail = node;
tail = GetTailOfIdempotentChain(*tail, *ctx().node_map,
*ctx().nodes_to_preserve);
NodeDef* first_transpose;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &first_transpose));
NodeDef* node_perm;
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &node_perm));
if (!IsConstant(*node_perm)) {
return absl::OkStatus();
}
std::vector<int64_t> node_perm_values;
TF_RETURN_IF_ERROR(GetPermutation(*node_perm, &node_perm_values));
if (first_transpose->op() == node->op()) {
NodeDef* first_transpose_perm;
TF_RETURN_IF_ERROR(
GetInputNode(first_transpose->input(1), &first_transpose_perm));
if (!IsConstant(*first_transpose_perm)) {
return absl::OkStatus();
}
std::vector<int64_t> first_transpose_perm_values;
TF_RETURN_IF_ERROR(
GetPermutation(*first_transpose_perm, &first_transpose_perm_values));
if (AreInversePermutations(node_perm_values,
first_transpose_perm_values)) {
if (tail == node) {
*simplified_node_name = first_transpose->input(0);
} else {
tail->set_input(0, first_transpose->input(0));
ctx().node_map->UpdateInput(tail->name(), first_transpose->name(),
first_transpose->input(0));
ForwardControlDependencies(tail, {first_transpose});
*simplified_node_name = node->input(0);
}
}
} else {
if (IsIdentityPermutation(node_perm_values)) {
if (IsConjugateTranspose(*node)) {
const NodeScopeAndName transpose =
ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(transpose);
NodeDef* new_op = AddCopyNode(optimized_node_name, node);
new_op->set_op("Conj");
new_op->mutable_input()->RemoveLast();
new_op->mutable_attr()->erase("Tperm");
ForwardControlDependencies(new_op, {node});
*simplified_node_name = new_op->name();
} else {
*simplified_node_name = node->input(0);
}
}
}
return absl::OkStatus();
}
private:
Status GetPermutation(const NodeDef& node_perm,
std::vector<int64_t>* perm64) const {
std::vector<int> perm32;
if (ValuesFromConstNode(node_perm, &perm32)) {
perm64->reserve(perm32.size());
for (int val : perm32) {
perm64->push_back(static_cast<int64_t>(val));
}
return absl::OkStatus();
}
if (ValuesFromConstNode(node_perm, perm64)) {
return absl::OkStatus();
}
return errors::InvalidArgument("Couldn't extract permutation from ",
node_perm.name());
}
bool AreInversePermutations(const std::vector<int64_t>& a,
const std::vector<int64_t>& b) {
if (a.size() != b.size()) {
return false;
}
for (int i = 0, end = a.size(); i < end; ++i) {
if (a[b[i]] != i) {
return false;
}
}
return true;
}
bool IsIdentityPermutation(const std::vector<int64_t>& perm) {
for (int64_t i = 0, end = perm.size(); i < end; ++i) {
if (i != perm[i]) {
return false;
}
}
return true;
}
};
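// Removes a pair of ops f(f(x)) when f is an involution, i.e. its own
// inverse (e.g. Neg, Conj, Reciprocal). The two instances may be separated
// by a chain of value-preserving ops.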
class RemoveInvolution : public ArithmeticOptimizerStage {
public:
explicit RemoveInvolution(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveInvolution", ctx, ctx_ext) {}
~RemoveInvolution() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsInvolution(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* tail = GetTailOfValuePreservingChain(*node, *ctx().node_map,
*ctx().nodes_to_preserve);
NodeDef* involution;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &involution));
if (involution->op() == node->op()) {
if (tail == node) {
*simplified_node_name = involution->input(0);
} else {
tail->set_input(0, involution->input(0));
ctx().node_map->UpdateInput(tail->name(), involution->name(),
involution->input(0));
*simplified_node_name = node->input(0);
}
}
return absl::OkStatus();
}
};
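// Removes redundant Bitcasts: a Bitcast whose input and output types match
// is forwarded, and Bitcast(Bitcast(x, t1), t2) collapses to Bitcast(x, t2).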
class RemoveRedundantBitcastStage : public ArithmeticOptimizerStage {
public:
explicit RemoveRedundantBitcastStage(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveRedundantBitcast", ctx, ctx_ext) {}
~RemoveRedundantBitcastStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsBitcast(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
AttrSlice attrs(*node);
DataType input_type;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "T", &input_type));
DataType output_type;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "type", &output_type));
if ((input_type == output_type) && !IsInPreserveSet(*node)) {
*simplified_node_name = node->input(0);
return absl::OkStatus();
}
NodeDef* bitcast;
TF_RETURN_IF_ERROR(GetInputNode(node->name(), &bitcast));
NodeDef* operand;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &operand));
if (IsBitcast(*operand) && !IsInPreserveSet(*operand)) {
AttrSlice operand_attrs(*operand);
DataType operand_input_type;
TF_RETURN_IF_ERROR(GetNodeAttr(operand_attrs, "T", &operand_input_type));
bitcast->set_input(0, operand->input(0));
SetDataTypeToAttr(operand_input_type, "T", bitcast);
ctx().node_map->UpdateInput(bitcast->name(), bitcast->input(0),
operand->input(0));
AddToOptimizationQueue(bitcast);
*simplified_node_name = bitcast->name();
}
return absl::OkStatus();
}
};
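// Removes Cast nodes whose source and destination types are identical.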
class RemoveRedundantCastStage : public ArithmeticOptimizerStage {
public:
explicit RemoveRedundantCastStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveRedundantCast", ctx, ctx_ext) {}
~RemoveRedundantCastStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsCast(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(EnsureNodeIsSupported(node));
AttrSlice attrs(*node);
DataType input_type;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "SrcT", &input_type));
DataType output_type;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "DstT", &output_type));
if (input_type == output_type) {
*simplified_node_name = node->input(0);
}
return absl::OkStatus();
}
};
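// Rewrites an addition or subtraction of a negation into the direct form:
//   Add(x, Neg(y))  =>  Sub(x, y)
//   Add(Neg(x), y)  =>  Sub(y, x)
//   Sub(x, Neg(y))  =>  AddV2(x, y)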
class RemoveNegationStage : public ArithmeticOptimizerStage {
public:
explicit RemoveNegationStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveNegation", ctx, ctx_ext) {}
~RemoveNegationStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return (IsAdd(*node) || IsSub(*node)) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* x;
NodeDef* y;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &x));
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &y));
bool updated = false;
if (IsNeg(*y)) {
ForwardControlDependencies(node, {y});
ctx().node_map->UpdateInput(node->name(), node->input(1), y->input(0));
node->set_op(IsAdd(*node) ? "Sub" : "AddV2");
node->set_input(1, y->input(0));
updated = true;
} else if (IsAdd(*node) && IsNeg(*x)) {
ForwardControlDependencies(node, {x});
ctx().node_map->UpdateInput(node->name(), node->input(0), x->input(0));
node->set_op("Sub");
node->mutable_input()->SwapElements(0, 1);
node->set_input(1, x->input(0));
updated = true;
}
if (updated) {
AddToOptimizationQueue(node);
}
return absl::OkStatus();
}
};
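// Folds a LogicalNot into the preceding comparison by inverting it, e.g.
//   LogicalNot(Equal(x, y))  =>  NotEqual(x, y)
//   LogicalNot(Less(x, y))   =>  GreaterEqual(x, y)
// Applied only when the comparison has no other consumers.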
class RemoveLogicalNotStage : public ArithmeticOptimizerStage {
public:
explicit RemoveLogicalNotStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveLogicalNot", ctx, ctx_ext) {}
~RemoveLogicalNotStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsLogicalNot(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
const string node_name = node->name();
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
if (IsInPreserveSet(*input) ||
NumNonControlOutputs(*input, *ctx().node_map) > 1) {
return absl::OkStatus();
}
string new_op;
if (IsEqual(*input)) {
new_op = "NotEqual";
} else if (IsNotEqual(*input)) {
new_op = "Equal";
} else if (IsLess(*input)) {
new_op = "GreaterEqual";
} else if (IsLessEqual(*input)) {
new_op = "Greater";
} else if (IsGreater(*input)) {
new_op = "LessEqual";
} else if (IsGreaterEqual(*input)) {
new_op = "Less";
}
if (!new_op.empty()) {
input->set_op(new_op);
*simplified_node_name = input->name();
}
return absl::OkStatus();
}
};
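// Hoists a chain of unary elementwise ops that is applied identically to all
// inputs of a Concat (or to all outputs of a Split/SplitV), so the chain is
// computed only once:
//   Concat([Exp(Sin(x)), Exp(Sin(y))])  =>  Exp(Sin(Concat([x, y])))
//   [Exp(Sin(y)) for y in Split(x)]     =>  Split(Exp(Sin(x)))
// The per-branch chains must be identical and have no outside consumers.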
class HoistCWiseUnaryChainsStage : public ArithmeticOptimizerStage {
public:
explicit HoistCWiseUnaryChainsStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("", ctx, ctx_ext) {}
~HoistCWiseUnaryChainsStage() override = default;
struct ChainLink {
ChainLink() = default;
ChainLink(NodeDef* _node, int _port_origin)
: node(_node), port_origin(_port_origin) {}
NodeDef* node;
int port_origin;
bool operator<(const ChainLink& other) const {
if (port_origin < other.port_origin) {
return true;
} else if (port_origin > other.port_origin) {
return false;
} else {
return node->name() < other.node->name();
}
}
};
using ChainLinkSet = std::set<ChainLink>;
bool IsSupported(const NodeDef* node) const override {
if (IsInPreserveSet(*node)) return false;
if (IsConcat(*node) && node->attr().count("N") != 0) {
const int n = node->attr().at("N").i();
return n > 1 && FirstNInputsAreUnique(*node, n);
} else if ((IsSplit(*node) || IsSplitV(*node)) &&
node->attr().count("num_split") != 0) {
const int num_split = node->attr().at("num_split").i();
if (NumNonControlOutputs(*node, *ctx().node_map) > num_split) {
return false;
}
if (NumControlOutputs(*node, *ctx().node_map) > 0) {
return false;
}
return num_split > 1 && !IsAlreadyOptimized(*node);
}
return false;
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
node_is_concat_ = IsConcat(*node);
int prefix_length;
std::set<string> ctrl_inputs;
ChainLinkSet tails;
TF_RETURN_IF_ERROR(
FindCommonUnaryOpChain(*node, &prefix_length, &tails, &ctrl_inputs));
if (prefix_length > 0 && !tails.empty()) {
TF_RETURN_IF_ERROR(
HoistUnaryOpChain(prefix_length, tails, &ctrl_inputs, node));
}
return absl::OkStatus();
}
private:
bool FirstNInputsAreUnique(const NodeDef& node, int n) const {
if (n > node.input_size()) return false;
absl::flat_hash_set<string> unique_inputs;
const int start = node.op() == "Concat" ? 1 : 0;
const int end = start + n;
for (int i = start; i < end; ++i) {
unique_inputs.insert(node.input(i));
}
int unique_input_size = unique_inputs.size();
return unique_input_size == n;
}
Status FindCommonUnaryOpChain(const NodeDef& root_node, int* prefix_length,
ChainLinkSet* tails,
std::set<string>* ctrl_inputs) const {
*prefix_length = 0;
ChainLinkSet cur_tails;
TF_RETURN_IF_ERROR(InitializeChains(root_node, &cur_tails));
if (cur_tails.size() < 2) {
return absl::OkStatus();
}
ctrl_inputs->clear();
bool stop = false;
while (!stop && !cur_tails.empty() &&
OpsAreSafeToHoist(root_node, cur_tails)) {
++(*prefix_length);
tails->swap(cur_tails);
GatherControlInputs(ctrl_inputs, *tails);
TF_RETURN_IF_ERROR(AdvanceTails(*tails, &cur_tails, &stop));
}
return absl::OkStatus();
}
Status HoistUnaryOpChain(const int prefix_length, const ChainLinkSet& tails,
std::set<string>* ctrl_inputs, NodeDef* root_node) {
VLOG(3) << "Hoist unary op chain:"
<< " root=" << root_node->DebugString()
<< " prefix_length=" << prefix_length << " ctrl_inputs=["
<< absl::StrJoin(*ctrl_inputs, ", ") << "]";
if (tails.empty()) {
return absl::OkStatus();
}
AddToOptimizationQueue(root_node);
optimized_nodes_.insert(root_node->name());
if (node_is_concat_) {
AddControlInputs(ctrl_inputs, root_node);
return HoistChainForConcat(prefix_length, tails, root_node);
} else {
return HoistChainForSplit(prefix_length, tails, ctrl_inputs, root_node);
}
}
void GatherControlInputs(std::set<string>* ctrl_inputs,
const ChainLinkSet& ops) const {
for (const auto& link : ops) {
const NodeDef* node = link.node;
for (int i = node->input_size() - 1; i >= 0; --i) {
const string& input = node->input(i);
if (!IsControlInput(input)) break;
ctrl_inputs->insert(input);
}
}
}
void AddControlInputs(std::set<string>* new_ctrl_inputs,
NodeDef* node) const {
for (int i = node->input_size() - 1; i >= 0; --i) {
const string& existing_input = node->input(i);
if (!IsControlInput(existing_input)) break;
new_ctrl_inputs->erase(existing_input);
}
for (const string& new_input : *new_ctrl_inputs) {
ctx().node_map->AddOutput(NodeName(new_input), node->name());
node->add_input(new_input);
}
}
Status InitializeChains(const NodeDef& node, ChainLinkSet* tails) const {
if (node_is_concat_) {
TF_RETURN_IF_ERROR(CheckAttrExists(node, "N"));
const int n = node.attr().at("N").i();
const int start = node.op() == "Concat" ? 1 : 0;
const int end = start + n;
if (end > node.input_size()) {
return errors::FailedPrecondition("Got attr N=", n,
" without enough inputs.");
}
for (int input_port = start; input_port < end; ++input_port) {
if (IsControlInput(node.input(input_port))) {
return errors::FailedPrecondition(
"Got control input ", node.input(input_port),
" where normal input was expected.");
}
NodeDef* tail;
TF_RETURN_IF_ERROR(GetInputNode(node.input(input_port), &tail));
tails->insert(ChainLink(tail, input_port));
}
return absl::OkStatus();
} else {
const auto& outputs = ctx().node_map->GetOutputs(node.name());
for (NodeDef* output : outputs) {
if (output->input_size() == 0 || IsControlInput(output->input(0))) {
continue;
}
TensorId tensor_id = ParseTensorName(output->input(0));
if (tensor_id.node() == node.name()) {
tails->insert(ChainLink(output, tensor_id.index()));
} else {
tails->clear();
return absl::OkStatus();
}
}
}
return absl::OkStatus();
}
bool OpsAreSafeToHoist(const NodeDef& root_node,
const ChainLinkSet& ops) const {
if (ops.empty()) return true;
const NodeDef* op0 = ops.begin()->node;
if (ModifiesFrameInfo(*op0) || !IsUnaryElementWise(*op0)) return false;
for (const auto& link : ops) {
const NodeDef* op = link.node;
if (op->device() != root_node.device() || op->op() != op0->op() ||
IsInPreserveSet(*op)) {
return false;
}
if (ctx().node_map->GetOutputs(op->name()).size() > 1) {
return false;
}
if (IsRelu(*op) || IsRelu6(*op)) {
NodeDef* operand = nullptr;
if (!GetInputNode(op->input(0), &operand).ok()) {
return false;
}
if (IsFusedBatchNorm(*operand) || IsBiasAdd(*operand)) {
return false;
}
}
}
return true;
}
Status AdvanceTails(const ChainLinkSet& tails, ChainLinkSet* new_tails,
bool* stop) const {
*stop = true;
new_tails->clear();
for (const auto& link : tails) {
const NodeDef* tail = link.node;
if (node_is_concat_) {
if (tail->input_size() == 0 || IsControlInput(tail->input(0))) {
return absl::OkStatus();
}
NodeDef* new_tail;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &new_tail));
new_tails->insert(ChainLink(new_tail, link.port_origin));
} else {
for (NodeDef* new_tail : ctx().node_map->GetOutputs(tail->name())) {
const TensorId tensor = ParseTensorName(new_tail->input(0));
if (tensor.node() != tail->name()) {
return absl::OkStatus();
}
if (tensor.index() >= 0) {
new_tails->insert(ChainLink(new_tail, link.port_origin));
}
}
}
}
*stop = false;
return absl::OkStatus();
}
Status HoistChainForConcat(const int prefix_length, const ChainLinkSet& tails,
NodeDef* concat_node) {
const string& concat_name = concat_node->name();
const int first_input = concat_node->op() == "Concat" ? 1 : 0;
for (const auto& link : tails) {
NodeDef* tail = CHECK_NOTNULL(link.node);
const int concat_port = link.port_origin;
CHECK_GE(concat_port, 0);
CHECK_LT(concat_port, concat_node->input_size());
const string concat_input = concat_node->input(concat_port);
const string tail_input = tail->input(0);
concat_node->set_input(concat_port, tail_input);
ctx().node_map->UpdateInput(concat_name, concat_input, tail_input);
if (concat_port == first_input) {
TF_RETURN_IF_ERROR(UpdateConsumers(concat_node, concat_input));
tail->set_input(0, concat_name);
ctx().node_map->UpdateInput(tail->name(), tail_input, concat_name);
}
}
return absl::OkStatus();
}
Status HoistChainForSplit(const int prefix_length, const ChainLinkSet& tails,
std::set<string>* ctrl_inputs,
NodeDef* split_node) {
const string& split_name = split_node->name();
auto root_scope_and_name = ParseNodeScopeAndName(split_name);
NodeDef* cur_tail = tails.begin()->node;
NodeDef* cur_copy = AddCopyNode(
OptimizedNodeName(root_scope_and_name, cur_tail->name()), cur_tail);
cur_copy->clear_input();
const int value_slot = split_node->op() == "SplitV" ? 0 : 1;
const string orig_input = split_node->input(value_slot);
split_node->set_input(value_slot, cur_copy->name());
ctx().node_map->UpdateInput(split_node->name(), orig_input,
cur_copy->name());
TF_RETURN_IF_ERROR(GetInputNode(cur_tail->input(0), &cur_tail));
while (cur_tail != split_node) {
NodeDef* new_copy = AddCopyNode(
OptimizedNodeName(root_scope_and_name, cur_tail->name()), cur_tail);
new_copy->clear_input();
cur_copy->add_input(new_copy->name());
ctx().node_map->AddOutput(new_copy->name(), cur_copy->name());
cur_copy = new_copy;
TF_RETURN_IF_ERROR(GetInputNode(cur_tail->input(0), &cur_tail));
}
cur_copy->add_input(orig_input);
ctx().node_map->UpdateOutput(NodeName(orig_input), split_name,
cur_copy->name());
AddControlInputs(ctrl_inputs, cur_copy);
for (const auto& link : tails) {
TF_RETURN_IF_ERROR(UpdateConsumers(
link.node, link.port_origin == 0
? split_name
: strings::StrCat(split_name, ":", link.port_origin)));
}
return absl::OkStatus();
}
bool IsAlreadyOptimized(const NodeDef& node) const {
return optimized_nodes_.find(node.name()) != optimized_nodes_.end();
}
bool node_is_concat_;
std::unordered_set<string> optimized_nodes_;
};
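// Collapses f(f(x)) to f(x) when f is idempotent, i.e. applying it twice has
// the same effect as applying it once, and both instances run on the same
// device.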
class RemoveIdempotentStage : public ArithmeticOptimizerStage {
public:
explicit RemoveIdempotentStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveIdempotent", ctx, ctx_ext) {}
~RemoveIdempotentStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return node->input_size() == 1 && IsIdempotent(*node) &&
!IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
if (input->op() == node->op() && input->device() == node->device()) {
*simplified_node_name = node->input(0);
}
return absl::OkStatus();
}
};
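// Replaces division by a square root with multiplication by the reciprocal
// square root, which is cheaper:
//   Div(x, Sqrt(y))    =>  Mul(x, Rsqrt(y))
//   Xdivy(x, Sqrt(y))  =>  MulNoNan(Rsqrt(y), x)
// Skipped for FloorDiv/DivNoNan and when Sqrt(y) has other consumers.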
class SqrtDivToRsqrtMulStage : public ArithmeticOptimizerStage {
public:
explicit SqrtDivToRsqrtMulStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("SqrtDivToRsqrtMul", ctx, ctx_ext) {}
~SqrtDivToRsqrtMulStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsAnyDiv(*node) && !IsDivNoNan(*node) && !IsFloorDiv(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* y;
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &y));
if (IsSqrt(*y) && !IsInPreserveSet(*y) &&
(NumNonControlOutputs(*y, *ctx().node_map) == 1)) {
if (IsXdivy(*node)) {
node->set_op("MulNoNan");
node->mutable_input()->SwapElements(0, 1);
} else {
node->set_op("Mul");
}
y->set_op("Rsqrt");
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
}
return absl::OkStatus();
}
};
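// Fuses Square(Sub(x, y)) into the single op SquaredDifference(x, y); the
// Square is downgraded to an Identity. Skipped for complex types, where
// SquaredDifference is not equivalent.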
class FuseSquaredDiffStage : public ArithmeticOptimizerStage {
public:
explicit FuseSquaredDiffStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("FuseSquaredDiffStage", ctx, ctx_ext) {}
~FuseSquaredDiffStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsSquare(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* b;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &b));
if (IsSub(*b) && !IsInPreserveSet(*b) &&
(NumNonControlOutputs(*b, *ctx().node_map) == 1)) {
const DataType type = GetDataTypeFromAttr(*b, "T");
if ((type == DT_COMPLEX64) || (type == DT_COMPLEX128))
return absl::OkStatus();
node->set_op("Identity");
b->set_op("SquaredDifference");
AddToOptimizationQueue(node);
AddToOptimizationQueue(b);
}
return absl::OkStatus();
}
};
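// Fuses Log(Softmax(x)) into the single, numerically more stable op
// LogSoftmax(x); the Softmax is downgraded to an Identity.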
class LogSoftmaxStage : public ArithmeticOptimizerStage {
public:
explicit LogSoftmaxStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("LogSoftmaxStage", ctx, ctx_ext) {}
~LogSoftmaxStage() override = default;
bool IsSupported(const NodeDef* node) const override { return IsLog(*node); }
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* x;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &x));
if (IsSoftmax(*x) && !IsInPreserveSet(*x) &&
(NumNonControlOutputs(*x, *ctx().node_map) == 1)) {
node->set_op("LogSoftmax");
x->set_op("Identity");
AddToOptimizationQueue(node);
AddToOptimizationQueue(x);
}
return absl::OkStatus();
}
};
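// Removes Reshape/BroadcastTo nodes whose output shape is symbolically equal
// to their input shape. Also bypasses an inner Reshape that only feeds a
// chain of unary elementwise ops ending in another Reshape, since the outer
// Reshape alone determines the final shape:
//   Reshape(Unary(Reshape(x)))  =>  Reshape(Unary(x))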
class RemoveRedundantReshapeOrBroadcastTo : public ArithmeticOptimizerStage {
public:
explicit RemoveRedundantReshapeOrBroadcastTo(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveRedundantReshapeOrBroadcastTo", ctx,
ctx_ext) {}
~RemoveRedundantReshapeOrBroadcastTo() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsReshape(*node) || IsBroadcastTo(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
if (!IsInPreserveSet(*node) && InputMatchesTargetShape(*node) &&
!HasControlInputs(*node)) {
*simplified_node_name = node->input(0);
return absl::OkStatus();
}
if (IsReshape(*node)) {
bool skip = false;
absl::InlinedVector<const NodeDef*, 4UL> nodes_in_chain;
const auto predicate_fn = [this, node, &skip,
&nodes_in_chain](const NodeDef& input) {
nodes_in_chain.push_back(&input);
if ((input.name() != node->name() &&
NumNonControlOutputs(input, *ctx().node_map) > 1) ||
IsInPreserveSet(input) || ModifiesFrameInfo(input)) {
skip = true;
return false;
}
return IsUnaryElementWise(input);
};
NodeDef* tail =
GetTailOfChain(*node, *ctx().node_map,
false, predicate_fn);
if (!skip && tail != nullptr && !IsInPreserveSet(*tail)) {
NodeDef* reshape_to_bypass;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &reshape_to_bypass));
if (reshape_to_bypass == nullptr ||
(!IsReshape(*reshape_to_bypass) ||
NumNonControlOutputs(*reshape_to_bypass, *ctx().node_map) > 1 ||
IsInPreserveSet(*reshape_to_bypass))) {
return absl::OkStatus();
}
for (const NodeDef* node_in_chain : nodes_in_chain) {
ctx().graph_properties->ClearInputProperties(node_in_chain->name());
if (node_in_chain != node) {
ctx().graph_properties->ClearOutputProperties(
node_in_chain->name());
}
}
TF_RETURN_IF_ERROR(
UpdateConsumers(reshape_to_bypass, reshape_to_bypass->input(0)));
ForwardControlDependencies(tail, {reshape_to_bypass});
ReplaceWithNoOp(reshape_to_bypass, ctx());
*simplified_node_name = node->name();
return absl::OkStatus();
}
}
return absl::OkStatus();
}
private:
bool InputMatchesTargetShape(const NodeDef& reshape) {
const OpInfo::TensorProperties* reshape_props;
const OpInfo::TensorProperties* input_props;
if (!GetTensorProperties(reshape.name(), &reshape_props).ok() ||
!GetTensorProperties(reshape.input(0), &input_props).ok()) {
return false;
}
return ShapesSymbolicallyEqual(input_props->shape(),
reshape_props->shape());
}
};
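// Swaps an adjacent cast-like op and value-preserving op so that the
// value-preserving op runs on the smaller data type. For a widening cast:
//   Transpose(Cast(x, float64))  =>  Cast(Transpose(x), float64)
// and, symmetrically, a narrowing cast is moved in front of a preceding
// value-preserving op. Only applied on CPU/GPU and to fixed-size types.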
class ReorderCastLikeAndValuePreserving : public ArithmeticOptimizerStage {
public:
explicit ReorderCastLikeAndValuePreserving(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReorderCastLikeAndValuePreserving", ctx,
ctx_ext) {}
~ReorderCastLikeAndValuePreserving() override = default;
bool IsSupported(const NodeDef* node) const override {
return (IsValuePreserving(*node) || IsCastLike(*node)) &&
!IsCheckNumerics(*node) && NodeIsOnCpuOrGpu(node) &&
!IsControlFlow(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* consumer, string* simplified_node_name) override {
NodeDef* producer;
if (consumer->input_size() < 1) {
return errors::FailedPrecondition("Node ", simplified_node_name,
" lacks inputs");
}
TF_RETURN_IF_ERROR(GetInputNode(consumer->input(0), &producer));
const bool producer_is_cast = IsCastLike(*producer);
const bool can_optimize =
!IsCheckNumerics(*producer) &&
((producer_is_cast && IsValuePreserving(*consumer)) ||
(IsValuePreserving(*producer) && IsCastLike(*consumer)));
if (!can_optimize || IsControlFlow(*producer) ||
IsInPreserveSet(*producer) ||
producer->device() != consumer->device()) {
return absl::OkStatus();
}
const NodeDef* cast_like_node = producer_is_cast ? producer : consumer;
const OpDef* cast_like_op_def = nullptr;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(cast_like_node->op(),
&cast_like_op_def));
DataType cast_src_type;
TF_RETURN_IF_ERROR(InputTypeForNode(*cast_like_node, *cast_like_op_def, 0,
&cast_src_type));
DataType cast_dst_type;
TF_RETURN_IF_ERROR(OutputTypeForNode(*cast_like_node, *cast_like_op_def, 0,
&cast_dst_type));
if (!IsFixedSizeType(cast_src_type) || !IsFixedSizeType(cast_dst_type)) {
return absl::OkStatus();
} else if (producer_is_cast &&
DataTypeSize(cast_dst_type) <= DataTypeSize(cast_src_type)) {
return absl::OkStatus();
} else if (!producer_is_cast &&
DataTypeSize(cast_dst_type) >= DataTypeSize(cast_src_type)) {
return absl::OkStatus();
}
const string optimized_producer_name = OptimizedNodeName(
ParseNodeScopeAndName(producer->name()), DataTypeString(cast_dst_type));
const string optimized_consumer_name = OptimizedNodeName(
ParseNodeScopeAndName(consumer->name()), DataTypeString(cast_src_type));
const bool is_already_optimized =
ctx().node_map->NodeExists(optimized_consumer_name) ||
ctx().node_map->NodeExists(optimized_producer_name);
if (is_already_optimized) {
return absl::OkStatus();
}
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(producer->input(0), &input));
NodeDef* new_producer = AddCopyNode(optimized_consumer_name, consumer);
new_producer->set_input(0, producer->input(0));
ctx().node_map->AddOutput(input->name(), new_producer->name());
NodeDef* new_consumer = AddCopyNode(optimized_producer_name, producer);
new_consumer->set_input(0, new_producer->name());
NodeDef* new_value_preserving =
producer_is_cast ? new_producer : new_consumer;
const DataType new_input_type =
producer_is_cast ? cast_src_type : cast_dst_type;
TF_RETURN_IF_ERROR(SetInputType(new_input_type, new_value_preserving));
TF_RETURN_IF_ERROR(IsKernelRegisteredForNode(*new_value_preserving));
ctx().node_map->AddOutput(new_producer->name(), new_consumer->name());
AddToOptimizationQueue(new_producer);
*simplified_node_name = new_consumer->name();
return absl::OkStatus();
}
private:
Status SetInputType(DataType dtype, NodeDef* node) {
const OpDef* op_def = nullptr;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node->op(), &op_def));
const OpDef::ArgDef& input_arg = op_def->input_arg(0);
const string& type_attr_name = input_arg.type_attr();
if (type_attr_name.empty()) {
if (input_arg.type() == DT_INVALID || input_arg.type() != dtype) {
return errors::InvalidArgument("Could not set input type of ",
node->op(), " op to ",
DataTypeString(dtype));
} else {
return absl::OkStatus();
}
}
SetDataTypeToAttr(dtype, type_attr_name, node);
return absl::OkStatus();
}
bool NodeIsOnCpuOrGpu(const NodeDef* node) const {
using absl::StrContains;
string task;
string device;
return DeviceNameUtils::SplitDeviceName(node->device(), &task, &device) &&
(StrContains(device, DEVICE_CPU) || StrContains(device, DEVICE_GPU));
}
bool IsFixedSizeType(DataType dtype) {
return dtype != DT_STRING && dtype != DT_VARIANT && dtype != DT_RESOURCE &&
!kQuantizedTypes.Contains(dtype);
}
};
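// Folds multiplication by a constant scalar through a convolution into its
// constant filter, where it can later be constant-folded away:
//   Conv2D(Mul(x, scale), filter)  =>  Conv2D(x, Mul(filter, scale))
// The Mul may be separated from the convolution by value-preserving ops.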
class FoldMultiplyIntoConv : public ArithmeticOptimizerStage {
public:
explicit FoldMultiplyIntoConv(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("FoldMultiplyIntoConv", ctx, ctx_ext) {}
~FoldMultiplyIntoConv() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsConv2D(*node) || IsConv3D(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
#define TF_RETURN_IF_TRUE(...) \
  if ((__VA_ARGS__)) return absl::OkStatus()
NodeDef* conv = node;
NodeDef* weights;
TF_RETURN_IF_ERROR(GetInputNode(conv->input(1), &weights));
TF_RETURN_IF_TRUE(!IsConstant(*weights));
const string scaled_weights_node_name =
OptimizedNodeName(ParseNodeScopeAndName(weights->name()),
strings::StrCat("scaled", "_", conv->name()));
TF_RETURN_IF_TRUE(ctx().node_map->NodeExists(scaled_weights_node_name));
NodeDef* tail = GetTailOfValuePreservingChain(*conv, *ctx().node_map,
*ctx().nodes_to_preserve);
NodeDef* source;
TF_RETURN_IF_ERROR(GetInputNode(tail->input(0), &source));
TF_RETURN_IF_TRUE(!IsAnyMul(*source));
TF_RETURN_IF_TRUE(NumNonControlOutputs(*source, *ctx().node_map) != 1);
TF_RETURN_IF_TRUE(IsInPreserveSet(*source));
const NodeDef* mul = source;
int input_idx = 0;
int scale_idx = 1;
NodeDef* scale;
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(mul->input(scale_idx), &scale));
TF_RETURN_IF_ERROR(GetInputNode(mul->input(input_idx), &input));
if (!IsConstant(*scale) && IsConstant(*input)) {
VLOG(3) << "Swapped inputs to mul";
std::swap(scale_idx, input_idx);
std::swap(scale, input);
}
TF_RETURN_IF_TRUE(!IsConstant(*scale));
const TensorProto& scale_tensor = scale->attr().at("value").tensor();
bool scale_is_a_scalar = scale_tensor.has_tensor_shape() &&
scale_tensor.tensor_shape().dim_size() == 0;
TF_RETURN_IF_TRUE(!scale_is_a_scalar);
TF_RETURN_IF_TRUE(!IsConstant(*scale));
TF_RETURN_IF_ERROR(CheckAttrsExist(*scale, {"dtype"}));
TF_RETURN_IF_ERROR(CheckAttrExists(*weights, "dtype"));
TF_RETURN_IF_TRUE(scale->attr().at("dtype").type() !=
weights->attr().at("dtype").type());
VLOG(3) << "Fold multiply into conv: conv=" << conv->name()
<< " mul=" << mul->name() << " weights=" << weights->name();
NodeDef* scaled_weights = AddEmptyNode(scaled_weights_node_name);
scaled_weights->set_op(source->op());
scaled_weights->set_device(weights->device());
(*scaled_weights->mutable_attr())["T"] = weights->attr().at("dtype");
AddToOptimizationQueue(scaled_weights);
scaled_weights->add_input(conv->input(1));
ctx().node_map->AddOutput(weights->name(), scaled_weights->name());
scaled_weights->add_input(mul->input(scale_idx));
ctx().node_map->AddOutput(scale->name(), scaled_weights->name());
ForwardControlDependencies(scaled_weights, {source});
conv->set_input(1, scaled_weights->name());
ctx().node_map->UpdateInput(conv->name(), weights->name(),
scaled_weights->name());
AddToOptimizationQueue(conv);
tail->set_input(0, mul->input(input_idx));
ctx().node_map->UpdateInput(tail->name(), mul->name(), input->name());
AddToOptimizationQueue(tail);
*simplified_node_name = conv->name();
return absl::OkStatus();
#undef TF_RETURN_IF_TRUE
}
};
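// Folds a Transpose/ConjugateTranspose of a MatMul operand into the MatMul
// attributes, provided the permutation only swaps the two innermost matrix
// dimensions:
//   MatMul(Transpose(x), y)  =>  MatMul(x, y, transpose_a=true)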
class FoldTransposeIntoMatMul : public ArithmeticOptimizerStage {
public:
explicit FoldTransposeIntoMatMul(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("FoldTransposeIntoMatMul", ctx, ctx_ext) {}
~FoldTransposeIntoMatMul() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsAnyMatMul(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
const NodeScopeAndName matmul = ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(matmul);
if (ctx().node_map->NodeExists(optimized_node_name))
return absl::OkStatus();
NodeDef* a;
NodeDef* b;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &a));
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &b));
bool is_complex = false;
if (node->op() != "SparseMatMul") {
const DataType type = GetDataTypeFromAttr(*node, "T");
is_complex = (type == DT_COMPLEX64) || (type == DT_COMPLEX128);
}
const std::set<string> foldable_transpose_ops =
!is_complex
? std::set<string>{"ConjugateTranspose", "Transpose"}
: (IsAnyBatchMatMul(*node) ? std::set<string>{"ConjugateTranspose"}
: std::set<string>{"Transpose"});
const bool a_is_foldable = foldable_transpose_ops.count(a->op()) > 0 &&
IsInnerMatrixTransposeNode(*a, ctx().node_map);
const bool b_is_foldable = foldable_transpose_ops.count(b->op()) > 0 &&
IsInnerMatrixTransposeNode(*b, ctx().node_map);
if (!a_is_foldable && !b_is_foldable) return absl::OkStatus();
NodeDef* new_op = AddCopyNode(optimized_node_name, node);
if (a_is_foldable) {
const string attr_a = IsAnyBatchMatMul(*node) ? "adj_x" : "transpose_a";
FlipBooleanAttr(attr_a, new_op);
new_op->set_input(0, a->input(0));
ctx().node_map->UpdateInput(new_op->name(), a->name(), a->input(0));
} else {
ctx().node_map->UpdateOutput(a->name(), node->name(), new_op->name());
}
if (b_is_foldable) {
const string attr_b = IsAnyBatchMatMul(*node) ? "adj_y" : "transpose_b";
FlipBooleanAttr(attr_b, new_op);
new_op->set_input(1, b->input(0));
ctx().node_map->UpdateInput(new_op->name(), b->name(), b->input(0));
} else {
ctx().node_map->UpdateOutput(b->name(), node->name(), new_op->name());
}
std::vector<const NodeDef*> deps_to_forward = {node};
if (a_is_foldable) deps_to_forward.push_back(a);
if (b_is_foldable) deps_to_forward.push_back(b);
ForwardControlDependencies(new_op, deps_to_forward);
*simplified_node_name = new_op->name();
return absl::OkStatus();
}
private:
void FlipBooleanAttr(const string& attr_name, NodeDef* node) {
const bool old_value =
!node->attr().count(attr_name) ? false : node->attr().at(attr_name).b();
(*node->mutable_attr())[attr_name].set_b(!old_value);
}
template <typename T>
bool IsInnerMatrixTranspose(const std::vector<T>& perm) {
const T n = perm.size();
if (n < 2) {
return false;
}
for (T i = 0; i < n - 2; ++i) {
if (perm[i] != i) {
return false;
}
}
return perm[n - 1] == n - 2 && perm[n - 2] == n - 1;
}
bool IsInnerMatrixTransposeNode(const NodeDef& transpose_node,
const NodeMap* node_map) {
if (transpose_node.op() != "Transpose" &&
transpose_node.op() != "ConjugateTranspose") {
return false;
}
const NodeDef* perm_node = node_map->GetNode(transpose_node.input(1));
std::vector<int> perm32;
if (ValuesFromConstNode(*perm_node, &perm32)) {
return IsInnerMatrixTranspose(perm32);
}
std::vector<int64_t> perm64;
if (ValuesFromConstNode(*perm_node, &perm64)) {
return IsInnerMatrixTranspose(perm64);
}
return false;
}
};
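// Fuse a Conj with an adjacent Transpose or ConjugateTranspose:
//   Conj(Transpose(x)) / Transpose(Conj(x))                   => ConjugateTranspose(x)
//   Conj(ConjugateTranspose(x)) / ConjugateTranspose(Conj(x)) => Transpose(x)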
class FoldConjugateIntoTranspose : public ArithmeticOptimizerStage {
public:
explicit FoldConjugateIntoTranspose(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("FoldConjugateIntoTranspose", ctx, ctx_ext) {}
~FoldConjugateIntoTranspose() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsConj(*node) || IsTranspose(*node) || IsConjugateTranspose(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
const NodeScopeAndName matmul = ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(matmul);
if (ctx().node_map->NodeExists(optimized_node_name))
return absl::OkStatus();
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
const NodeDef* transpose_op = node->op() == "Conj" ? input : node;
const NodeDef* conj_op = node->op() == "Conj" ? node : input;
if ((IsTranspose(*transpose_op) || IsConjugateTranspose(*transpose_op)) &&
IsConj(*conj_op)) {
NodeDef* new_op = AddCopyNode(optimized_node_name, transpose_op);
new_op->set_op(transpose_op->op() == "Transpose" ? "ConjugateTranspose"
: "Transpose");
new_op->set_input(0, input->input(0));
ctx().node_map->UpdateInput(new_op->name(), node->name(),
input->input(0));
ForwardControlDependencies(new_op, {node, input});
*simplified_node_name = new_op->name();
}
return absl::OkStatus();
}
};
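// Replace Mul(x, x) with Square(x). Complex types are rewritten only when
// the node is placed on CPU.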
class ReplaceMulWithSquare : public ArithmeticOptimizerStage {
public:
explicit ReplaceMulWithSquare(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReplaceMulWithSquare", ctx, ctx_ext) {}
~ReplaceMulWithSquare() override = default;
bool IsSupported(const NodeDef* node) const override {
if (!node || node->input_size() < 2) {
return false;
}
return IsAnyMul(*node) && node->input(0) == node->input(1);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
const NodeScopeAndName mul = ParseNodeScopeAndName(node->name());
const string optimized_node_name = OptimizedNodeName(mul);
if (ctx().node_map->NodeExists(optimized_node_name))
return absl::OkStatus();
const DataType type = GetDataTypeFromAttr(*node, "T");
bool is_complex = (type == DT_COMPLEX64) || (type == DT_COMPLEX128);
if (!is_complex || NodeIsOnCpu(*node)) {
NodeDef* new_square_node = AddCopyNode(optimized_node_name, node);
new_square_node->set_op("Square");
for (int i = 1; i < new_square_node->input_size(); ++i) {
new_square_node->set_input(i - 1, new_square_node->input(i));
}
new_square_node->mutable_input()->RemoveLast();
for (const string& input : new_square_node->input()) {
ctx().node_map->AddOutput(NodeName(input), new_square_node->name());
}
*simplified_node_name = new_square_node->name();
}
return absl::OkStatus();
}
};
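// Replace multiplication by a broadcastable all-ones tensor with a Tile op:
//   Mul(input, ones) => Tile(input, multiples)
// where multiples[i] = output_shape[i] / input_shape[i]. Both inputs must
// have fully known shapes of the same rank as the output.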
class ReplaceMulWithBroadcastByTile : public ArithmeticOptimizerStage {
public:
explicit ReplaceMulWithBroadcastByTile(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReplaceMulWithBroadcastByTile", ctx,
ctx_ext) {}
~ReplaceMulWithBroadcastByTile() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsMul(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef *input, *ones;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &ones));
if (IsInPreserveSet(*node) || IsInPreserveSet(*input) ||
IsInPreserveSet(*ones)) {
return absl::OkStatus();
}
if (IsConstant(*input) || !IsOnes(*ones)) return absl::OkStatus();
const NodeScopeAndName scope_and_name = ParseNodeScopeAndName(node->name());
const string tile_node_name = OptimizedNodeName(scope_and_name, "Tile");
const string const_node_name = OptimizedNodeName(scope_and_name, "Const");
if (ctx().node_map->NodeExists(tile_node_name) ||
ctx().node_map->NodeExists(const_node_name)) {
return absl::OkStatus();
}
const std::vector<OpInfo::TensorProperties>& props =
ctx().graph_properties->GetInputProperties(node->name());
if (props.size() != 2) return absl::OkStatus();
const TensorShapeProto& input_shape = props[0].shape();
const TensorShapeProto& ones_shape = props[1].shape();
TensorShapeProto output_shape;
if (!ShapeAfterBroadcast(input_shape, ones_shape, &output_shape)) {
return absl::OkStatus();
}
if (ShapesSymbolicallyEqual(input_shape, output_shape)) {
return absl::OkStatus();
}
if (input_shape.dim_size() != output_shape.dim_size() ||
ones_shape.dim_size() != output_shape.dim_size())
return absl::OkStatus();
VLOG(3) << "Simplify multiply with all ones input: node=" << node->name()
<< "@" << output_shape << " ones=" << ones->name() << "@"
<< ones_shape << " input=" << input->name() << "@" << input_shape;
Tensor multiples(DT_INT32, TensorShape({output_shape.dim_size()}));
for (int i = 0; i < output_shape.dim_size(); ++i) {
int64_t size = output_shape.dim(i).size() / input_shape.dim(i).size();
if (TF_PREDICT_FALSE(size >= INT_MAX)) {
return Status(absl::StatusCode::kOutOfRange, "int32 overflow");
}
multiples.flat<int32>()(i) = static_cast<int32>(size);
}
NodeDef* const_node = AddEmptyNode(const_node_name);
TF_RETURN_IF_ERROR(ConstantFolding::CreateNodeDef(
const_node->name(), TensorValue(&multiples), const_node));
const_node->set_device(node->device());
ForwardControlDependencies(const_node, {ones});
AddToOptimizationQueue(const_node);
const DataType type = GetDataTypeFromAttr(*node, "T");
NodeDef* tile_node = AddEmptyNode(tile_node_name);
tile_node->set_op("Tile");
tile_node->set_device(node->device());
SetDataTypeToAttr(type, "T", tile_node);
SetDataTypeToAttr(DT_INT32, "Tmultiples", tile_node);
tile_node->add_input(input->name());
tile_node->add_input(const_node->name());
ForwardControlDependencies(tile_node, {node});
*simplified_node_name = tile_node->name();
return absl::OkStatus();
}
protected:
bool IsOnes(const NodeDef& node) const {
if (!IsReallyConstant(node)) return false;
if (node.attr().at("dtype").type() != DT_FLOAT) return false;
Tensor tensor;
if (!tensor.FromProto(node.attr().at("value").tensor())) {
return false;
}
auto values = tensor.flat<float>();
for (int i = 0; i < tensor.NumElements(); ++i) {
if (values(i) != 1.0f) {
return false;
}
}
return true;
}
};
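// Reduce the rank of a 2D upsampling pattern:
//   Reshape(Tile(Reshape(input, shape1), multiples), shape2)
// When the 6D intermediate tensors only tile along two axes (multiples are 1
// at positions 3 and 5, shape is 1 at positions 2 and 4), rewrite the inner
// Reshape and Tile with equivalent 4D shapes and multiples.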
class ReduceUpsamplingDims : public ArithmeticOptimizerStage {
public:
explicit ReduceUpsamplingDims(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReduceUpsamplingDims", ctx, ctx_ext) {}
~ReduceUpsamplingDims() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsReshape(*node) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* tile;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &tile));
if (!IsTile(*tile) || IsInPreserveSet(*tile)) {
return absl::OkStatus();
}
if (NumNonControlOutputs(*tile, *ctx().node_map) != 1) {
return absl::OkStatus();
}
NodeDef* reshape;
TF_RETURN_IF_ERROR(GetInputNode(tile->input(0), &reshape));
if (!IsReshape(*reshape) || IsInPreserveSet(*reshape)) {
return absl::OkStatus();
}
NodeDef* multiples;
TF_RETURN_IF_ERROR(GetInputNode(tile->input(1), &multiples));
NodeDef* shape;
TF_RETURN_IF_ERROR(GetInputNode(reshape->input(1), &shape));
const NodeScopeAndName scope_and_name = ParseNodeScopeAndName(node->name());
const string new_reshape_name =
OptimizedNodeName(scope_and_name, "Reshape");
const string new_tile_name = OptimizedNodeName(scope_and_name, "Tile");
const string new_multiples_name =
OptimizedNodeName(scope_and_name, "Multiples");
const string new_shape_name = OptimizedNodeName(scope_and_name, "Shape");
if (ctx().node_map->NodeExists(new_reshape_name) ||
ctx().node_map->NodeExists(new_tile_name) ||
ctx().node_map->NodeExists(new_shape_name) ||
ctx().node_map->NodeExists(new_multiples_name)) {
return absl::OkStatus();
}
AttrValue new_multiples_attr;
if (!CreateUpdatedMultiplesProto(multiples,
new_multiples_attr.mutable_tensor())) {
return absl::OkStatus();
}
AttrValue new_shape_attr;
if (!CreateUpdatedShapeProto(shape, new_shape_attr.mutable_tensor())) {
return absl::OkStatus();
}
NodeDef* new_multiples = AddEmptyNode(new_multiples_name);
new_multiples->set_op("Const");
SetDataTypeToAttr(DT_INT32, "dtype", new_multiples);
new_multiples->mutable_attr()->insert({"value", new_multiples_attr});
new_multiples->set_device(multiples->device());
NodeDef* new_shape = AddEmptyNode(new_shape_name);
new_shape->set_op("Const");
SetDataTypeToAttr(DT_INT32, "dtype", new_shape);
new_shape->mutable_attr()->insert({"value", new_shape_attr});
new_shape->set_device(shape->device());
NodeDef* new_reshape = AddEmptyNode(new_reshape_name);
CopyReshapeWithInput(reshape, new_reshape, reshape->input(0),
new_shape->name());
NodeDef* new_tile = AddEmptyNode(new_tile_name);
CopyTileWithInput(tile, new_tile, new_reshape->name(),
new_multiples->name());
node->set_input(0, new_tile->name());
ctx().node_map->UpdateInput(node->name(), tile->name(), new_tile->name());
ForwardControlDependencies(new_tile, {tile});
ForwardControlDependencies(new_multiples, {multiples});
ForwardControlDependencies(new_reshape, {reshape});
ForwardControlDependencies(new_shape, {shape});
*simplified_node_name = node->name();
return absl::OkStatus();
}
private:
bool CreateUpdatedMultiplesProto(const NodeDef* node, TensorProto* proto) {
Tensor multiples;
if (!GetTensorFromConstNode(node->name(), &multiples)) {
return false;
}
if (multiples.dtype() != DT_INT32 || multiples.NumElements() != 6) {
return false;
}
const auto& multiples_values = multiples.flat<int32>();
if (multiples_values(3) != 1 || multiples_values(5) != 1) {
return false;
}
Tensor new_multiples(DT_INT32, {4});
new_multiples.flat<int32>()(0) = multiples_values(0);
new_multiples.flat<int32>()(1) = multiples_values(1);
new_multiples.flat<int32>()(2) = multiples_values(2);
new_multiples.flat<int32>()(3) = multiples_values(4);
new_multiples.AsProtoTensorContent(proto);
return true;
}
bool CreateUpdatedShapeProto(const NodeDef* node, TensorProto* proto) {
Tensor shape;
if (!GetTensorFromConstNode(node->name(), &shape)) {
return false;
}
if (shape.dtype() != DT_INT32 || shape.NumElements() != 6) {
return false;
}
const auto& shape_values = shape.flat<int32>();
if (shape_values(2) != 1 || shape_values(4) != 1) {
return false;
}
Tensor new_shape(DT_INT32, {4});
new_shape.flat<int32>()(0) = shape_values(0);
new_shape.flat<int32>()(1) = shape_values(1);
new_shape.flat<int32>()(2) = shape_values(3);
new_shape.flat<int32>()(3) = shape_values(5);
new_shape.AsProtoTensorContent(proto);
return true;
}
void CopyReshapeWithInput(const NodeDef* reshape, NodeDef* new_reshape,
const string& input, const string& shape) {
new_reshape->set_op("Reshape");
new_reshape->set_device(reshape->device());
SetDataTypeToAttr(GetDataTypeFromAttr(*reshape, "T"), "T", new_reshape);
SetDataTypeToAttr(GetDataTypeFromAttr(*reshape, "Tshape"), "Tshape",
new_reshape);
new_reshape->add_input(input);
ctx().node_map->AddOutput(NodeName(input), new_reshape->name());
new_reshape->add_input(shape);
ctx().node_map->AddOutput(NodeName(shape), new_reshape->name());
AddToOptimizationQueue(new_reshape);
}
void CopyTileWithInput(const NodeDef* tile, NodeDef* new_tile,
const string& input, const string& multiples) {
new_tile->set_op("Tile");
new_tile->set_device(tile->device());
SetDataTypeToAttr(GetDataTypeFromAttr(*tile, "T"), "T", new_tile);
SetDataTypeToAttr(GetDataTypeFromAttr(*tile, "Tmultiples"), "Tmultiples",
new_tile);
new_tile->add_input(input);
ctx().node_map->AddOutput(NodeName(input), new_tile->name());
new_tile->add_input(multiples);
ctx().node_map->AddOutput(NodeName(multiples), new_tile->name());
AddToOptimizationQueue(new_tile);
}
};
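// Replace a chain of Pack ops, each stacking copies of the same input, with
// a single Tile followed by a Reshape to the original output shape:
//   Pack(Pack(x, x), Pack(x, x)) => Reshape(Tile(x, multiples), shape)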
class ReplacePackWithTileReshape : public ArithmeticOptimizerStage {
public:
explicit ReplacePackWithTileReshape(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ReplacePackWithTileReshape", ctx, ctx_ext) {}
~ReplacePackWithTileReshape() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsPack(*node) && NumNonControlInputs(*node) > 1 &&
!IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* input = node;
std::vector<const NodeDef*> chain;
    while (IsPack(*input) && NumNonControlInputs(*input) > 1 &&
!IsInPreserveSet(*input)) {
if (!AllRegularInputsEqual(*input)) {
break;
}
chain.push_back(input);
TF_RETURN_IF_ERROR(GetInputNode(input->input(0), &input));
}
if (chain.empty()) {
return absl::OkStatus();
}
const NodeScopeAndName node_scope_and_name =
ParseNodeScopeAndName(node->name());
const string new_const_name =
OptimizedNodeName(node_scope_and_name, "Multiples");
const string new_tile_name = OptimizedNodeName(node_scope_and_name, "Tile");
const string new_shape_name =
OptimizedNodeName(node_scope_and_name, "Shape");
const string new_reshape_name =
OptimizedNodeName(node_scope_and_name, "Reshape");
if (ctx().node_map->NodeExists(new_const_name) ||
ctx().node_map->NodeExists(new_tile_name) ||
ctx().node_map->NodeExists(new_shape_name) ||
ctx().node_map->NodeExists(new_reshape_name)) {
return absl::OkStatus();
}
const OpInfo::TensorProperties* input_props;
TF_RETURN_IF_ERROR(GetTensorProperties(input->name(), &input_props));
const TensorShapeProto& input_shape = input_props->shape();
if (!PartialTensorShape(input_shape).IsFullyDefined()) {
return absl::OkStatus();
}
Tensor multiples(DT_INT32, TensorShape({input_shape.dim_size()}));
TF_RETURN_IF_ERROR(CalculateMultiplesFromChain(chain, &multiples));
const OpInfo::TensorProperties* output_props;
TF_RETURN_IF_ERROR(GetTensorProperties(node->name(), &output_props));
const TensorShapeProto& output_shape = output_props->shape();
if (!PartialTensorShape(output_shape).IsFullyDefined()) {
return absl::OkStatus();
}
Tensor output_shape_tensor(DT_INT32,
TensorShape({output_shape.dim_size()}));
for (int i = 0; i < output_shape.dim_size(); ++i) {
output_shape_tensor.flat<int32>()(i) = output_shape.dim(i).size();
}
NodeDef* new_const_node = AddEmptyNode(new_const_name);
TF_RETURN_IF_ERROR(ConstantFolding::CreateNodeDef(
new_const_node->name(), TensorValue(&multiples), new_const_node));
new_const_node->set_device(node->device());
MaybeAddControlInput(input->name(), new_const_node, ctx().optimized_graph,
ctx().node_map);
AddToOptimizationQueue(new_const_node);
DataType dtype = GetDataTypeFromAttr(*node, "T");
NodeDef* new_tile_node = AddEmptyNode(new_tile_name);
new_tile_node->set_op("Tile");
new_tile_node->set_device(node->device());
SetDataTypeToAttr(dtype, "T", new_tile_node);
SetDataTypeToAttr(DT_INT32, "Tmultiples", new_tile_node);
new_tile_node->add_input(input->name());
ctx().node_map->AddOutput(input->name(), new_tile_node->name());
new_tile_node->add_input(new_const_node->name());
ctx().node_map->AddOutput(new_const_node->name(), new_tile_node->name());
ForwardControlDependencies(new_tile_node, chain);
AddToOptimizationQueue(new_tile_node);
NodeDef* new_shape_node = AddEmptyNode(new_shape_name);
TF_RETURN_IF_ERROR(ConstantFolding::CreateNodeDef(
new_shape_node->name(), TensorValue(&output_shape_tensor),
new_shape_node));
new_shape_node->set_device(node->device());
MaybeAddControlInput(input->name(), new_shape_node, ctx().optimized_graph,
ctx().node_map);
AddToOptimizationQueue(new_shape_node);
NodeDef* new_reshape_node = AddEmptyNode(new_reshape_name);
new_reshape_node->set_op("Reshape");
new_reshape_node->set_device(node->device());
SetDataTypeToAttr(dtype, "T", new_reshape_node);
SetDataTypeToAttr(DT_INT32, "Tshape", new_reshape_node);
new_reshape_node->add_input(new_tile_node->name());
ctx().node_map->AddOutput(new_tile_node->name(), new_reshape_node->name());
new_reshape_node->add_input(new_shape_node->name());
ctx().node_map->AddOutput(new_shape_node->name(), new_reshape_node->name());
*simplified_node_name = new_reshape_node->name();
return absl::OkStatus();
}
protected:
Status CalculateMultiplesFromChain(const std::vector<const NodeDef*>& chain,
Tensor* multiples) {
std::vector<int32> dims(multiples->NumElements());
std::iota(dims.begin(), dims.end(), 0);
for (int i = 0; i < multiples->NumElements(); ++i) {
multiples->flat<int32>()(i) = 1;
}
for (auto it = chain.rbegin(); it != chain.rend(); ++it) {
AttrSlice attrs(**it);
int64_t axis, n;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "axis", &axis));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "N", &n));
if (axis >= dims.size()) {
return Status(absl::StatusCode::kOutOfRange,
"axis value out of range of dims");
}
int64_t m = multiples->flat<int32>()(dims[axis]) * n;
if (TF_PREDICT_FALSE(m > INT_MAX)) {
return Status(absl::StatusCode::kOutOfRange, "int32 overflow");
}
multiples->flat<int32>()(dims[axis]) = static_cast<int32>(m);
dims.insert(dims.begin() + axis, dims[axis]);
}
return absl::OkStatus();
}
};
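// Simplify aggregation ops (e.g. AddN) whose regular inputs are all the same
// tensor x by rewriting the N-way sum as Mul(N, x). An aggregation with a
// single input is forwarded directly to that input.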
class SimplifyAggregation : public ArithmeticOptimizerStage {
public:
explicit SimplifyAggregation(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("SimplifyAggregation", ctx, ctx_ext) {}
~SimplifyAggregation() override = default;
bool IsSupported(const NodeDef* node) const override {
    return IsAggregate(*node) && HasRegularInputs(*node) &&
           GetDataTypeFromAttr(*node, "T") != DT_VARIANT;
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
if (node->input_size() == 1) {
*simplified_node_name = node->input(0);
return absl::OkStatus();
}
bool all_equal = true;
int num_inputs = 1;
for (int i = 1; i < node->input_size(); ++i) {
if (IsControlInput(node->input(i))) break;
++num_inputs;
if (node->input(i) != node->input(0)) {
all_equal = false;
break;
}
}
if (!all_equal) return absl::OkStatus();
const NodeScopeAndName node_scope_and_name =
ParseNodeScopeAndName(node->name());
const string optimized_const_name =
OptimizedNodeName(node_scope_and_name, "Const");
const string optimized_mul_name =
OptimizedNodeName(node_scope_and_name, "Mul");
bool is_already_optimized =
ctx().node_map->NodeExists(optimized_const_name) ||
ctx().node_map->NodeExists(optimized_mul_name);
if (is_already_optimized) return absl::OkStatus();
VLOG(3) << "Simplify aggregation with identical inputs: node="
<< node->name() << " num_inputs=" << num_inputs;
const auto type = GetDataTypeFromAttr(*node, "T");
Tensor t(type, TensorShape({}));
Status status = SetTensorValue(type, num_inputs, &t);
if (!status.ok()) {
return errors::Internal("Failed to create const node: ",
status.message());
}
TensorValue value(&t);
NodeDef* new_const_node = AddEmptyNode(optimized_const_name);
status = ConstantFolding::CreateNodeDef(new_const_node->name(), value,
new_const_node);
if (!status.ok()) {
return errors::Internal("Failed to create const node: ",
status.message());
}
new_const_node->set_device(node->device());
MaybeAddControlInput(NodeName(node->input(0)), new_const_node,
ctx().optimized_graph, ctx().node_map);
AddToOptimizationQueue(new_const_node);
NodeDef* new_mul_node = AddEmptyNode(optimized_mul_name);
new_mul_node->set_op("Mul");
new_mul_node->set_device(node->device());
SetDataTypeToAttr(type, "T", new_mul_node);
new_mul_node->add_input(new_const_node->name());
ctx().node_map->AddOutput(new_const_node->name(), new_mul_node->name());
new_mul_node->add_input(node->input(0));
ctx().node_map->AddOutput(node->input(0), new_mul_node->name());
ForwardControlDependencies(new_mul_node, {node});
*simplified_node_name = new_mul_node->name();
return absl::OkStatus();
}
};
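// Strength-reduce Pow(x, c) when all elements of the constant exponent c are
// equal (subject to shape checks):
//   c == 2   => Square(x)       c == 3    => Mul(x, Square(x)) (CPU only)
//   c == 1   => Identity(x)     c == 0.5  => Sqrt(x)
//   c == 0   => constant ones   c == -0.5 => Rsqrt(x)
//   c == -1  => Reciprocal(x)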
class ConvertPowStage : public ArithmeticOptimizerStage {
public:
explicit ConvertPowStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ConvertPow", ctx, ctx_ext) {}
bool IsSupported(const NodeDef* node) const override {
return IsPow(*node) &&
ctx().graph_properties->HasOutputProperties(node->name()) &&
ctx().graph_properties->HasInputProperties(node->name());
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
Tensor pow;
if (!GetTensorFromConstNode(node->input(1), &pow)) return absl::OkStatus();
complex128 prev, curr;
for (int i = 0; i < pow.NumElements(); ++i) {
if (!GetElementUnexhaustive(pow, i, {pow.dtype()}, &curr)) {
return absl::OkStatus();
}
if (i != 0 && curr != prev) {
return absl::OkStatus();
}
prev = curr;
}
NodeDef *x, *y;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &x));
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &y));
const auto& value_props =
ctx().graph_properties->GetInputProperties(node->name())[0];
const TensorShapeProto& output_shape =
ctx().graph_properties->GetOutputProperties(node->name())[0].shape();
if (curr == complex128(2, 0)) {
node->set_op("Square");
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
} else if (curr == complex128(3, 0)) {
if (NodeIsOnCpu(*node)) {
const NodeScopeAndName scope_and_name =
ParseNodeScopeAndName(node->name());
const string inner_square_name =
OptimizedNodeName(scope_and_name, "_inner");
NodeDef* inner_square_node = ctx().node_map->GetNode(inner_square_name);
if (inner_square_node == nullptr) {
inner_square_node = AddCopyNode(inner_square_name, node);
inner_square_node->set_op("Square");
inner_square_node->mutable_input()->RemoveLast();
}
ctx().node_map->AddOutput(x->name(), inner_square_node->name());
node->set_op("Mul");
node->set_input(1, inner_square_node->name());
node->add_input(AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(inner_square_node);
AddToOptimizationQueue(y);
}
} else if (curr == complex128(1, 0) &&
ShapesSymbolicallyEqual(value_props.shape(), output_shape)) {
node->set_op("Identity");
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
} else if (curr == complex128(0.5, 0)) {
node->set_op("Sqrt");
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
} else if (curr == complex128(0, 0) &&
ShapesSymbolicallyEqual(value_props.shape(), output_shape) &&
PartialTensorShape(output_shape).IsFullyDefined()) {
const auto dtype = node->attr().at("T").type();
Tensor ones(dtype, output_shape);
for (int i = 0; i < ones.NumElements(); ++i) {
TF_RETURN_IF_ERROR(SetElementToOne(i, &ones));
}
node->set_op("Const");
(*node->mutable_attr())["dtype"].set_type(dtype);
node->mutable_attr()->erase("T");
ones.AsProtoTensorContent(
(*node->mutable_attr())["value"].mutable_tensor());
node->set_input(0, AsControlDependency(x->name()));
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(x);
AddToOptimizationQueue(y);
} else if (curr == complex128(-0.5, 0)) {
node->set_op("Rsqrt");
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
} else if (curr == complex128(-1, 0)) {
node->set_op("Reciprocal");
node->set_input(1, AsControlDependency(y->name()));
AddToOptimizationQueue(node);
AddToOptimizationQueue(y);
}
return absl::OkStatus();
}
private:
Status SetElementToOne(int i, Tensor* t) {
switch (t->dtype()) {
case DT_INT32:
t->flat<int32>()(i) = 1;
return absl::OkStatus();
case DT_INT64:
t->flat<int64_t>()(i) = 1L;
return absl::OkStatus();
case DT_FLOAT:
t->flat<float>()(i) = 1.0f;
return absl::OkStatus();
case DT_DOUBLE:
t->flat<double>()(i) = 1.0;
return absl::OkStatus();
case DT_COMPLEX64:
t->flat<complex64>()(i) = complex64(1);
return absl::OkStatus();
case DT_COMPLEX128:
t->flat<complex128>()(i) = complex128(1);
return absl::OkStatus();
default:
return errors::InvalidArgument("Invalid data type: ", t->dtype());
}
}
};
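// Rewrite Log(Add(x, 1)) as the more numerically accurate Log1p(x). Both
// operand orders of the Add are checked, and the constant must broadcast
// without changing the shape of x.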
class ConvertLog1pStage : public ArithmeticOptimizerStage {
public:
explicit ConvertLog1pStage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ConvertLog1p", ctx, ctx_ext) {}
~ConvertLog1pStage() override = default;
bool IsSupported(const NodeDef* node) const override { return IsLog(*node); }
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* input;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &input));
if (!IsAdd(*input)) {
return absl::OkStatus();
}
if (ctx().graph_properties->GetInputProperties(input->name()).size() < 2) {
return absl::OkStatus();
}
bool modified = false;
TF_RETURN_IF_ERROR(TrySimplifyInternal(node, input, 0, 1, &modified));
if (!modified) {
TF_RETURN_IF_ERROR(TrySimplifyInternal(node, input, 1, 0, &modified));
}
if (modified) {
*simplified_node_name = node->name();
}
return absl::OkStatus();
}
private:
Status TrySimplifyInternal(NodeDef* node, NodeDef* add_node, int i, int j,
bool* modified) {
const auto& t =
ctx().graph_properties->GetInputProperties(add_node->name())[i];
const auto& c =
ctx().graph_properties->GetInputProperties(add_node->name())[j];
for (int k = 0; k < c.shape().dim_size(); ++k) {
if (c.shape().dim(k).size() < 0) {
return absl::OkStatus();
}
}
TensorShapeProto broadcast_shape;
if (!ShapeAfterBroadcast(t.shape(), c.shape(), &broadcast_shape)) {
return absl::OkStatus();
}
if (!ShapesSymbolicallyEqual(t.shape(), broadcast_shape)) {
return absl::OkStatus();
}
Tensor constant;
if (GetTensorFromConstNode(add_node->input(j), &constant)) {
complex128 element;
for (int k = 0; k < constant.NumElements(); ++k) {
if (!GetElementUnexhaustive(constant, k,
{DT_BFLOAT16, DT_HALF, DT_FLOAT, DT_DOUBLE,
DT_COMPLEX64, DT_COMPLEX128},
&element)) {
return absl::OkStatus();
}
if (element != complex128(1)) {
return absl::OkStatus();
}
}
NodeDef *x, *y;
TF_RETURN_IF_ERROR(GetInputNode(add_node->input(i), &x));
TF_RETURN_IF_ERROR(GetInputNode(add_node->input(j), &y));
node->set_op("Log1p");
node->set_input(0, add_node->input(i));
node->add_input(AsControlDependency(y->name()));
ForwardControlDependencies(node, {add_node});
AddToOptimizationQueue(node);
AddToOptimizationQueue(add_node);
AddToOptimizationQueue(x);
AddToOptimizationQueue(y);
*modified = true;
}
return absl::OkStatus();
}
};
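// Rewrite Sub(Exp(x), 1) as the more numerically accurate Expm1(x).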
class ConvertExpm1Stage : public ArithmeticOptimizerStage {
public:
explicit ConvertExpm1Stage(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("ConvertExpm1", ctx, ctx_ext) {}
~ConvertExpm1Stage() override = default;
bool IsSupported(const NodeDef* node) const override {
if (!IsSub(*node)) return false;
NodeDef* input;
if (!GetInputNode(node->input(0), &input).ok()) return false;
return IsExp(*input);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
if (ctx().graph_properties->GetInputProperties(node->name()).size() < 2) {
return absl::OkStatus();
}
const auto& t = ctx().graph_properties->GetInputProperties(node->name())[0];
const auto& c = ctx().graph_properties->GetInputProperties(node->name())[1];
TensorShapeProto broadcast_shape;
if (!ShapeAfterBroadcast(t.shape(), c.shape(), &broadcast_shape)) {
return absl::OkStatus();
}
if (!ShapesSymbolicallyEqual(t.shape(), broadcast_shape)) {
return absl::OkStatus();
}
Tensor constant;
if (!GetTensorFromConstNode(node->input(1), &constant))
return absl::OkStatus();
complex128 element;
for (int k = 0; k < constant.NumElements(); ++k) {
if (!GetElementUnexhaustive(constant, k,
{DT_BFLOAT16, DT_HALF, DT_FLOAT, DT_DOUBLE,
DT_COMPLEX64, DT_COMPLEX128},
&element)) {
return absl::OkStatus();
}
if (element != complex128(1)) {
return absl::OkStatus();
}
}
NodeDef* exp;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &exp));
NodeDef *exp_input, *ones;
TF_RETURN_IF_ERROR(GetInputNode(exp->input(0), &exp_input));
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &ones));
node->set_op("Expm1");
node->set_input(0, exp->input(0));
node->set_input(1, AsControlDependency(ones->name()));
ForwardControlDependencies(node, {exp});
AddToOptimizationQueue(node);
AddToOptimizationQueue(exp);
AddToOptimizationQueue(exp_input);
AddToOptimizationQueue(ones);
*simplified_node_name = node->name();
return absl::OkStatus();
}
};
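// Swap a Max/Min/MaxPool/ArgMax/ArgMin reduction with an element-wise
// monotonic inner function, so the reduction runs on the smaller input:
//   Max(Sqrt(x)) => Sqrt(Max(x))
// For monotonically decreasing inner functions the reduction is flipped
// (Max <-> Min); for ArgMax/ArgMin the inner function becomes an Identity.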
class OptimizeMaxOrMinOfMonotonicStage : public ArithmeticOptimizerStage {
public:
explicit OptimizeMaxOrMinOfMonotonicStage(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("OptimizeMaxOrMinOfMonotonicStage", ctx,
ctx_ext) {}
~OptimizeMaxOrMinOfMonotonicStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsMax(*node) || IsMin(*node) || IsAnyMaxPool(*node) ||
IsArgMax(*node) || IsArgMin(*node);
}
Status TrySimplify(NodeDef* reduction_node,
string* simplified_node_name) override {
if (IsInPreserveSet(*reduction_node)) {
return absl::OkStatus();
}
NodeDef* inner_function;
TF_RETURN_IF_ERROR(GetInputNode(reduction_node->input(0), &inner_function));
NodeDef* inner_function_input = nullptr;
if (inner_function->input_size() > 0) {
TF_RETURN_IF_ERROR(
GetInputNode(inner_function->input(0), &inner_function_input));
}
auto can_be_fused_by_remapper = [](const NodeDef& consumer,
const NodeDef& producer) -> bool {
if (IsRelu(consumer) || IsRelu6(consumer)) {
if (IsFusedBatchNorm(producer) || IsBiasAdd(producer)) {
return true;
}
}
return false;
};
bool is_non_decreasing = false;
if (!IsInPreserveSet(*inner_function) &&
IsElementWiseMonotonic(*inner_function, &is_non_decreasing) &&
ctx().node_map->GetOutputs(inner_function->name()).size() == 1 &&
(is_non_decreasing || !IsAnyMaxPool(*reduction_node)) &&
!can_be_fused_by_remapper(*inner_function, *inner_function_input)) {
NodeDef* inner_input;
TF_RETURN_IF_ERROR(GetInputNode(inner_function->input(0), &inner_input));
reduction_node->set_input(0, inner_input->name());
ctx().node_map->UpdateInput(reduction_node->name(),
inner_function->name(), inner_input->name());
inner_function->set_input(0, reduction_node->name());
TF_RETURN_IF_ERROR(
UpdateConsumers(reduction_node, inner_function->name()));
ctx().node_map->UpdateInput(inner_function->name(), inner_input->name(),
reduction_node->name());
if (!is_non_decreasing) {
const string opposite = FlipMinMax(*reduction_node);
reduction_node->set_op(opposite);
}
if (IsArgMax(*reduction_node) || IsArgMin(*reduction_node)) {
inner_function->set_op("Identity");
}
AddToOptimizationQueue(reduction_node);
AddToOptimizationQueue(inner_function);
AddToOptimizationQueue(inner_input);
}
return absl::OkStatus();
}
private:
string FlipMinMax(const NodeDef& node) {
const string& op = node.op();
if (IsAnyMax(node) || IsArgMax(node)) {
return str_util::StringReplace(op, "Max", "Min", false);
} else {
return str_util::StringReplace(op, "Min", "Max", false);
}
}
};
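// Fuse a chain of supported element-wise unary ops on CPU into a single
// _UnaryOpsComposition node, avoiding per-op kernel dispatch and
// intermediate buffers.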
class UnaryOpsComposition : public ArithmeticOptimizerStage {
public:
explicit UnaryOpsComposition(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("UnaryOpsComposition", ctx, ctx_ext) {
supported_ops_ = {
{"Abs", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Acos", {DT_FLOAT, DT_DOUBLE}},
{"Acosh", {DT_FLOAT, DT_DOUBLE}},
{"Asin", {DT_FLOAT, DT_DOUBLE}},
{"Asinh", {DT_FLOAT, DT_DOUBLE}},
{"Atan", {DT_FLOAT, DT_DOUBLE}},
{"Atanh", {DT_FLOAT, DT_DOUBLE}},
{"Ceil", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Cos", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Cosh", {DT_FLOAT, DT_DOUBLE}},
{"Expm1", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Exp", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Floor", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Inv", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Log", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Log1p", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Neg", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Reciprocal", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Rint", {DT_FLOAT, DT_DOUBLE}},
{"Round", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Rsqrt", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Sigmoid", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Sin", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Sinh", {DT_FLOAT, DT_DOUBLE}},
{"Sqrt", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Square", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Tan", {DT_FLOAT, DT_DOUBLE}},
{"Tanh", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Elu", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Relu", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Relu6", {DT_FLOAT, DT_HALF, DT_DOUBLE}},
{"Selu", {DT_FLOAT, DT_HALF, DT_DOUBLE}}};
}
~UnaryOpsComposition() override = default;
bool IsSupported(const NodeDef* node) const override {
return CanOptimize(*node) &&
!ctx().node_map->NodeExists(OptimizedNodeName(*node));
}
Status TrySimplify(NodeDef* root, string* simplified_node_name) override {
TF_RETURN_IF_ERROR(CheckAttrExists(*root, "T"));
DataType dtype = root->attr().at("T").type();
std::vector<string> op_nodes = {root->name()};
std::vector<string> op_names = {root->op()};
const auto predicate_fn = [&](const NodeDef& input) {
if (input.name() == root->name()) return true;
bool follow_input_node =
dtype == GetDataTypeFromAttr(input, "T") &&
NumNonControlDataOutputs(input, *ctx().node_map) == 1 &&
CanOptimize(input);
if (follow_input_node) {
op_nodes.push_back(input.name());
op_names.push_back(input.op());
}
return follow_input_node;
};
    NodeDef* last_op = GetTailOfChain(
        *root, *ctx().node_map, /*follow_control_input=*/false, predicate_fn);
if (op_names.size() == 1) return absl::OkStatus();
std::for_each(op_nodes.begin(), op_nodes.end(),
[this](const string& name) { AddToFusedNodes(name); });
std::reverse(op_names.begin(), op_names.end());
VLOG(2) << "Fuse unary ops: root=" << root->name() << " op_names=["
<< absl::StrJoin(op_names, ", ") << "]";
NodeDef* composition_node = ctx().optimized_graph->add_node();
composition_node->set_name(OptimizedNodeName(*root));
composition_node->set_op("_UnaryOpsComposition");
composition_node->add_input(last_op->input(0));
composition_node->set_device(root->device());
auto attr = composition_node->mutable_attr();
SetAttrValue(dtype, &(*attr)["T"]);
SetAttrValue(op_names, &(*attr)["op_names"]);
ctx().node_map->AddNode(composition_node->name(), composition_node);
ctx().node_map->AddOutput(NodeName(last_op->input(0)),
composition_node->name());
*simplified_node_name = composition_node->name();
return absl::OkStatus();
}
private:
bool CanOptimize(const NodeDef& node) const {
DataType dtype = GetDataTypeFromAttr(node, "T");
if (!IsSupported(node.op(), dtype)) {
return false;
}
if (IsInPreserveSet(node)) {
return false;
}
if (!NodeIsOnCpu(node)) {
return false;
}
if (NodeIsAlreadyFused(node)) {
return false;
}
return !(IsDrivenByControlDependency(node) ||
DrivesControlDependency(node));
}
bool NodeIsAlreadyFused(const NodeDef& node) const {
return fused_nodes_.count(node.name()) > 0;
}
string OptimizedNodeName(const NodeDef& node) const {
return strings::StrCat(node.name(), "/unary_ops_composition");
}
void AddToFusedNodes(const string& name) { fused_nodes_.insert(name); }
bool IsSupported(const string& op_name, DataType dtype) const {
const auto it = supported_ops_.find(op_name);
return it != supported_ops_.end() && it->second.count(dtype) > 0;
}
std::unordered_map<string, std::set<DataType>> supported_ops_;
std::unordered_set<string> fused_nodes_;
};
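// Replace a Slice/StridedSlice of a Pack output that selects exactly one
// element along the pack axis with the corresponding Pack input, wrapped in
// an ExpandDims (or an Identity when the sliced dimension is shrunk away).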
class RemoveStackSliceSameAxis : public ArithmeticOptimizerStage {
public:
explicit RemoveStackSliceSameAxis(const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveStackStridedSliceSameAxis", ctx,
ctx_ext) {}
~RemoveStackSliceSameAxis() override = default;
bool IsSupported(const NodeDef* node) const override {
return (IsStridedSlice(*node) || IsSlice(*node)) && !IsInPreserveSet(*node);
}
Status TrySimplify(NodeDef* node, string* simplified_node_name) override {
NodeDef* pack;
TF_RETURN_IF_ERROR(GetInputNode(node->input(0), &pack));
if (!IsPack(*pack)) return absl::OkStatus();
bool return_early;
PartialTensorShape pack_output_shape;
int pack_axis;
TF_RETURN_IF_ERROR(
CheckInputs(node, pack, &pack_output_shape, &pack_axis, &return_early));
if (return_early) return absl::OkStatus();
int64_t slice_start_value;
bool found;
bool must_expand_dims;
TF_RETURN_IF_ERROR(GetSliceAxis(node, pack, pack_output_shape, pack_axis,
&slice_start_value, &found,
&must_expand_dims));
if (!found) return absl::OkStatus();
return RewriteGraph(node, pack, slice_start_value, pack_axis,
must_expand_dims, simplified_node_name);
}
protected:
Status CheckInputs(const NodeDef* node, const NodeDef* pack,
PartialTensorShape* pack_output_shape, int* pack_axis,
bool* return_early) {
*return_early = true;
TF_RETURN_IF_ERROR(CheckAttrExists(*pack, "axis"));
*pack_axis = pack->attr().at("axis").i();
auto slice_properties =
ctx().graph_properties->GetInputProperties(node->name());
if (slice_properties.empty() ||
slice_properties[0].shape().unknown_rank()) {
return absl::OkStatus();
}
*pack_output_shape = slice_properties[0].shape();
const int pack_output_rank = pack_output_shape->dims();
if (*pack_axis < 0) {
*pack_axis += pack_output_rank;
}
if (*pack_axis < 0 || *pack_axis >= pack_output_rank) {
return errors::InvalidArgument(
"Pack node (", pack->name(),
") axis attribute is out of bounds: ", pack->attr().at("axis").i());
}
*return_early = false;
return absl::OkStatus();
}
Status GetSliceAxis(const NodeDef* node, const NodeDef* pack,
const PartialTensorShape& pack_output_shape,
int pack_axis, int64_t* slice_start_value, bool* found,
bool* must_expand_dims) {
*found = false;
if (IsSlice(*node)) {
*must_expand_dims = true;
return GetSimpleSliceAxis(node, pack, pack_output_shape, pack_axis,
slice_start_value, found);
} else {
return GetStridedSliceAxis(node, pack, pack_output_shape, pack_axis,
slice_start_value, found, must_expand_dims);
}
}
Status GetSimpleSliceAxis(const NodeDef* node, const NodeDef* pack,
const PartialTensorShape& pack_output_shape,
int pack_axis, int64_t* slice_start_value,
bool* found) {
NodeDef* slice_begin;
NodeDef* slice_size;
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &slice_begin));
TF_RETURN_IF_ERROR(GetInputNode(node->input(2), &slice_size));
for (const auto* n : {slice_begin, slice_size}) {
if (!IsReallyConstant(*n)) return absl::OkStatus();
}
Tensor slice_begin_t;
Tensor slice_size_t;
TF_RETURN_IF_ERROR(CheckAttrExists(*slice_begin, "value"));
if (!slice_begin_t.FromProto(slice_begin->attr().at("value").tensor())) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CheckAttrExists(*slice_size, "value"));
if (!slice_size_t.FromProto(slice_size->attr().at("value").tensor())) {
return absl::OkStatus();
}
    auto copy_tensor_values_to_vector =
        [node](const Tensor& t, absl::InlinedVector<int64_t, 4UL>* vec) {
if (t.dtype() == DT_INT32) {
auto t_flat = t.flat<int32>();
vec->assign(&t_flat(0), &t_flat(t.NumElements()));
} else if (t.dtype() == DT_INT64) {
auto t_flat = t.flat<int64_t>();
vec->assign(&t_flat(0), &t_flat(t.NumElements()));
} else {
return errors::InvalidArgument("Node ", node->name(),
" has invalid type for Index attr: ",
DataTypeString(t.dtype()));
}
return absl::OkStatus();
};
absl::InlinedVector<int64_t, 4UL> slice_begin_vec;
absl::InlinedVector<int64_t, 4UL> slice_size_vec;
TF_RETURN_IF_ERROR(
copy_tensor_values_to_vector(slice_begin_t, &slice_begin_vec));
TF_RETURN_IF_ERROR(
copy_tensor_values_to_vector(slice_size_t, &slice_size_vec));
if (slice_begin_vec.size() != slice_size_vec.size()) {
return errors::InvalidArgument("Node ", node->name(),
" has mismatched lengths for begin (",
slice_begin_vec.size(), ") and size (",
slice_size_vec.size(), ") vectors.");
}
int slice_begin_vec_size = slice_begin_vec.size();
if (!pack_output_shape.unknown_rank() &&
slice_begin_vec_size != pack_output_shape.dims()) {
return absl::OkStatus();
}
if (pack_axis >= slice_begin_vec_size) {
return errors::InvalidArgument(
"Input to node ", node->name(), " had pack_axis ", pack_axis,
" but rank was ", slice_begin_vec_size, ".");
}
*slice_start_value = slice_begin_vec[pack_axis];
if (slice_size_vec[pack_axis] != 1) {
return absl::OkStatus();
}
for (int i = 0; i < slice_begin_vec_size; ++i) {
if (i != pack_axis) {
if (slice_begin_vec[i] != 0 ||
!(slice_size_vec[i] == -1 ||
slice_size_vec[i] == pack_output_shape.dim_size(i))) {
return absl::OkStatus();
}
}
}
if (*slice_start_value < 0 || *slice_start_value >= pack->input_size()) {
return errors::InvalidArgument(
"Node ", node->name(), " requested invalid slice index ",
*slice_start_value, " on axis ", pack_axis,
" from tensor of shape: ", pack_output_shape.DebugString());
}
*found = true;
return absl::OkStatus();
}
Status GetStridedSliceAxis(const NodeDef* node, const NodeDef* pack,
const PartialTensorShape& pack_output_shape,
int pack_axis, int64_t* slice_start_value,
bool* found, bool* must_expand_dims) {
TF_RETURN_IF_ERROR(
CheckAttrsExist(*node, {"begin_mask", "end_mask", "ellipsis_mask",
"new_axis_mask", "shrink_axis_mask"}));
const int begin_mask = node->attr().at("begin_mask").i();
const int end_mask = node->attr().at("end_mask").i();
const int ellipsis_mask = node->attr().at("ellipsis_mask").i();
const int new_axis_mask = node->attr().at("new_axis_mask").i();
const int shrink_axis_mask = node->attr().at("shrink_axis_mask").i();
NodeDef* slice_begin;
NodeDef* slice_end;
NodeDef* slice_strides;
TF_RETURN_IF_ERROR(GetInputNode(node->input(1), &slice_begin));
TF_RETURN_IF_ERROR(GetInputNode(node->input(2), &slice_end));
TF_RETURN_IF_ERROR(GetInputNode(node->input(3), &slice_strides));
for (const auto* n : {slice_begin, slice_end, slice_strides}) {
if (!IsReallyConstant(*n)) return absl::OkStatus();
}
Tensor slice_begin_t;
Tensor slice_end_t;
Tensor slice_strides_t;
TF_RETURN_IF_ERROR(CheckAttrExists(*slice_begin, "value"));
if (!slice_begin_t.FromProto(slice_begin->attr().at("value").tensor())) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CheckAttrExists(*slice_end, "value"));
if (!slice_end_t.FromProto(slice_end->attr().at("value").tensor())) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(CheckAttrExists(*slice_strides, "value"));
if (!slice_strides_t.FromProto(
slice_strides->attr().at("value").tensor())) {
return absl::OkStatus();
}
TensorShape processing_shape;
TensorShape final_shape;
bool is_identity;
bool is_simple_slice;
bool slice_dim0;
absl::InlinedVector<int64_t, 4UL> slice_begin_vec;
absl::InlinedVector<int64_t, 4UL> slice_end_vec;
absl::InlinedVector<int64_t, 4UL> slice_strides_vec;
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
&slice_begin_t, &slice_end_t, slice_strides_t, pack_output_shape,
begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask,
&processing_shape, &final_shape, &is_identity, &is_simple_slice,
&slice_dim0, &slice_begin_vec, &slice_end_vec, &slice_strides_vec));
if (!is_simple_slice) return absl::OkStatus();
int begin_index = -1;
int64_t begin_value = 0;
for (int i = 0, end = slice_begin_vec.size(); i < end; ++i) {
const int64_t v = slice_begin_vec[i];
if (v != 0) {
if (begin_index != -1) {
return absl::OkStatus();
}
begin_index = i;
begin_value = v;
}
}
int end_index = -1;
int64_t end_value = 0;
    for (int i = 0, end = slice_end_vec.size(); i < end; ++i) {
const int64_t v = slice_end_vec[i];
if (v != pack_output_shape.dim_size(i)) {
if (end_index != -1) {
return absl::OkStatus();
}
end_index = i;
end_value = v;
}
}
if (begin_index == -1 && end_index == -1) return absl::OkStatus();
if (begin_index != -1 && end_index != -1 && begin_index != end_index) {
return absl::OkStatus();
}
const int slice_axis = (begin_index == -1) ? end_index : begin_index;
if (slice_axis != pack_axis) {
return absl::OkStatus();
}
*slice_start_value = (begin_index == -1) ? 0 : begin_value;
const int64_t slice_end_value =
(end_index == -1) ? pack_output_shape.dim_size(slice_axis) : end_value;
if (slice_end_value != *slice_start_value + 1) {
return absl::OkStatus();
}
if (*slice_start_value < 0 || *slice_start_value >= pack->input_size()) {
return errors::InvalidArgument(
"Node ", node->name(), " requested invalid slice index ",
*slice_start_value, " on axis ", slice_axis,
" from tensor of shape: ", pack_output_shape.DebugString());
}
if (shrink_axis_mask == 0) {
*must_expand_dims = true;
} else if (shrink_axis_mask == (1 << slice_axis)) {
*must_expand_dims = false;
} else {
return absl::OkStatus();
}
*found = true;
return absl::OkStatus();
}
Status RewriteGraph(const NodeDef* node, const NodeDef* pack,
int64_t slice_start_value, int pack_axis,
bool must_expand_dims, string* simplified_node_name) {
const string& input_slice = pack->input(slice_start_value);
const OpInfo::TensorProperties* input_slice_properties;
TF_RETURN_IF_ERROR(GetTensorProperties(pack->input(slice_start_value),
&input_slice_properties));
PartialTensorShape input_slice_shape(input_slice_properties->shape());
const OpInfo::TensorProperties* output_properties;
TF_RETURN_IF_ERROR(GetTensorProperties(
strings::StrCat(node->name(), ":", 0), &output_properties));
PartialTensorShape output_shape(output_properties->shape());
NodeDef* output =
AddEmptyNode(OptimizedNodeName(ParseNodeScopeAndName(node->name())));
if (!must_expand_dims) {
output->set_op("Identity");
output->set_device(node->device());
SetDataTypeToAttr(output_properties->dtype(), "T", output);
output->add_input(input_slice);
} else {
NodeDef* axis = AddEmptyNode(
OptimizedNodeName(ParseNodeScopeAndName(node->name()), "Axis"));
axis->set_op("Const");
axis->set_device(node->device());
axis->add_input(absl::StrCat("^", ParseTensorName(input_slice).node()));
auto axis_attr = axis->mutable_attr();
SetDataTypeToAttr(DT_INT32, "dtype", axis);
auto* axis_t = (*axis_attr)["value"].mutable_tensor();
axis_t->set_dtype(DT_INT32);
axis_t->add_int_val(pack_axis);
AddToOptimizationQueue(axis);
output->set_op("ExpandDims");
output->set_device(node->device());
SetDataTypeToAttr(output_properties->dtype(), "T", output);
SetDataTypeToAttr(DT_INT32, "Tdim", output);
output->add_input(input_slice);
output->add_input(axis->name());
}
ForwardControlDependencies(output, {node, pack});
AddToOptimizationQueue(output);
*simplified_node_name = output->name();
return absl::OkStatus();
}
};
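// Eliminate the Unique+Gather pattern in sparse segment reductions and look
// up the embedding table directly with the original ids:
//   SparseSegmentSum(Gather(params, Unique(ids).y), Unique(ids).idx, sids)
//     => SparseSegmentSum(params, ids, sids)
// Resource variables are read through an inserted ReadVariableOp.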
class SimplifyEmbeddingLookupStage : public ArithmeticOptimizerStage {
public:
explicit SimplifyEmbeddingLookupStage(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("SimplifyEmbeddingLookupStage", ctx, ctx_ext) {
}
~SimplifyEmbeddingLookupStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsAnySparseSegmentReduction(*node);
}
Status TrySimplify(NodeDef* reduction_node,
string* simplified_node_name) override {
if (IsInPreserveSet(*reduction_node)) return absl::OkStatus();
NodeDef* gather_node = nullptr;
TF_RETURN_IF_ERROR(GetInputNode(reduction_node->input(0), &gather_node));
if (!IsGather(*gather_node) || IsInPreserveSet(*gather_node) ||
gather_node->device() != reduction_node->device())
return absl::OkStatus();
if (gather_node->op() == "GatherV2" && !IsAxis0(*gather_node, 2))
return absl::OkStatus();
NodeDef* unique_node = nullptr;
TF_RETURN_IF_ERROR(GetInputNode(gather_node->input(1), &unique_node));
if (!IsUnique(*unique_node) || IsInPreserveSet(*unique_node) ||
unique_node->device() != gather_node->device())
return absl::OkStatus();
if (unique_node->op() == "UniqueV2" && !IsAxis0(*unique_node, 1))
return absl::OkStatus();
DataType unique_element_type;
TF_RETURN_IF_ERROR(GetNodeAttr(*unique_node, "T", &unique_element_type));
const TensorId idx_tensor = ParseTensorName(reduction_node->input(1));
if (idx_tensor != TensorId(unique_node->name(), 1)) return absl::OkStatus();
reduction_node->set_input(1, unique_node->input(0));
ctx().node_map->UpdateInput(reduction_node->name(),
reduction_node->input(1),
unique_node->input(0));
SetDataTypeToAttr(unique_element_type, "Tidx", reduction_node);
const OpInfo::TensorProperties* gather_input_properties;
TF_RETURN_IF_ERROR(
GetTensorProperties(gather_node->input(0), &gather_input_properties));
if (gather_input_properties->dtype() == DT_RESOURCE) {
NodeDef* variable_node = nullptr;
TF_RETURN_IF_ERROR(GetInputNode(gather_node->input(0), &variable_node));
NodeDef* read_var_node = ctx().optimized_graph->add_node();
read_var_node->set_name(OptimizedNodeName(
ParseNodeScopeAndName(reduction_node->name()), "ReadVar"));
read_var_node->set_op("ReadVariableOp");
read_var_node->add_input(gather_node->input(0));
read_var_node->set_device(variable_node->device());
auto attr = read_var_node->mutable_attr();
if (variable_node->attr().count("dtype")) {
SetAttrValue(variable_node->attr().at("dtype").type(),
&(*attr)["dtype"]);
}
if (gather_node->attr().count("dtype")) {
SetAttrValue(gather_node->attr().at("dtype").type(), &(*attr)["dtype"]);
}
if (gather_node->attr().count("_class")) {
(*attr)["_class"] = gather_node->attr().at("_class");
}
if (variable_node->attr().count("shape")) {
SetAttrValue(variable_node->attr().at("shape").shape(),
&(*attr)["_output_shapes"]);
}
ctx().node_map->AddNode(read_var_node->name(), read_var_node);
reduction_node->set_input(0, read_var_node->name());
ctx().node_map->UpdateInput(reduction_node->name(),
reduction_node->input(0),
read_var_node->name());
} else {
reduction_node->set_input(0, gather_node->input(0));
ctx().node_map->UpdateInput(reduction_node->name(),
reduction_node->input(0),
gather_node->input(0));
}
*simplified_node_name = reduction_node->name();
return absl::OkStatus();
}
private:
bool IsAxis0(const NodeDef& node, int axis_input) {
Tensor axis_tensor;
if (!GetTensorFromConstNode(node.input(axis_input), &axis_tensor))
return false;
if (axis_tensor.NumElements() != 1) return false;
if (axis_tensor.dtype() == DT_INT32) {
return axis_tensor.flat<int32>()(0) == 0;
} else if (axis_tensor.dtype() == DT_INT64) {
return axis_tensor.flat<int64_t>()(0) == 0;
} else {
return false;
}
}
};
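// Remove int32/int64 Casts feeding the indices and segment-ids inputs of a
// sparse segment reduction by adjusting its Tidx/Tsegmentids attributes.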
class RemoveCastIntoSegmentReductionStage : public ArithmeticOptimizerStage {
public:
explicit RemoveCastIntoSegmentReductionStage(
const GraphOptimizerContext& ctx,
const ArithmeticOptimizerContext& ctx_ext)
: ArithmeticOptimizerStage("RemoveCastIntoSegmentReductionStage", ctx,
ctx_ext) {}
~RemoveCastIntoSegmentReductionStage() override = default;
bool IsSupported(const NodeDef* node) const override {
return IsAnySparseSegmentReduction(*node);
}
Status TrySimplify(NodeDef* reduction_node,
string* simplified_node_name) override {
if (IsInPreserveSet(*reduction_node)) return absl::OkStatus();
bool optimized = false;
std::array<std::pair<int, string>, 2> input_details = {
std::make_pair(1, "Tidx"), std::make_pair(2, "Tsegmentids")};
for (const auto& input : input_details) {
int input_index = input.first;
const string& type_attr_name = input.second;
NodeDef* cast_node = nullptr;
TF_RETURN_IF_ERROR(
GetInputNode(reduction_node->input(input_index), &cast_node));
DataType original_index_type;
if (IsCastFromSupportedType(*cast_node, &original_index_type)) {
reduction_node->set_input(input_index, cast_node->input(0));
ctx().node_map->UpdateInput(reduction_node->name(),
reduction_node->input(1),
cast_node->input(0));
SetDataTypeToAttr(original_index_type, type_attr_name, reduction_node);
optimized = true;
}
}
if (optimized) *simplified_node_name = reduction_node->name();
return absl::OkStatus();
}
private:
bool IsCastFromSupportedType(const NodeDef& node, DataType* out_input_type) {
if (!IsCast(node)) return false;
if (!GetNodeAttr(node, "SrcT", out_input_type).ok()) return false;
return *out_input_type == DT_INT32 || *out_input_type == DT_INT64;
}
};
}  // namespace
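// Dispatches the enabled rewrite stages over a worklist seeded with every
// node in the graph. When a stage produces a simplified tensor, all consumers
// of the original node are rewired to it and re-enqueued for further passes.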
Status ArithmeticOptimizer::SimplifyArithmeticOps(bool can_use_shapes) {
SetVector<NodeDef*> nodes_to_simplify;
nodes_to_simplify.Reserve(optimized_graph_->node_size());
for (int i = 0; i < optimized_graph_->node_size(); ++i) {
nodes_to_simplify.PushBack(optimized_graph_->mutable_node(i));
}
const GraphOptimizerContext ctx(&nodes_to_preserve_, optimized_graph_,
graph_properties_.get(), node_map_.get(),
&feed_nodes_, opt_level_);
const ArithmeticOptimizerContext ctx_ext(&nodes_to_simplify);
const auto stop = [](const string& result) { return !result.empty(); };
GraphOptimizerStagePipeline<string> pipeline(stop);
const bool is_aggressive = opt_level_ == RewriterConfig::AGGRESSIVE;
if (options_.combine_add_to_addn && can_use_shapes)
pipeline.AddStage<AddOpsRewriteStage>(ctx, ctx_ext);
if (options_.fold_conjugate_into_transpose)
pipeline.AddStage<FoldConjugateIntoTranspose>(ctx, ctx_ext);
if (options_.fold_multiply_into_conv)
pipeline.AddStage<FoldMultiplyIntoConv>(ctx, ctx_ext);
if (options_.fold_transpose_into_matmul)
pipeline.AddStage<FoldTransposeIntoMatMul>(ctx, ctx_ext);
if (is_aggressive && options_.hoist_common_factor_out_of_aggregation &&
can_use_shapes)
pipeline.AddStage<HoistCommonFactorOutOfAggregation>(ctx, ctx_ext);
if (options_.minimize_broadcasts && can_use_shapes)
pipeline.AddStage<MinimizeBroadcasts>(ctx, ctx_ext);
if (options_.remove_identity_transpose && can_use_shapes)
pipeline.AddStage<RemoveIdentityTranspose>(ctx, ctx_ext);
if (options_.remove_involution)
pipeline.AddStage<RemoveInvolution>(ctx, ctx_ext);
if (options_.remove_redundant_bitcast)
pipeline.AddStage<RemoveRedundantBitcastStage>(ctx, ctx_ext);
if (options_.remove_redundant_cast)
pipeline.AddStage<RemoveRedundantCastStage>(ctx, ctx_ext);
if (options_.replace_pack_with_tile_reshape)
pipeline.AddStage<ReplacePackWithTileReshape>(ctx, ctx_ext);
if (options_.replace_mul_with_tile && can_use_shapes)
pipeline.AddStage<ReplaceMulWithBroadcastByTile>(ctx, ctx_ext);
if (options_.reduce_upsampling_dims)
pipeline.AddStage<ReduceUpsamplingDims>(ctx, ctx_ext);
if (options_.remove_redundant_reshape && can_use_shapes)
pipeline.AddStage<RemoveRedundantReshapeOrBroadcastTo>(ctx, ctx_ext);
if (options_.remove_negation)
pipeline.AddStage<RemoveNegationStage>(ctx, ctx_ext);
if (options_.replace_mul_with_square)
pipeline.AddStage<ReplaceMulWithSquare>(ctx, ctx_ext);
if (options_.remove_logical_not)
pipeline.AddStage<RemoveLogicalNotStage>(ctx, ctx_ext);
if (options_.reorder_cast_like_and_value_preserving)
pipeline.AddStage<ReorderCastLikeAndValuePreserving>(ctx, ctx_ext);
if (options_.simplify_aggregation)
pipeline.AddStage<SimplifyAggregation>(ctx, ctx_ext);
if (options_.hoist_cwise_unary_chains)
pipeline.AddStage<HoistCWiseUnaryChainsStage>(ctx, ctx_ext);
if (options_.convert_sqrt_div_to_rsqrt_mul)
pipeline.AddStage<SqrtDivToRsqrtMulStage>(ctx, ctx_ext);
if (options_.remove_idempotent)
pipeline.AddStage<RemoveIdempotentStage>(ctx, ctx_ext);
if (options_.convert_pow) pipeline.AddStage<ConvertPowStage>(ctx, ctx_ext);
if (options_.convert_log1p)
pipeline.AddStage<ConvertLog1pStage>(ctx, ctx_ext);
if (options_.convert_log_softmax)
pipeline.AddStage<LogSoftmaxStage>(ctx, ctx_ext);
if (options_.optimize_max_or_min_of_monotonic)
pipeline.AddStage<OptimizeMaxOrMinOfMonotonicStage>(ctx, ctx_ext);
if (options_.convert_expm1)
pipeline.AddStage<ConvertExpm1Stage>(ctx, ctx_ext);
if (options_.unary_ops_composition)
pipeline.AddStage<UnaryOpsComposition>(ctx, ctx_ext);
if (options_.remove_stack_slice_same_axis)
pipeline.AddStage<RemoveStackSliceSameAxis>(ctx, ctx_ext);
if (options_.simplify_embedding_lookup)
pipeline.AddStage<SimplifyEmbeddingLookupStage>(ctx, ctx_ext);
if (options_.remove_cast_into_segment_reduction)
pipeline.AddStage<RemoveCastIntoSegmentReductionStage>(ctx, ctx_ext);
if (options_.fuse_squared_diff)
pipeline.AddStage<FuseSquaredDiffStage>(ctx, ctx_ext);
VLOG(1) << "Run " << pipeline.NumStages() << " arithmetic optimizer stages: "
<< absl::StrJoin(pipeline.StageNames(), ", ");
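  // Main loop: pop a node, run it through the stage pipeline, and if it was
  // rewritten, forward its consumers to the simplified tensor.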
while (!nodes_to_simplify.Empty()) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
NodeDef* node = nodes_to_simplify.PopBack();
string simplified_tensor = "";
bool optimized = pipeline.PassThroughAllStages(node, &simplified_tensor);
if (!optimized) continue;
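    // The rewrite produced a different node: re-wire every consumer of the
    // old node to the simplified tensor and revisit those consumers.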
if (NodeName(simplified_tensor) != node->name()) {
NodeDef* simplified_node = node_map_->GetNode(simplified_tensor);
if (simplified_node != nullptr) {
nodes_to_simplify.PushBack(simplified_node);
}
const std::vector<NodeDef*> consumers =
node_map_->GetOutputsOrderedByNodeName(node->name());
for (NodeDef* consumer : consumers) {
for (int i = 0; i < consumer->input_size(); ++i) {
int operand_pos;
string operand_node_name =
ParseNodeName(consumer->input(i), &operand_pos);
if (operand_node_name == node->name()) {
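          // Keep control inputs in control form ("^name") after rewiring.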
*consumer->mutable_input(i) =
(operand_pos < 0
? AsControlDependency(NodeName(simplified_tensor))
: simplified_tensor);
}
}
node_map_->UpdateInput(consumer->name(), node->name(),
simplified_tensor);
nodes_to_simplify.PushBack(consumer);
}
}
}
return absl::OkStatus();
}
Status ArithmeticOptimizer::Optimize(Cluster* /*cluster*/,
const GrapplerItem& item,
GraphDef* optimized_graph) {
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
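  // Optimize a copy of the item's graph; the result is moved into
  // optimized_graph at the end.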
GrapplerItem optimized_item(item);
optimized_graph_ = &optimized_item.graph;
node_map_.reset(new NodeMap(optimized_graph_));
for (const auto& feed : item.feed) {
feed_nodes_.insert(NodeName(feed.first));
}
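  // UnaryOpsComposition is not differentiable, so keep it enabled only when
  // the item allows non-differentiable rewrites.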
options_.unary_ops_composition &=
item.optimization_options().allow_non_differentiable_rewrites;
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph_));
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
graph_properties_.reset(new GraphProperties(optimized_item));
const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
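  // Shape inference is best-effort: if it fails, shape-dependent stages are
  // skipped (can_use_shapes == false) rather than aborting the optimizer.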
  const Status status =
      graph_properties_->InferStatically(assume_valid_feeds,
                                         /*aggressive_shape_inference=*/false,
                                         /*include_input_tensor_values=*/false);
const bool can_use_shapes = status.ok();
if (!can_use_shapes) {
VLOG(1) << "Shape inference failed." << status.message();
}
TF_RETURN_IF_ERROR(SimplifyArithmeticOps(can_use_shapes));
*optimized_graph = std::move(*optimized_graph_);
return absl::OkStatus();
}
}  // namespace grappler
}  // namespace tensorflow | #include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
#include <complex>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer_test_utils.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
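// Name prefixes that the optimizer stages prepend to the nodes they create.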
constexpr char kHoistFactorOptimizerDiv[] =
"ArithmeticOptimizer/HoistCommonFactor_Div_";
constexpr char kHoistFactorOptimizerMul[] =
"ArithmeticOptimizer/HoistCommonFactor_Mul_";
constexpr char kHoistFactorOptimizerAdd[] =
"ArithmeticOptimizer/HoistCommonFactor_AddV2_";
constexpr char kSimplifyAggregationConst[] =
"ArithmeticOptimizer/SimplifyAggregation_Const_";
constexpr char kSimplifyAggregationMul[] =
"ArithmeticOptimizer/SimplifyAggregation_Mul_";
string HoistMulName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerMul, "");
}
string HoistDivName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerDiv, "");
}
string HoistAddName(const string& name) {
return AddPrefixToNodeName(name, kHoistFactorOptimizerAdd, "");
}
string AggregationConstName(const string& name) {
return AddPrefixToNodeName(name, kSimplifyAggregationConst, "");
}
string AggregationMulName(const string& name) {
return AddPrefixToNodeName(name, kSimplifyAggregationMul, "");
}
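// Verifies that `optimized_graph` is node-for-node identical to
// `original_graph`; `line` is the caller's __LINE__, included in failure
// messages.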
void VerifyGraphsMatch(const GraphDef& original_graph,
const GraphDef& optimized_graph, int line) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << line;
EXPECT_EQ(original.op(), optimized.op()) << line;
EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << line;
}
}
}
}  // namespace
TEST_F(ArithmeticOptimizerTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
ArithmeticOptimizer optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
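// Multiplying by a constant tensor of ones whose only effect is broadcasting
// should be rewritten as a Tile of the other operand.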
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTile) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 44, 1, 96, 1, 64}));
Output ones = ops::Const(s.WithOpName("ones"), 1.0f, {1, 1, 2, 1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("mul"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor =
GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 44, 1, 96, 1, 64}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
ASSERT_EQ(CountOpNodes(g, "Mul"), 0);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplaceMulWithBroadcastByTile";
const NodeDef* t = node_map.GetNode(absl::StrCat(p, "_", "Tile_mul"));
const NodeDef* c = node_map.GetNode(absl::StrCat(p, "_", "Const_mul"));
ASSERT_NE(t, nullptr);
ASSERT_NE(c, nullptr);
EXPECT_EQ(t->op(), "Tile");
ASSERT_EQ(t->input_size(), 2);
EXPECT_EQ(t->input(0), "input");
EXPECT_EQ(t->input(1), c->name());
EXPECT_EQ(t->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(t->attr().at("Tmultiples").type(), c->attr().at("dtype").type());
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTilePreserveControl) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 1, 1}));
Output ones = ops::Const(s.WithOpName("ones").WithControlDependencies(input),
1.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("mul"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
ASSERT_EQ(CountOpNodes(g, "Mul"), 0);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplaceMulWithBroadcastByTile";
const NodeDef* c = node_map.GetNode(absl::StrCat(p, "_", "Const_mul"));
ASSERT_NE(c, nullptr);
ASSERT_EQ(c->input_size(), 1);
EXPECT_TRUE(IsControlInput(c->input(0)));
EXPECT_EQ(c->input(0), "^input");
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNoBroadcast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({1, 2, 1}));
Output ones = ops::Const(s.WithOpName("ones"), 1.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("multiply"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 1}));
auto expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(g, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNotConst) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input1 = ops::Placeholder(s.WithOpName("input1"), DT_FLOAT,
ops::Placeholder::Shape({1, 1, 1}));
Output input2 = ops::Placeholder(s.WithOpName("input2"), DT_FLOAT,
ops::Placeholder::Shape({1, 2, 1}));
Output multiply = ops::Mul(s.WithOpName("multiply"), input1, input2);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor1 = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto tensor2 = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 2, 1}));
auto expected = EvaluateNodes(item.graph, item.fetch,
{{"input1", tensor1}, {"input2", tensor2}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(item.graph, item.fetch,
{{"input1", tensor1}, {"input2", tensor2}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithBroadcastByTileNotOnes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({1, 1, 1}));
Output ones = ops::Const(s.WithOpName("ones"), 2.0f, {1, 2, 1});
Output multiply = ops::Mul(s.WithOpName("multiply"), input, ones);
Output output = ops::Identity(s.WithOpName("output"), multiply);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 1, 1}));
auto expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithBroadcastByTile(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 4);
VerifyGraphsMatch(item.graph, g, __LINE__);
auto result = EvaluateNodes(g, item.fetch, {{"Placeholder", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReduceUpsamplingDims) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({1, 22, 48, 64}));
Output reshape_a = ops::Reshape(
s.WithOpName("reshape_a"), input,
ops::Const(s.WithOpName("shape_a"), {1, 22, 1, 48, 1, 64}, {6}));
Output tile =
ops::Tile(s.WithOpName("tile"), reshape_a,
ops::Const(s.WithOpName("multiples"), {1, 1, 2, 1, 2, 1}, {6}));
Output reshape_b =
ops::Reshape(s.WithOpName("reshape_b"), tile,
ops::Const(s.WithOpName("shape_b"), {1, 44, 96, 64}));
Output output = ops::Identity(s.WithOpName("output"), reshape_b);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensor = GenerateRandomTensor<DT_FLOAT>(TensorShape({1, 22, 48, 64}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"input", tensor}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReduceUpsamplingDims(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
ASSERT_EQ(CountOpNodes(g, "Tile"), 1);
ASSERT_EQ(CountOpNodes(g, "Reshape"), 2);
ASSERT_EQ(CountOpNodes(g, "Const"), 3);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReduceUpsamplingDims";
const NodeDef* ra =
node_map.GetNode(absl::StrCat(p, "_", "Reshape_reshape_b"));
const NodeDef* rb = node_map.GetNode("reshape_b");
const NodeDef* t = node_map.GetNode(absl::StrCat(p, "_", "Tile_reshape_b"));
ASSERT_NE(ra, nullptr);
ASSERT_NE(rb, nullptr);
ASSERT_NE(t, nullptr);
ASSERT_EQ(rb->input_size(), 2);
EXPECT_EQ(rb->input(0), t->name());
ASSERT_EQ(t->input_size(), 2);
EXPECT_EQ(t->input(0), ra->name());
ASSERT_EQ(ra->input_size(), 2);
EXPECT_EQ(ra->input(0), "input");
{
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
{
auto result = EvaluateNodes(g, item.fetch, {{"input", tensor}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
}
TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output d = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithControlDependencies(d).WithOpName("mul"), c, c);
Output mul_no_nan = ops::MulNoNan(s.WithOpName("mul_no_nan"), d, d);
Output id = ops::Identity(s.WithOpName("id"), mul);
Output id2 = ops::Identity(s.WithOpName("id2"), mul_no_nan);
GrapplerItem item;
item.fetch = {"id", "id2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyReplaceMulWithSquare(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 6);
NodeMap node_map(&output);
const string p = "ArithmeticOptimizer/ReplaceMulWithSquare";
const NodeDef* square_node = node_map.GetNode(absl::StrCat(p, "_", "mul"));
ASSERT_NE(square_node, nullptr);
EXPECT_EQ(square_node->op(), "Square");
ASSERT_EQ(square_node->input_size(), 2);
EXPECT_EQ(square_node->input(0), "c");
EXPECT_EQ(square_node->input(1), "^d");
const NodeDef* square_node2 =
node_map.GetNode(absl::StrCat(p, "_", "mul_no_nan"));
ASSERT_NE(square_node2, nullptr);
EXPECT_EQ(square_node2->op(), "Square");
ASSERT_EQ(square_node2->input_size(), 1);
EXPECT_EQ(square_node2->input(0), "d");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c"), {b, b}, ops::Stack::Axis(2));
Output o = ops::Identity(s.WithOpName("output"), c);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplacePackWithTileReshape";
const NodeDef* t_node = node_map.GetNode(absl::StrCat(p, "_", "Tile_c"));
const NodeDef* c_node = node_map.GetNode(absl::StrCat(p, "_", "Multiples_c"));
const NodeDef* s_node = node_map.GetNode(absl::StrCat(p, "_", "Shape_c"));
const NodeDef* r_node = node_map.GetNode(absl::StrCat(p, "_", "Reshape_c"));
const NodeDef* a_node = node_map.GetNode("a");
ASSERT_NE(t_node, nullptr);
ASSERT_NE(c_node, nullptr);
ASSERT_NE(s_node, nullptr);
ASSERT_NE(r_node, nullptr);
ASSERT_NE(a_node, nullptr);
EXPECT_EQ(c_node->op(), "Const");
EXPECT_EQ(s_node->op(), "Const");
ASSERT_EQ(r_node->input_size(), 2);
EXPECT_EQ(r_node->op(), "Reshape");
EXPECT_EQ(r_node->input(0), t_node->name());
EXPECT_EQ(r_node->input(1), s_node->name());
ASSERT_EQ(t_node->input_size(), 2);
EXPECT_EQ(t_node->op(), "Tile");
EXPECT_EQ(t_node->input(0), a_node->name());
EXPECT_EQ(t_node->input(1), c_node->name());
EXPECT_EQ(t_node->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(t_node->attr().at("Tmultiples").type(),
c_node->attr().at("dtype").type());
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshapeControlDeps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output x = ops::Identity(s.WithOpName("x"), a);
Output y = ops::Identity(s.WithOpName("y"), a);
Output b = ops::Stack(s.WithOpName("b").WithControlDependencies(x), {a, a},
ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c").WithControlDependencies(y), {b, b},
ops::Stack::Axis(2));
Output o = ops::Identity(s.WithOpName("output"), c);
GrapplerItem item;
item.fetch = {"output"};
item.keep_ops = {"x", "y"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
EXPECT_EQ(CountOpNodes(g, "Identity"), 3);
NodeMap node_map(&g);
const string p = "ArithmeticOptimizer/ReplacePackWithTileReshape";
const NodeDef* t_node = node_map.GetNode(absl::StrCat(p, "_", "Tile_c"));
const NodeDef* c_node = node_map.GetNode(absl::StrCat(p, "_", "Multiples_c"));
const NodeDef* s_node = node_map.GetNode(absl::StrCat(p, "_", "Shape_c"));
const NodeDef* a_node = node_map.GetNode("a");
ASSERT_NE(t_node, nullptr);
ASSERT_NE(c_node, nullptr);
ASSERT_NE(s_node, nullptr);
ASSERT_NE(a_node, nullptr);
ASSERT_EQ(t_node->input_size(), 4);
EXPECT_EQ(t_node->op(), "Tile");
EXPECT_EQ(t_node->input(0), a_node->name());
EXPECT_EQ(t_node->input(1), c_node->name());
EXPECT_EQ(t_node->input(2), "^y");
EXPECT_EQ(t_node->input(3), "^x");
ASSERT_EQ(c_node->input_size(), 1);
EXPECT_EQ(c_node->input(0), "^a");
ASSERT_EQ(s_node->input_size(), 1);
ASSERT_EQ(s_node->input(0), "^a");
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileRemoveReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(3));
Output c = ops::Stack(s.WithOpName("c"), {b, b}, ops::Stack::Axis(2));
Output r =
ops::Reshape(s.WithOpName("r"), c, ops::Const(s, {3, 10, 14, 11}, {4}));
Output o = ops::Identity(s.WithOpName("output"), r);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 5, 7, 11}));
auto expected = EvaluateNodes(item.graph, item.fetch, {{"a", a_t}});
ASSERT_EQ(expected.size(), 1);
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 8);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 3);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 2);
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
EXPECT_EQ(g.node_size(), 6);
EXPECT_EQ(CountOpNodes(g, "Pack"), 0);
EXPECT_EQ(CountOpNodes(g, "Tile"), 1);
EXPECT_EQ(CountOpNodes(g, "Const"), 2);
EXPECT_EQ(CountOpNodes(g, "Reshape"), 1);
auto result = EvaluateNodes(g, item.fetch, {{"a", a_t}});
ASSERT_EQ(result.size(), 1);
test::ExpectTensorNear<float>(result[0], expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, ReplacePackWithTileReshapeOutOfRange) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({3, 5, 7, 11}));
Output b = ops::Stack(s.WithOpName("b"), {a, a}, ops::Stack::Axis(4));
Output o = ops::Identity(s.WithOpName("output"), b);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef g;
ArithmeticOptimizer optimizer;
EnableOnlyReplacePackWithTileReshape(&optimizer);
OptimizeAndPrune(&optimizer, &item, &g);
VerifyGraphsMatch(item.graph, g, __LINE__);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAdjacentNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto neg1 = ops::Neg(s.WithOpName("neg1"), c);
auto neg2 = ops::Neg(s.WithOpName("neg2"), neg1);
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), neg2);
auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), recip1);
auto id = ops::Identity(s.WithOpName("id"), recip2);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
ASSERT_EQ(output.node_size(), 2);
EXPECT_EQ(output.node(1).name(), "id");
ASSERT_EQ(output.node(1).input_size(), 1);
EXPECT_EQ(output.node(1).input(0), "c");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAroundValuePreservingChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
auto recip2 = ops::Reciprocal(s.WithOpName("recip2"), squeeze);
auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
std::vector<string> fetch = {"id2"};
GrapplerItem item;
item.fetch = fetch;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 3);
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "squeeze") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "c");
found++;
} else if (node.name() == "id2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "squeeze");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveInvolutionSkipControlDependencies) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
auto recip1 = ops::Reciprocal(s.WithOpName("recip1"), c);
auto id1 = ops::Identity(s.WithOpName("id1"), recip1);
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1);
auto recip2 = ops::Reciprocal(
s.WithOpName("recip2").WithControlDependencies(squeeze), c);
auto id2 = ops::Identity(s.WithOpName("id2"), recip2);
std::vector<string> fetch = {"id2"};
GrapplerItem item;
item.fetch = fetch;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveInvolution(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
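// x + x should simplify to 2 * x: a constant 2.0 (with a control dependency
// on x) multiplied by x.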
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add"), x, x);
Output id = ops::Identity(s.WithOpName("id"), add);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 5);
const string optimized_const_name = AggregationConstName("add");
const string optimized_mul_name = AggregationMulName("add");
const NodeDef* new_const = node_map.GetNode(optimized_const_name);
ASSERT_NE(new_const, nullptr);
ASSERT_EQ(new_const->input_size(), 1);
EXPECT_EQ(new_const->input(0), "^x");
EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
string("\0\0\0@", 4));
const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
ASSERT_NE(new_mul, nullptr);
ASSERT_EQ(new_mul->input_size(), 2);
EXPECT_EQ(new_mul->input(0), optimized_const_name);
EXPECT_EQ(new_mul->input(1), "x");
const NodeDef* new_id = node_map.GetNode("id");
ASSERT_NE(new_id, nullptr);
ASSERT_EQ(new_id->input_size(), 1);
EXPECT_EQ(new_id->input(0), optimized_mul_name);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2});
Output x = ops::Const(s.WithOpName("x"), {3.0f, 4.0f}, {1, 2});
Output add = ops::Add(s.WithOpName("add").WithControlDependencies(y), x, x);
Output id = ops::Identity(s.WithOpName("id"), add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
std::vector<string> fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 6);
const string optimized_const_name = AggregationConstName("add");
const string optimized_mul_name = AggregationMulName("add");
const NodeDef* new_const = node_map.GetNode(optimized_const_name);
ASSERT_NE(new_const, nullptr);
ASSERT_EQ(new_const->input_size(), 1);
EXPECT_EQ(new_const->input(0), "^x");
EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
string("\0\0\0@", 4));
const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
ASSERT_NE(new_mul, nullptr);
ASSERT_EQ(new_mul->input_size(), 3);
EXPECT_EQ(new_mul->input(0), optimized_const_name);
EXPECT_EQ(new_mul->input(1), "x");
EXPECT_EQ(new_mul->input(2), "^y");
const NodeDef* new_id = node_map.GetNode("id");
ASSERT_NE(new_id, nullptr);
ASSERT_EQ(new_id->input_size(), 1);
EXPECT_EQ(new_id->input(0), optimized_mul_name);
auto tensors = EvaluateNodes(output, fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output p = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({10, 10}));
Output add = ops::Add(s.WithOpName("Add"), p, p);
Output add1 = ops::Add(s.WithOpName("Add_1"), p, p);
Output add4 = ops::Add(s.WithOpName("Add_4"), add, add1);
Output add5 = ops::Add(s.WithOpName("Add_5"), add, add1);
Output add6 = ops::Add(s.WithOpName("Add_6"), add4, add5);
Output id = ops::Identity(s.WithOpName("id"), add6);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const std::vector<string> devices{
"/device:CPU:0", "/device:GPU:0", "/device:CPU:0", "/device:GPU:1",
"/device:CPU:0", "/device:CPU:0", "/device:CPU:0",
};
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(devices[i]);
}
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
DisableAddToAddNCombining(&optimizer);
GraphDef output;
DedupAndOptimizeTwiceAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 8);
const NodeDef* id_node = node_map.GetNode("id");
ASSERT_NE(id_node, nullptr);
ASSERT_EQ(id_node->input_size(), 1);
EXPECT_EQ(id_node->input(0), HoistMulName("Add_6"));
const NodeDef* mul_node = node_map.GetNode(HoistMulName("Add_6"));
ASSERT_NE(mul_node, nullptr);
ASSERT_EQ(mul_node->input_size(), 2);
EXPECT_EQ(mul_node->input(0), "Placeholder");
EXPECT_EQ(mul_node->input(1), HoistAddName("Add_6"));
const NodeDef* add_6_node = node_map.GetNode(HoistAddName("Add_6"));
ASSERT_NE(add_6_node, nullptr);
ASSERT_EQ(add_6_node->input_size(), 2);
EXPECT_EQ(add_6_node->input(0), HoistAddName("Add_4"));
EXPECT_EQ(add_6_node->input(1), HoistAddName("Add_5"));
const NodeDef* add_4_node = node_map.GetNode(HoistAddName("Add_4"));
ASSERT_NE(add_4_node, nullptr);
EXPECT_EQ(add_4_node->op(), "Add");
  ASSERT_EQ(add_4_node->input_size(), 2);
EXPECT_EQ(add_4_node->input(0), AggregationConstName("Add"));
EXPECT_EQ(add_4_node->input(1), AggregationConstName("Add_1"));
const NodeDef* add_5_node = node_map.GetNode(HoistAddName("Add_5"));
ASSERT_NE(add_5_node, nullptr);
EXPECT_EQ(add_5_node->op(), "Add");
ASSERT_EQ(add_5_node->input_size(), 2);
EXPECT_EQ(add_5_node->input(0), AggregationConstName("Add"));
EXPECT_EQ(add_5_node->input(1), AggregationConstName("Add_1"));
const NodeDef* add_const_node = node_map.GetNode(AggregationConstName("Add"));
ASSERT_NE(add_const_node, nullptr);
EXPECT_EQ(add_const_node->op(), "Const");
ASSERT_EQ(add_const_node->input_size(), 1);
EXPECT_EQ(add_const_node->input(0), "^Placeholder");
const NodeDef* add_1_const_node =
node_map.GetNode(AggregationConstName("Add_1"));
ASSERT_NE(add_1_const_node, nullptr);
EXPECT_EQ(add_1_const_node->op(), "Const");
ASSERT_EQ(add_1_const_node->input_size(), 1);
EXPECT_EQ(add_1_const_node->input(0), "^Placeholder");
}
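// mul1 = x*y1 and mul2 = y2*x share the factor x, so add = mul1 + mul2 should
// hoist it: x * (y1 + y2). With AddN the rewrite requires matching shapes.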
TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
for (bool matching_shapes : {true, false}) {
for (bool use_addn : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y1 = ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
Output y2 = matching_shapes
? ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2})
: ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
Output mul1 = ops::Mul(s.WithOpName("mul1"), x, y1);
Output mul2 = ops::Mul(s.WithOpName("mul2"), y2, x);
Output id =
use_addn ? ops::Identity(s.WithOpName("id"),
ops::AddN(s.WithOpName("add"), {mul1, mul2}))
: ops::Identity(s.WithOpName("id"),
ops::Add(s.WithOpName("add"), mul1, mul2));
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
EnableOnlyHoistCommonFactor(&optimizer);
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
if (use_addn && !matching_shapes) {
VerifyGraphsMatch(item.graph, output, __LINE__);
} else {
EXPECT_EQ(output.node_size(), 9);
const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
ASSERT_NE(new_add_node, nullptr) << "Hoisted Add node not found";
ASSERT_EQ(new_add_node->input_size(), 2);
EXPECT_EQ(new_add_node->input(0), "y1");
EXPECT_EQ(new_add_node->input(1), "y2");
const NodeDef* new_mul_node = node_map.GetNode(HoistMulName("add"));
ASSERT_NE(new_mul_node, nullptr) << "Hoisted Mul node not found";
ASSERT_EQ(new_mul_node->input_size(), 2);
EXPECT_EQ(new_mul_node->input(0), "x");
EXPECT_EQ(new_mul_node->input(1), new_add_node->name());
const NodeDef* id_node = node_map.GetNode("id");
ASSERT_NE(id_node, nullptr) << "Id node not found";
EXPECT_EQ(id_node->name(), "id");
ASSERT_EQ(id_node->input_size(), 1);
EXPECT_EQ(id_node->input(0), HoistMulName("add"));
}
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
}
TEST_F(ArithmeticOptimizerTest, HoistFactorDiv) {
for (bool matching_shapes : {true, false}) {
for (bool use_addn : {true, false}) {
for (bool use_ints : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = use_ints
? ops::Const(s.WithOpName("x"), {1, 2}, {1, 2})
: ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y1 = use_ints
? ops::Const(s.WithOpName("y1"), {3, 4}, {1, 2})
: ops::Const(s.WithOpName("y1"), {3.0f, 4.0f}, {1, 2});
Output y2;
if (matching_shapes) {
y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5, 6}, {1, 2})
: ops::Const(s.WithOpName("y2"), {5.0f, 6.0f}, {1, 2});
} else {
y2 = use_ints ? ops::Const(s.WithOpName("y2"), {5}, {1, 1})
: ops::Const(s.WithOpName("y2"), {5.0f}, {1, 1});
}
Output div1 = ops::Div(s.WithOpName("div1"), y1, x);
Output div2 = ops::Div(s.WithOpName("div2"), y2, x);
Output id =
use_addn
? ops::Identity(s.WithOpName("id"),
ops::AddN(s.WithOpName("add"), {div1, div2}))
: ops::Identity(s.WithOpName("id"),
ops::Add(s.WithOpName("add"), div1, div2));
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
EnableOnlyHoistCommonFactor(&optimizer);
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
if ((use_addn && !matching_shapes) || use_ints) {
VerifyGraphsMatch(item.graph, output, __LINE__);
} else {
EXPECT_EQ(output.node_size(), 9);
const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
          ASSERT_NE(new_add_node, nullptr) << "Hoisted Add node not found";
ASSERT_EQ(new_add_node->input_size(), 2);
EXPECT_EQ(new_add_node->input(0), "y1");
EXPECT_EQ(new_add_node->input(1), "y2");
const NodeDef* new_div_node = node_map.GetNode(HoistDivName("add"));
          ASSERT_NE(new_div_node, nullptr) << "Hoisted Div node not found";
ASSERT_EQ(new_div_node->input_size(), 2);
EXPECT_EQ(new_div_node->input(0), new_add_node->name());
EXPECT_EQ(new_div_node->input(1), "x");
const NodeDef* id_node = node_map.GetNode("id");
          ASSERT_NE(id_node, nullptr) << "Id node not found";
          EXPECT_EQ(id_node->name(), "id");
ASSERT_EQ(id_node->input_size(), 1);
EXPECT_EQ(id_node->input(0), HoistDivName("add"));
}
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
if (use_ints) {
test::ExpectTensorEqual<int32>(tensors[0], tensors_expected[0]);
} else {
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
}
}
}
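// Conj followed by Transpose should fuse into a single ConjugateTranspose.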
TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output z = ops::Complex(s.WithOpName("z"), re, im);
Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
Output conj = ops::Conj(s.WithOpName("conj"), z);
Output transp = ops::Transpose(s.WithOpName("trans"), conj, perm);
GrapplerItem item;
item.fetch = {"trans"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 7);
const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
const string optimized_name = absl::StrCat(p, "_", "trans");
const NodeDef* trans_fused_node = node_map.GetNode(optimized_name);
ASSERT_NE(trans_fused_node, nullptr);
EXPECT_EQ(trans_fused_node->op(), "ConjugateTranspose");
ASSERT_EQ(trans_fused_node->input_size(), 2);
EXPECT_EQ(trans_fused_node->input(0), "z");
EXPECT_EQ(trans_fused_node->input(1), "perm");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
}
TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output z = ops::Complex(s.WithOpName("z"), re, im);
Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
Output conj = ops::Conj(s.WithOpName("conj"), z);
Output transp =
ops::ConjugateTranspose(s.WithOpName("conjugate_trans"), conj, perm);
GrapplerItem item;
item.fetch = {"conjugate_trans"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 7);
const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
const string optimized_name = absl::StrCat(p, "_", "conjugate_trans");
const NodeDef* conjugate_trans_fused_node = node_map.GetNode(optimized_name);
ASSERT_NE(conjugate_trans_fused_node, nullptr);
EXPECT_EQ(conjugate_trans_fused_node->op(), "Transpose");
ASSERT_EQ(conjugate_trans_fused_node->input_size(), 2);
EXPECT_EQ(conjugate_trans_fused_node->input(0), "z");
EXPECT_EQ(conjugate_trans_fused_node->input(1), "perm");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
}
TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output z = ops::Complex(s.WithOpName("z"), re, im);
Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
Output trans = ops::Transpose(s.WithOpName("trans"), z, perm);
Output conj = ops::Conj(s.WithOpName("conj"), trans);
GrapplerItem item;
item.fetch = {"conj"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 7);
const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
const string optimized_name = absl::StrCat(p, "_", "conj");
const NodeDef* conj_fused_node = node_map.GetNode(optimized_name);
ASSERT_NE(conj_fused_node, nullptr);
EXPECT_EQ(conj_fused_node->op(), "ConjugateTranspose");
ASSERT_EQ(conj_fused_node->input_size(), 2);
EXPECT_EQ(conj_fused_node->input(0), "z");
EXPECT_EQ(conj_fused_node->input(1), "perm");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
}
TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
for (const string matmul_type :
{"MatMul", "SparseMatMul", "BatchMatMul", "BatchMatMulV2"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output b = ops::Const(s.WithOpName("b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
Output trans_a = ops::Transpose(s.WithOpName("trans_a"), a, perm);
Output trans_b = ops::Transpose(s.WithOpName("trans_b"), b, perm);
Output matmul;
auto matmul_op = s.WithOpName("matmul");
if (matmul_type == "MatMul") {
matmul = ops::MatMul(matmul_op, trans_a, trans_b);
} else if (matmul_type == "SparseMatMul") {
matmul = ops::SparseMatMul(matmul_op, trans_a, trans_b);
} else if (matmul_type == "BatchMatMul") {
matmul = ops::BatchMatMul(matmul_op, trans_a, trans_b);
} else if (matmul_type == "BatchMatMulV2") {
matmul = ops::BatchMatMulV2(matmul_op, trans_a, trans_b);
}
auto identity = ops::Identity(s.WithOpName("identity"), matmul);
GrapplerItem item;
item.fetch = {"identity"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
EnableOnlyFoldTransposeIntoMatMul(&optimizer);
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 8);
const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
const string optimized_name = absl::StrCat(p, "_", "matmul");
const NodeDef* matmul_fused_node = node_map.GetNode(optimized_name);
ASSERT_NE(matmul_fused_node, nullptr);
ASSERT_EQ(matmul_fused_node->input_size(), 2);
EXPECT_EQ(matmul_fused_node->input(0), "a");
EXPECT_EQ(matmul_fused_node->input(1), "b");
if (matmul_type == "BatchMatMul" || matmul_type == "BatchMatMulV2") {
EXPECT_TRUE(matmul_fused_node->attr().at("adj_x").b());
EXPECT_TRUE(matmul_fused_node->attr().at("adj_y").b());
} else {
EXPECT_TRUE(matmul_fused_node->attr().at("transpose_a").b());
EXPECT_TRUE(matmul_fused_node->attr().at("transpose_b").b());
}
const NodeDef* identity_node = node_map.GetNode("identity");
ASSERT_NE(identity_node, nullptr);
ASSERT_EQ(identity_node->input_size(), 1);
EXPECT_EQ(identity_node->input(0), optimized_name);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
TEST_F(ArithmeticOptimizerTest, FoldConjugateTransposeIntoBatchMatMul) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output re_a =
ops::Const(s.WithOpName("re_a"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output im_a =
ops::Const(s.WithOpName("im_a"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2});
Output re_b =
ops::Const(s.WithOpName("re_b"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output im_b =
ops::Const(s.WithOpName("im_b"), {-5.0f, -6.0f, -7.0f, -8.0f}, {2, 2});
Output a = ops::Complex(s.WithOpName("a"), re_a, im_a);
Output b = ops::Complex(s.WithOpName("b"), re_b, im_b);
Output perm = ops::Const(s.WithOpName("perm"), {1, 0}, {2});
Output trans_a = ops::ConjugateTranspose(s.WithOpName("trans_a"), a, perm);
Output trans_b = ops::ConjugateTranspose(s.WithOpName("trans_b"), b, perm);
Output matmul = ops::BatchMatMul(s.WithOpName("matmul"), trans_a, trans_b);
Output identity = ops::Identity(s.WithOpName("identity"), matmul);
GrapplerItem item;
item.fetch = {"identity"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
ArithmeticOptimizer optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 12);
const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
const string optimized_name = absl::StrCat(p, "_", "matmul");
const NodeDef* optimized_matmul = node_map.GetNode(optimized_name);
ASSERT_NE(optimized_matmul, nullptr);
ASSERT_EQ(optimized_matmul->input_size(), 2);
EXPECT_EQ(optimized_matmul->input(0), "a");
EXPECT_EQ(optimized_matmul->input(1), "b");
EXPECT_TRUE(optimized_matmul->attr().at("adj_x").b());
EXPECT_TRUE(optimized_matmul->attr().at("adj_y").b());
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<complex64>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeIdentityReshape) {
for (bool is_broadcastto : {false, true}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28}));
Output inputs_shape = ops::Shape(s, inputs);
Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
ops::Const(s, {1}, {1}));
Output target_shape = ops::Concat(
s.WithOpName("target_shape"),
{batch_size, ops::Const(s, {3, 28, 28}, {3})}, ops::Const(s, {0}, {}));
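    // Both branches add a node named "outputs" to the scope's graph; the C++
    // Output variable going out of scope does not remove the node.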
if (is_broadcastto) {
Output outputs = ops::Identity(s.WithOpName("outputs"),
ops::BroadcastTo(s, inputs, target_shape));
} else {
Output outputs = ops::Identity(s.WithOpName("outputs"),
ops::Reshape(s, inputs, target_shape));
}
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", x_t}});
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
EXPECT_EQ(CountOpNodes(output, "BroadcastTo"), 0);
auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", x_t}});
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
TEST_F(ArithmeticOptimizerTest,
RemoveRedundantReshapeIdentityReshapeBetweenSymbolicShapes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, -1, -1}));
Output inputs_shape = ops::Shape(s, inputs);
Output batch_size = ops::Slice(s, inputs_shape, ops::Const(s, {0}, {1}),
ops::Const(s, {1}, {1}));
Output height = ops::Slice(s, inputs_shape, ops::Const(s, {2}, {1}),
ops::Const(s, {1}, {1}));
Output width = ops::Slice(s, inputs_shape, ops::Const(s, {3}, {1}),
ops::Const(s, {1}, {1}));
Output target_shape =
ops::Concat(s.WithOpName("target_shape"),
{batch_size, ops::Const(s, {3}, {1}), height, width},
ops::Const(s, {0}, {}));
Output reshape = ops::Reshape(s, inputs, target_shape);
Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
GrapplerItem item;
item.fetch = {"outputs"};
item.feed = {{"Placeholder", x_t}};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeNotAssumeValidFeeds) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
Output target_shape = ops::Const(s, {4, 3, 28, 28}, {4});
Output reshape = ops::Reshape(s, inputs, target_shape);
Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
GrapplerItem item;
item.fetch = {"outputs"};
item.feed = {{"Placeholder", x_t}};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest,
RemoveRedundantReshapeAssumeValidFeedsInAggressiveMode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
Output target_shape = ops::Const(s, {4, 3, 28, 28}, {4});
Output reshape = ops::Reshape(s, inputs, target_shape);
Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({4, 3, 28, 28}));
GrapplerItem item;
item.fetch = {"outputs"};
item.feed = {{"Placeholder", x_t}};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeNotIdentityReshape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28}));
Output reshape = ops::Reshape(s, inputs, ops::Const(s, {8, -1, 28, 28}, {4}));
Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 3, 28, 28}));
item.feed = {{"Placeholder", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(ArithmeticOptimizerTest,
RemoveRedundantReshapeNotIdentityReshapeTooManyUnknownDimSizes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3}));
Output reshape = ops::Reshape(s, inputs, ops::Const(s, {-1, -1}, {2}));
Output outputs = ops::Identity(s.WithOpName("outputs"), reshape);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
}
TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeCombineReshapes) {
for (bool include_unary_chain : {false, true}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output nchw_vect_c =
ops::Placeholder(s.WithOpName("nchw_vect_c"), DT_FLOAT,
ops::Placeholder::Shape({8, 3, 28, 28, 4}));
Output transpose =
ops::Transpose(s.WithOpName("transpose"), nchw_vect_c,
ops::Const(s.WithOpName("perm"), {0, 2, 3, 1, 4}, {5}));
Output nhwc = ops::Reshape(
s.WithOpName("nhwc"), transpose,
ops::Const(
s.WithControlDependencies(nchw_vect_c).WithOpName("nhwc_shape"),
{8, 28, 28, 12}, {4}));
Output flatten = ops::Reshape(
s.WithOpName("flatten"),
(include_unary_chain ? ops::Cos(s.WithOpName("Cos"), nhwc) : nhwc),
ops::Const(s.WithOpName("flatten_shape"), {8, 28 * 28 * 12}, {2}));
Output output0 = ops::Identity(s.WithOpName("output0"), flatten);
Output output1 = ops::Identity(s.WithOpName("output1"), flatten);
GraphDef graph;
TF_CHECK_OK(s.ToGraphDef(&graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 3, 28, 28, 4}));
auto eval =
EvaluateNodes(graph, {"output0", "nhwc"}, {{"nchw_vect_c", x_t}});
ASSERT_EQ(eval.size(), 2);
auto expected_output_t = eval[0];
auto nhwc_t = eval[1];
{
GrapplerItem item;
item.graph = graph;
item.fetch = {"output0", "output1"};
item.feed = {{"nchw_vect_c", x_t}};
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors[0], expected_output_t);
test::ExpectTensorEqual<float>(tensors[1], expected_output_t);
}
{
GrapplerItem item;
item.graph = graph;
item.fetch = {"output0", "output1"};
item.feed = {{"nhwc", nhwc_t}};
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 2);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors[0], expected_output_t);
test::ExpectTensorEqual<float>(tensors[1], expected_output_t);
}
{
Output output2 = ops::Identity(s.WithOpName("output2"), nhwc);
GraphDef graph;
TF_CHECK_OK(s.ToGraphDef(&graph));
GrapplerItem item;
item.graph = graph;
item.fetch = {"output0", "output1", "output2"};
item.feed = {{"nchw_vect_c", x_t}};
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantReshape(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
EXPECT_EQ(CountOpNodes(output, "Reshape"), 2);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 3);
test::ExpectTensorEqual<float>(tensors[0], expected_output_t);
test::ExpectTensorEqual<float>(tensors[1], expected_output_t);
test::ExpectTensorEqual<float>(tensors[2], nhwc_t);
}
}
}
TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastProducerIsCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_uint8 =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nhwc_fp32 = ops::Cast(s, nhwc_uint8, DT_FLOAT);
Output nchw_fp32 =
ops::Transpose(s, nhwc_fp32, ops::Const(s, {0, 3, 1, 2}, {4}));
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
OptimizeAndPrune(&optimizer, &item, &output);
const NodeDef* transpose_node = nullptr;
for (const NodeDef& node : output.node()) {
if (node.op() == "Transpose") {
EXPECT_EQ(transpose_node, nullptr);
EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
transpose_node = &node;
}
}
ASSERT_NE(transpose_node, nullptr);
for (const NodeDef& node : output.node()) {
if (node.op() == "Cast") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(transpose_node->name(), NodeName(node.input(0)));
}
}
auto tensors =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
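// Same reordering as above, with SpaceToDepth as the value-preserving op.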
TEST_F(ArithmeticOptimizerTest, ReorderS2DCastProducerIsCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output outputs =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
outputs = ops::Cast(s, outputs, DT_FLOAT);
outputs = ops::SpaceToDepth(s, outputs, 2);
outputs = ops::Identity(s.WithOpName("outputs"), outputs);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
OptimizeAndPrune(&optimizer, &item, &output);
const NodeDef* s2d_node = nullptr;
for (const NodeDef& node : output.node()) {
if (node.op() == "SpaceToDepth") {
EXPECT_EQ(s2d_node, nullptr);
EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
s2d_node = &node;
}
}
ASSERT_NE(s2d_node, nullptr);
for (const NodeDef& node : output.node()) {
if (node.op() == "Cast") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(s2d_node->name(), NodeName(node.input(0)));
}
}
auto tensors =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
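// Mirror case: a narrowing Cast (float -> uint8) after a Transpose is moved in
// front of it, so the Transpose runs on uint8.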
TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastProducerIsTranspose) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_fp32 =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nchw_fp32 =
ops::Transpose(s, nhwc_fp32, ops::Const(s, {0, 3, 1, 2}, {4}));
Output nchw_uint8 = ops::Cast(s, nchw_fp32, DT_UINT8);
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_uint8);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_t =
GenerateConstantTensor<DT_FLOAT>(TensorShape({8, 28, 28, 3}), 42.0f);
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
OptimizeAndPrune(&optimizer, &item, &output);
const NodeDef* cast_node = nullptr;
for (const NodeDef& node : output.node()) {
if (node.op() == "Cast") {
EXPECT_EQ(cast_node, nullptr);
cast_node = &node;
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(NodeName(node.input(0)), "Placeholder");
}
}
ASSERT_NE(cast_node, nullptr);
for (const NodeDef& node : output.node()) {
if (node.op() == "Transpose") {
EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(cast_node->name(), NodeName(node.input(0)));
}
}
auto tensors =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<uint8>(tensors[0], tensors_expected[0]);
}
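// A whole chain of value-preserving ops (ReverseV2, Transpose) should be
// reordered past the widening Cast, leaving the Cast last.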
TEST_F(ArithmeticOptimizerTest, ReorderTransposeReverseCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_uint8 =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nhwc_fp32 = ops::Cast(s, nhwc_uint8, DT_FLOAT);
Output nhwc_fp32_reversed =
ops::Reverse(s, nhwc_fp32, ops::Const(s, {0}, {1}));
Output nchw_fp32_reversed =
ops::Transpose(s, nhwc_fp32_reversed, ops::Const(s, {0, 3, 1, 2}, {4}));
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32_reversed);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
OptimizeAndPrune(&optimizer, &item, &output);
const NodeDef* reverse_node = nullptr;
const NodeDef* transpose_node = nullptr;
const NodeDef* cast_node = nullptr;
for (const NodeDef& node : output.node()) {
if (node.op() == "Transpose") {
EXPECT_EQ(transpose_node, nullptr);
EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
transpose_node = &node;
} else if (node.op() == "ReverseV2") {
EXPECT_EQ(reverse_node, nullptr);
EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
reverse_node = &node;
} else if (node.op() == "Cast") {
cast_node = &node;
}
}
ASSERT_NE(cast_node, nullptr);
ASSERT_NE(reverse_node, nullptr);
ASSERT_NE(transpose_node, nullptr);
ASSERT_EQ(reverse_node->input_size(), 2);
EXPECT_EQ(NodeName(reverse_node->input(0)), "Placeholder");
ASSERT_EQ(transpose_node->input_size(), 2);
EXPECT_EQ(NodeName(transpose_node->input(0)), reverse_node->name());
ASSERT_EQ(cast_node->input_size(), 1);
EXPECT_EQ(NodeName(cast_node->input(0)), transpose_node->name());
auto tensors =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
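// CheckNumerics is not reordered with the Cast; the graph must stay unchanged.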
TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastCheckNumericsToIdentity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_uint8 =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nhwc_fp32 = ops::Cast(s, nhwc_uint8, DT_FLOAT);
Output nchw_fp32 = ops::CheckNumerics(s, nhwc_fp32, "foo");
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
}
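// A narrowing Cast followed by a Transpose is already in the cheapest order;
// nothing should be reordered.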
TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCastProducerIsCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_fp32 =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nhwc_uint8 = ops::Cast(s, nhwc_fp32, DT_UINT8);
Output nchw_uint8 =
ops::Transpose(s, nhwc_uint8, ops::Const(s, {0, 3, 1, 2}, {4}));
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_uint8);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
}
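// A uint8 Transpose followed by a widening Cast is already in the cheapest
// order; nothing should be reordered.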
TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCastProducerIsTranspose) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
Output nhwc_uint8 =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
Output nchw_uint8 =
ops::Transpose(s, nhwc_uint8, ops::Const(s, {0, 3, 1, 2}, {4}));
Output nchw_fp32 = ops::Cast(s, nchw_uint8, DT_FLOAT);
Output outputs = ops::Identity(s.WithOpName("outputs"), nchw_fp32);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
CompareGraphs(item.graph, output);
}
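// An inverse transpose pair and an identity-permutation transpose should both
// be removed, leaving only the inputs and the Identity consumers.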
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs_shape =
ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
Output inputs =
ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4});
Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4});
Output perm3 = ops::Const(s.WithOpName("perm3"), {0, 1, 2, 3}, {4});
Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm1);
Output transpose2 =
ops::Transpose(s.WithOpName("transpose2"), transpose1, perm2);
Output transpose3 = ops::Transpose(s.WithOpName("transpose3"), inputs, perm3);
Output id1 = ops::Identity(s.WithOpName("id1"), transpose2);
Output id2 = ops::Identity(s.WithOpName("id2"), transpose3);
GrapplerItem item;
item.fetch = {"id1", "id2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
std::set<string> nodes_after_optimization;
for (const NodeDef& node : output.node()) {
nodes_after_optimization.insert(node.name());
}
EXPECT_EQ(nodes_after_optimization,
std::set<string>({"id1", "id2", "inputs_shape", "inputs"}));
}
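// A ConjugateTranspose with an identity permutation reduces to a plain Conj.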
TEST_F(ArithmeticOptimizerTest, RemoveIdentityConjugateTransposes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output re = ops::Const(s.WithOpName("re"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output im = ops::Const(s.WithOpName("im"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
Output z = ops::Complex(s.WithOpName("z"), re, im);
Output perm = ops::Const(s.WithOpName("perm"), {0, 1}, {2});
Output transpose = ops::ConjugateTranspose(s.WithOpName("trans"), z, perm);
Output id = ops::Identity(s.WithOpName("id"), transpose);
GrapplerItem item;
item.fetch = {"id"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 5);
const string p = "ArithmeticOptimizer/RemoveIdentityTranspose";
const string optimized_name = absl::StrCat(p, "_", "trans");
const NodeDef* conj = node_map.GetNode(optimized_name);
ASSERT_NE(conj, nullptr);
EXPECT_EQ(conj->op(), "Conj");
ASSERT_EQ(conj->input_size(), 1);
EXPECT_EQ(conj->input(0), "z");
}
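// Inverse transpose pairs must also be removed when they sit on one branch of
// a multi-output op (Split), rewiring the Concat to the Split outputs.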
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs_shape =
      ops::Const(s.WithOpName("inputs_shape"), {8, 12, 28, 28}, {4});
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
ops::Placeholder::Shape({8, 12, 28, 28}));
OutputList split = ops::Split(s, ops::Const(s, 1), inputs, 3).output;
Output perm1 = ops::Const(s, {0, 2, 3, 1}, {4});
Output perm2 = ops::Const(s, {0, 3, 1, 2}, {4});
Output branch0 = split[0];
Output branch1 = ops::Transpose(s, ops::Transpose(s, split[1], perm1), perm2);
Output branch2 = split[2];
Output concat = ops::Concat(s, {branch0, branch1, branch2}, ops::Const(s, 1));
Output outputs = ops::Identity(s.WithOpName("outputs"), concat);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 12, 28, 28}));
item.feed = {{"inputs", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
for (const NodeDef& node : output.node()) {
if (node.op() == "Concat") {
ASSERT_EQ(node.input_size(), 3);
EXPECT_EQ(node.input(0), "Split");
EXPECT_EQ(node.input(1), "Split:1");
EXPECT_EQ(node.input(2), "Split:2");
}
}
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
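// When the removed transpose pair is only consumed through a control
// dependency, the dependency must be rehooked to the transposes' input.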
TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({2, 3}));
Output transpose1 = ops::Transpose(s, inputs, ops::Const(s, {1, 0}));
Output transpose2 = ops::Transpose(s, transpose1, ops::Const(s, {1, 0}));
Output outputs =
ops::Identity(s.WithOpName("outputs").WithControlDependencies(transpose2),
ops::Const(s.WithOpName("outputs_const"), 1.0f));
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3}));
item.feed = {{"Placeholder", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
const NodeDef* outputs_node = node_map.GetNode("outputs");
ASSERT_EQ(outputs_node->input_size(), 2);
EXPECT_EQ(outputs_node->input(0), "outputs_const");
EXPECT_EQ(outputs_node->input(1), "^Placeholder");
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
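// Applying the same non-inverse permutation twice is not an identity, so the
// transposes must be kept.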
TEST_F(ArithmeticOptimizerTest, NotRemoveTransposes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs_shape =
ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
Output inputs =
ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
Output perm = ops::Const(s.WithOpName("perm"), {1, 2, 3, 0}, {4});
Output transpose1 = ops::Transpose(s.WithOpName("transpose1"), inputs, perm);
Output transpose2 =
ops::Transpose(s.WithOpName("transpose2"), transpose1, perm);
Output outputs = ops::Identity(s.WithOpName("outputs"), transpose2);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 6);
}
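// In aggressive mode, an inverse transpose pair is removed even with an
// intervening Identity node between the two transposes.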
TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesThroughChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs_shape =
ops::Const(s.WithOpName("inputs_shape"), {8, 3, 28, 28}, {4});
Output inputs =
ops::RandomUniform(s.WithOpName("inputs"), inputs_shape, DT_FLOAT);
Output perm1 = ops::Const(s.WithOpName("perm1"), {0, 2, 3, 1}, {4});
Output perm2 = ops::Const(s.WithOpName("perm2"), {0, 3, 1, 2}, {4});
Output transpose1 = ops::Transpose(
s.WithOpName("transpose1").WithControlDependencies(perm2), inputs, perm1);
Output identity = ops::Identity(s.WithOpName("id"), transpose1);
Output transpose2 =
ops::Transpose(s.WithOpName("transpose2"), identity, perm2);
Output id1 = ops::Identity(s.WithOpName("id1"), transpose2);
GrapplerItem item;
item.fetch = {"id1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
EnableOnlyRemoveIdentityTranspose(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
std::set<string> nodes_after_optimization;
for (const NodeDef& node : output.node()) {
nodes_after_optimization.insert(node.name());
if (node.name() == "id") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "inputs");
}
if (node.name() == "id1") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "id");
}
}
EXPECT_EQ(nodes_after_optimization,
std::set<string>({"id", "id1", "inputs_shape", "inputs"}));
}
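// A scalar Mul on the conv input is folded into the conv weights, across the
// intervening Transpose, regardless of the Mul operand order.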
TEST_F(ArithmeticOptimizerTest, FoldMulToTransposeConv) {
for (bool swap_inputs : {false, true}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
ops::Placeholder::Shape({1, 28, 28, 3}));
Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
Output scaled_inputs = ops::Multiply(s.WithOpName("scaled_inputs"),
swap_inputs ? scale : inputs,
swap_inputs ? inputs : scale);
Output perm_nhwc_to_nchw =
ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4});
Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"),
scaled_inputs, perm_nhwc_to_nchw);
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(127.0f, {5, 5, 3, 4}));
Output conv =
ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1},
"VALID", ops::Conv2D::DataFormat("NCHW"));
Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyFoldMultipleIntoConv(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
ASSERT_NE(folded_conv, nullptr);
const NodeDef* folded_conv_weights =
node_map.GetNode(folded_conv->input(1));
ASSERT_NE(folded_conv_weights, nullptr);
EXPECT_EQ(folded_conv_weights->op(), "Mul");
const NodeDef* transpose =
node_map.GetNode(NodeName(folded_conv->input(0)));
ASSERT_NE(transpose, nullptr);
ASSERT_EQ(transpose->input_size(), 2);
EXPECT_EQ(transpose->input(0), "inputs");
}
}
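// inputs_nchw is a feed (preserved) node, so the Mul must not be folded across
// the Transpose.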
TEST_F(ArithmeticOptimizerTest, NotFoldMulAcrossPreservedTranspose) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
ops::Placeholder::Shape({8, 28, 28, 3}));
Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
Output scaled_inputs =
ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
Output perm_nhwc_to_nchw =
ops::Const(s.WithOpName("perm_nhwc_to_nchw"), {0, 3, 1, 2}, {4});
Output inputs_nchw = ops::Transpose(s.WithOpName("inputs_nchw"),
scaled_inputs, perm_nhwc_to_nchw);
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(127.0f, {5, 5, 3, 16}));
Output conv =
ops::Conv2D(s.WithOpName("conv"), inputs_nchw, weights, {1, 1, 1, 1},
"VALID", ops::Conv2D::DataFormat("NCHW"));
Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
Tensor inputs_nchw_tensor(DT_FLOAT, {8, 3, 28, 28});
memset(const_cast<char*>(inputs_nchw_tensor.tensor_data().data()), 0,
inputs_nchw_tensor.tensor_data().size());
GrapplerItem item;
item.fetch = {"outputs"};
item.feed = {{"inputs_nchw", inputs_nchw_tensor}};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
item.graph.Swap(&output);
TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
NodeMap node_map(&output);
const NodeDef* inputs_nchw_node_def =
node_map.GetNode(inputs_nchw.node()->name());
ASSERT_NE(inputs_nchw_node_def, nullptr);
ASSERT_EQ(inputs_nchw_node_def->input_size(), 2);
EXPECT_EQ(NodeName(inputs_nchw_node_def->input(0)),
scaled_inputs.node()->name());
}
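// The scalar Mul should be folded into the weights of a Conv3D as well.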
TEST_F(ArithmeticOptimizerTest, FoldMulToConv) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_FLOAT,
ops::Placeholder::Shape({8, 28, 28, 28, 3}));
Output scale = ops::Const(s.WithOpName("scale"), 1.0f / 255.0f, {});
Output scaled_inputs =
ops::Multiply(s.WithOpName("scaled_inputs"), inputs, scale);
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(127.0f, {5, 5, 5, 3, 16}));
Output conv = ops::Conv3D(s.WithOpName("conv"), scaled_inputs, weights,
{1, 1, 1, 1, 1}, "VALID");
Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
TF_EXPECT_OK(ArithmeticOptimizer().Optimize(nullptr, item, &output));
item.graph.Swap(&output);
TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output));
NodeMap node_map(&output);
const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
ASSERT_NE(folded_conv, nullptr);
ASSERT_EQ(folded_conv->input_size(), 2);
  EXPECT_EQ(NodeName(folded_conv->input(0)), inputs.node()->name());
const NodeDef* folded_conv_input_1 =
node_map.GetNode(NodeName(folded_conv->input(1)));
ASSERT_NE(folded_conv_input_1, nullptr);
  EXPECT_EQ(folded_conv_input_1->op(), "Mul");
}
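// End-to-end: Cast/Transpose reordering combined with folding the Mul into the
// Conv2D weights.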
TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output inputs =
ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
Output cast = ops::Cast(s, inputs, DT_FLOAT);
Output mul = ops::Mul(s, cast, ops::Const(s, 1.0f / 255.0f));
Output transpose =
ops::Transpose(s, mul, ops::Const(s.WithOpName("perm"), {0, 3, 1, 2}));
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(127.0f, {5, 5, 3, 16}));
Output conv = ops::Conv2D(s, transpose, weights, {1, 1, 1, 1}, "VALID",
ops::Conv2D::DataFormat("NCHW"));
Output outputs = ops::Identity(s.WithOpName("outputs"), conv);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
OptimizeTwiceAndPrune(&optimizer, &item, &output, true);
NodeMap node_map(&output);
const string p = "ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_";
const string optimized_cast_name = absl::StrCat(p, "float_Cast");
const string optimized_transpose_name = absl::StrCat(p, "uint8_Transpose");
const string optimized_weights =
"ArithmeticOptimizer/FoldMultiplyIntoConv_scaled_Conv2D_weights";
const NodeDef* inputs_node = node_map.GetNode("Placeholder");
const NodeDef* transpose_node = node_map.GetNode(optimized_transpose_name);
const NodeDef* cast_node = node_map.GetNode(optimized_cast_name);
const NodeDef* weights_node = node_map.GetNode(optimized_weights);
const NodeDef* conv_node = node_map.GetNode("Conv2D");
ASSERT_NE(inputs_node, nullptr);
ASSERT_NE(transpose_node, nullptr);
ASSERT_NE(cast_node, nullptr);
ASSERT_NE(weights_node, nullptr);
ASSERT_NE(conv_node, nullptr);
EXPECT_EQ(output.node_size(), 7);
ASSERT_EQ(transpose_node->input_size(), 2);
EXPECT_EQ(transpose_node->input(0), inputs_node->name());
ASSERT_EQ(cast_node->input_size(), 1);
EXPECT_EQ(cast_node->input(0), transpose_node->name());
ASSERT_EQ(conv_node->input_size(), 2);
EXPECT_EQ(conv_node->input(0), cast_node->name());
EXPECT_EQ(conv_node->input(1), weights_node->name());
}
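// When the folding fires for several convolutions in one graph, the rewritten
// weight nodes must get unique names.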
TEST_F(ArithmeticOptimizerTest, OptimizeMultipleMulTransposeConv) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
GrapplerItem item;
Output conv[2];
for (int i = 0; i < 2; ++i) {
Output inputs =
ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 3, 28, 28}));
Output mul = ops::Mul(s, inputs, ops::Const(s, 1.0f / 255.0f));
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(127.0f, {5, 5, 3, 16}));
conv[i] = ops::Conv2D(s, mul, weights, {1, 1, 1, 1}, "VALID",
ops::Conv2D::DataFormat("NCHW"));
}
Output outputs = ops::Add(s.WithOpName("outputs"), conv[0], conv[1]);
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyFoldMultipleIntoConv(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output, true);
NodeMap node_map(&output);
using absl::StrCat;
const string p = "ArithmeticOptimizer/FoldMultiplyIntoConv_";
const string optimized_weights = StrCat(p, "scaled_Conv2D_weights");
const string optimized_weights_1 = StrCat(p, "scaled_Conv2D_1_weights_1");
const NodeDef* weights_node = node_map.GetNode(optimized_weights);
const NodeDef* weights_node_1 = node_map.GetNode(optimized_weights_1);
const NodeDef* conv_node = node_map.GetNode("Conv2D");
const NodeDef* conv_node_1 = node_map.GetNode("Conv2D_1");
ASSERT_NE(weights_node, nullptr);
ASSERT_NE(weights_node_1, nullptr);
ASSERT_NE(conv_node, nullptr);
ASSERT_NE(conv_node_1, nullptr);
ASSERT_EQ(conv_node->input_size(), 2);
ASSERT_EQ(conv_node_1->input_size(), 2);
EXPECT_EQ(conv_node->input(1), weights_node->name());
EXPECT_EQ(conv_node_1->input(1), weights_node_1->name());
}
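// Two chained Bitcasts combine into a single Bitcast.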
TEST_F(ArithmeticOptimizerTest, CombineBitcasts) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_UINT8,
ops::Placeholder::Shape({2, 3}));
Output bc1 = ops::Bitcast(s.WithOpName("bc1"), inputs, DT_QINT8);
Output bc2 = ops::Bitcast(s.WithOpName("bc2"), bc1, DT_INT8);
Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_UINT8>(TensorShape({2, 3}));
item.feed = {{"inputs", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantBitcast(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 3);
EXPECT_EQ(CountOpNodes(output, "Bitcast"), 1);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "bc2"));
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
}
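// When the combined Bitcast maps the input type onto itself, it is a no-op and
// is removed entirely.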
TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
ops::Placeholder::Shape({2, 3}));
Output bc1 = ops::Bitcast(s, inputs, DT_QINT8);
Output bc2 = ops::Bitcast(s, bc1, DT_INT8);
Output outputs = ops::Identity(s.WithOpName("outputs"), bc2);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
item.feed = {{"inputs", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantBitcast(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 2);
EXPECT_EQ(CountOpNodes(output, "Bitcast"), 0);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
}
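// A Cast to the input's own type is redundant and should be removed.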
TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output inputs = ops::Placeholder(s.WithOpName("inputs"), DT_INT8,
ops::Placeholder::Shape({2, 3}));
Output cast = ops::Cast(s, inputs, DT_INT8);
Output outputs = ops::Identity(s.WithOpName("outputs"), cast);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
item.feed = {{"inputs", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveRedundantCast(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 2);
EXPECT_EQ(CountOpNodes(output, "Cast"), 0);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
}
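// A tree of Adds over identically shaped inputs collapses into a single AddN,
// named after the root Add and placed in its name scope.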
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddOpsOfIdenticalShape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Scope sx = s.NewSubScope("x");
tensorflow::Scope sy = s.NewSubScope("y");
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_bc = ops::Add(sx.WithOpName("Add_bc"), b, c);
auto add_abc = ops::Add(sy.WithOpName("Add_abc"), a, add_bc);
auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 5);
NodeMap node_map(&output);
const NodeDef* collapsed_add =
node_map.GetNode("y/ArithmeticOptimizer/AddOpsRewrite_Add_abc");
ASSERT_NE(collapsed_add, nullptr);
EXPECT_EQ(collapsed_add->op(), "AddN");
ASSERT_EQ(collapsed_add->input_size(), 3);
EXPECT_EQ(collapsed_add->input(0), "a");
EXPECT_EQ(collapsed_add->input(1), "b");
EXPECT_EQ(collapsed_add->input(2), "c");
const NodeDef* updated_outputs = node_map.GetNode("outputs");
ASSERT_NE(updated_outputs, nullptr);
ASSERT_EQ(updated_outputs->input_size(), 1);
EXPECT_EQ(updated_outputs->input(0), collapsed_add->name());
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
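// Both Add subtrees feeding the Mul should be rewritten to AddN independently.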
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMultiplePasses) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT);
auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT);
auto z = ops::Variable(s.WithOpName("z"), {2, 2}, DT_FLOAT);
auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);
auto mul = ops::Multiply(s.WithOpName("Mul"), add_abc, add_xyz);
auto outputs = ops::Identity(s.WithOpName("outputs"), mul);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 10);
NodeMap node_map(&output);
const NodeDef* collapsed_left =
node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
ASSERT_NE(collapsed_left, nullptr);
EXPECT_EQ(collapsed_left->op(), "AddN");
ASSERT_EQ(collapsed_left->input_size(), 3);
EXPECT_EQ(collapsed_left->input(0), "a");
EXPECT_EQ(collapsed_left->input(1), "b");
EXPECT_EQ(collapsed_left->input(2), "c");
const NodeDef* collapsed_right =
node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_xyz");
ASSERT_NE(collapsed_right, nullptr);
EXPECT_EQ(collapsed_right->op(), "AddN");
ASSERT_EQ(collapsed_right->input_size(), 3);
EXPECT_EQ(collapsed_right->input(0), "x");
EXPECT_EQ(collapsed_right->input(1), "y");
EXPECT_EQ(collapsed_right->input(2), "z");
const NodeDef* updated_mul = node_map.GetNode("Mul");
ASSERT_NE(updated_mul, nullptr);
EXPECT_EQ(updated_mul->op(), "Mul");
ASSERT_EQ(updated_mul->input_size(), 2);
EXPECT_EQ(updated_mul->input(0), collapsed_left->name());
EXPECT_EQ(updated_mul->input(1), collapsed_right->name());
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
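// An input used by several Adds must appear that many times in the AddN.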
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddInputMultipleTimes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
auto add_all = ops::Add(s.WithOpName("Add_all"), add_ab, add_bc);
auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 5);
NodeMap node_map(&output);
const NodeDef* collapsed_add =
node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_all");
ASSERT_NE(collapsed_add, nullptr);
EXPECT_EQ(collapsed_add->op(), "AddN");
ASSERT_EQ(collapsed_add->input_size(), 4);
EXPECT_EQ(collapsed_add->input(0), "a");
EXPECT_EQ(collapsed_add->input(1), "b");
EXPECT_EQ(collapsed_add->input(2), "b");
EXPECT_EQ(collapsed_add->input(3), "c");
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
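// Symbolically known shapes (-1 dimensions) that are provably equal still
// qualify for the AddN rewrite.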
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddOpsOfSymbolicallyEqualShape) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input = ops::Variable(s.WithOpName("input"), {-1, 2}, DT_FLOAT);
auto a = ops::Sqrt(s.WithOpName("a"), input);
auto b = ops::Square(s.WithOpName("b"), input);
auto c = ops::Round(s.WithOpName("c"), input);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
std::vector<std::pair<string, Tensor>> feed = {{"input", x_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 6);
NodeMap node_map(&output);
const NodeDef* collapsed_add =
node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
ASSERT_NE(collapsed_add, nullptr);
EXPECT_EQ(collapsed_add->op(), "AddN");
ASSERT_EQ(collapsed_add->input_size(), 3);
EXPECT_EQ(collapsed_add->input(0), "a");
EXPECT_EQ(collapsed_add->input(1), "b");
EXPECT_EQ(collapsed_add->input(2), "c");
const NodeDef* updated_outputs = node_map.GetNode("outputs");
ASSERT_NE(updated_outputs, nullptr);
ASSERT_EQ(updated_outputs->input_size(), 1);
EXPECT_EQ(updated_outputs->input(0), collapsed_add->name());
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
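// With mixed shapes, same-shaped inputs are grouped into AddN leaves and the
// leaves are combined in increasing-shape order to minimize broadcasts.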
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMinimizeBCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {32, 32, 32}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
auto x = ops::Variable(s.WithOpName("x"), {32}, DT_FLOAT);
auto y = ops::Variable(s.WithOpName("y"), {32, 32}, DT_FLOAT);
auto z = ops::Variable(s.WithOpName("z"), {32, 32, 32}, DT_FLOAT);
auto add_xy = ops::Add(s.WithOpName("Add_xy"), x, y);
auto add_xyz = ops::Add(s.WithOpName("Add_xyz"), add_xy, z);
auto add_all = ops::Add(s.WithOpName("AddAll"), add_abc, add_xyz);
auto outputs = ops::Identity(s.WithOpName("outputs"), add_all);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
auto z_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32, 32}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 12);
NodeMap node_map(&output);
string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_AddAll";
string outer_0_add_name =
"ArithmeticOptimizer/AddOpsRewrite_Internal_0_AddAll";
string inner_0_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_AddAll";
string inner_1_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_1_AddAll";
string inner_2_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_2_AddAll";
const NodeDef* add_ax_node = node_map.GetNode(inner_0_add_name);
ASSERT_NE(add_ax_node, nullptr);
EXPECT_EQ(add_ax_node->op(), "AddN");
ASSERT_EQ(add_ax_node->input_size(), 2);
EXPECT_EQ(add_ax_node->input(0), "a");
EXPECT_EQ(add_ax_node->input(1), "x");
const NodeDef* add_by_node = node_map.GetNode(inner_1_add_name);
ASSERT_NE(add_by_node, nullptr);
EXPECT_EQ(add_by_node->op(), "AddN");
  ASSERT_EQ(add_by_node->input_size(), 2);
EXPECT_EQ(add_by_node->input(0), "b");
EXPECT_EQ(add_by_node->input(1), "y");
const NodeDef* add_cz_node = node_map.GetNode(inner_2_add_name);
ASSERT_NE(add_cz_node, nullptr);
EXPECT_EQ(add_cz_node->op(), "AddN");
ASSERT_EQ(add_cz_node->input_size(), 2);
EXPECT_EQ(add_cz_node->input(0), "c");
EXPECT_EQ(add_cz_node->input(1), "z");
const NodeDef* outer_0_node = node_map.GetNode(outer_0_add_name);
ASSERT_NE(outer_0_node, nullptr);
EXPECT_EQ(outer_0_node->op(), "AddV2");
ASSERT_EQ(outer_0_node->input_size(), 2);
EXPECT_EQ(outer_0_node->input(0), inner_0_add_name);
EXPECT_EQ(outer_0_node->input(1), inner_1_add_name);
const NodeDef* outer_node = node_map.GetNode(outer_add_name);
ASSERT_NE(outer_node, nullptr);
EXPECT_EQ(outer_node->op(), "AddV2");
ASSERT_EQ(outer_node->input_size(), 2);
EXPECT_EQ(outer_node->input(0), outer_0_add_name);
EXPECT_EQ(outer_node->input(1), inner_2_add_name);
const NodeDef* updated_outputs = node_map.GetNode("outputs");
ASSERT_NE(updated_outputs, nullptr);
ASSERT_EQ(updated_outputs->input_size(), 1);
EXPECT_EQ(updated_outputs->input(0), outer_add_name);
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
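// Broadcast minimization with symbolic shapes: the two symbolically equal
// small inputs are added first, then broadcast against the large one.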
TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMinimizeBCastWithSymbolicShapes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto small = ops::Variable(s.WithOpName("small"), {-1, 1, 1}, DT_DOUBLE);
auto d = "/device:CPU:0";
auto v = ops::Variable(s.WithOpName("v"), {1, 32, 32}, DT_DOUBLE);
auto large = ops::Add(s.WithOpName("large").WithDevice(d), small, v);
auto a = ops::Sqrt(s.WithOpName("a"), small);
auto b = ops::Square(s.WithOpName("b"), large);
auto c = ops::Round(s.WithOpName("c"), small);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_abc = ops::Add(s.WithOpName("Add_abc"), add_ab, c);
auto outputs = ops::Identity(s.WithOpName("outputs"), add_abc);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto s_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({8, 1, 1}));
auto v_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({1, 32, 32}));
std::vector<std::pair<string, Tensor>> feed = {{"small", s_t}, {"v", v_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyAddToAddNCombining(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 9);
NodeMap node_map(&output);
string outer_add_name = "ArithmeticOptimizer/AddOpsRewrite_Add_abc";
string inner_add_name = "ArithmeticOptimizer/AddOpsRewrite_Leaf_0_Add_abc";
const NodeDef* outer_add = node_map.GetNode(outer_add_name);
ASSERT_NE(outer_add, nullptr);
EXPECT_EQ(outer_add->op(), "AddV2");
ASSERT_EQ(outer_add->input_size(), 2);
EXPECT_EQ(outer_add->input(0), inner_add_name);
EXPECT_EQ(outer_add->input(1), "b");
const NodeDef* inner_add = node_map.GetNode(inner_add_name);
ASSERT_NE(inner_add, nullptr);
ASSERT_EQ(inner_add->input_size(), 2);
EXPECT_EQ(inner_add->input(0), "a");
EXPECT_EQ(inner_add->input(1), "c");
const NodeDef* updated_outputs = node_map.GetNode("outputs");
ASSERT_NE(updated_outputs, nullptr);
ASSERT_EQ(updated_outputs->input_size(), 1);
EXPECT_EQ(updated_outputs->input(0), outer_add_name);
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
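// Add/Sub with negated operands are rewritten to the equivalent Sub/Add
// without the Neg, carrying over any control dependencies.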
TEST_F(ArithmeticOptimizerTest, RemoveNegation) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Variable(s.WithOpName("x"), {2, 2}, DT_FLOAT);
auto y = ops::Variable(s.WithOpName("y"), {2, 2}, DT_FLOAT);
Output neg_x = ops::Neg(s.WithOpName("Neg_x"), x);
Output neg_y = ops::Neg(s.WithOpName("Neg_y"), y);
Output add_x_y = ops::Add(s.WithOpName("Add_x_y"), x, y);
Output add_negx_y = ops::Add(s.WithOpName("Add_negx_y"), neg_x, y);
Output add_x_negy = ops::Add(s.WithOpName("Add_x_negy"), x, neg_y);
Output add_negx_negy = ops::Add(s.WithOpName("Add_negx_negy"), neg_x, neg_y);
Output sub_x_y = ops::Sub(s.WithOpName("Sub_x_y"), x, y);
Output sub_negx_y = ops::Sub(s.WithOpName("Sub_negx_y"), neg_x, y);
Output sub_x_negy = ops::Sub(s.WithOpName("Sub_x_negy"), x, neg_y);
Output sub_negx_negy = ops::Sub(s.WithOpName("Sub_negx_negy"), neg_x, neg_y);
Output neg_x_with_dep = ops::Neg(
s.WithOpName("Neg_x_with_dep").WithControlDependencies({add_x_y}), x);
Output add_negx_with_dep_y =
ops::Add(s.WithOpName("Add_negx_with_dep_y"), neg_x_with_dep, y);
auto add_all =
ops::AddN(s.WithOpName("add_all"),
{add_x_y, add_negx_y, add_x_negy, add_negx_negy, sub_x_y,
sub_negx_y, sub_x_negy, sub_negx_negy, add_negx_with_dep_y});
GrapplerItem item;
item.fetch = {"add_all"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
std::vector<std::pair<string, Tensor>> feed = {{"x", x_t}, {"y", y_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveNegation(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), item.graph.node_size());
int found = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "Add_negx_y") {
++found;
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "y");
EXPECT_EQ(node.input(1), "x");
} else if (node.name() == "Add_x_negy") {
++found;
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
EXPECT_EQ(node.input(1), "y");
} else if (node.name() == "Add_negx_negy") {
++found;
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "Neg_x");
EXPECT_EQ(node.input(1), "y");
} else if (node.name() == "Sub_x_negy") {
++found;
EXPECT_EQ(node.op(), "AddV2");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
EXPECT_EQ(node.input(1), "y");
} else if (node.name() == "Sub_negx_negy") {
++found;
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "y");
EXPECT_EQ(node.input(1), "x");
} else if (node.name() == "Add_negx_with_dep_y") {
++found;
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 3);
EXPECT_EQ(node.input(0), "y");
EXPECT_EQ(node.input(1), "x");
EXPECT_EQ(node.input(2), "^Add_x_y");
}
}
EXPECT_EQ(found, 6);
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
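// x / sqrt(y) is rewritten to x * rsqrt(y).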
TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMul) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
Output sqrt_y = ops::Sqrt(s.WithOpName("sqrt_y"), y);
Output div_x_sqrt_y = ops::Div(s.WithOpName("output"), x, sqrt_y);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySqrtDivToRsqrtMul(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "output") {
EXPECT_EQ(node.op(), "Mul");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
EXPECT_EQ(node.input(1), "sqrt_y");
} else if (node.name() == "sqrt_y") {
EXPECT_EQ(node.op(), "Rsqrt");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "y");
}
}
}
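// The Sqrt output is fetched, so it must be preserved and the Div left alone.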
TEST_F(ArithmeticOptimizerTest, DoNotConvertSqrtDivToRsqrtMulDivisorFetchNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output floats = ops::Const(s.WithOpName("floats"),
{0.7423212f, 0.19757693f, 0.53124744f}, {1, 3});
Output output0 = ops::Sqrt(s.WithOpName("output0"), floats);
Output const1 = ops::Const(s.WithOpName("const1"), 1.0f, {3});
Output mul1 = ops::Multiply(s.WithOpName("mul1"), const1, 0.5f);
Output grad = ops::Div(s.WithOpName("grad"), mul1, output0);
GrapplerItem item;
item.fetch = {"grad", "output0"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySqrtDivToRsqrtMul(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 2);
for (int i = 0; i < tensors.size(); i++) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
EXPECT_EQ(output.node_size(), item.graph.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "grad") {
EXPECT_EQ(node.op(), "Div");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "mul1");
EXPECT_EQ(node.input(1), "output0");
} else if (node.name() == "output0") {
EXPECT_EQ(node.op(), "Sqrt");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "floats");
}
}
}
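// FloorDiv has different semantics than Div, so the rsqrt rewrite must not
// fire.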
TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMulExcludeFloorDiv) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
Output sqrt_y = ops::Sqrt(s.WithOpName("sqrt_y"), y);
Output div_x_sqrt_y = ops::FloorDiv(s.WithOpName("output"), x, sqrt_y);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySqrtDivToRsqrtMul(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "output") {
EXPECT_EQ(node.op(), "FloorDiv");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
EXPECT_EQ(node.input(1), "sqrt_y");
} else if (node.name() == "sqrt_y") {
EXPECT_EQ(node.op(), "Sqrt");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "y");
}
}
}
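// Square(Sub(x, y)) fuses into SquaredDifference for real inputs; for complex
// inputs SquaredDifference is not equivalent, so the graph stays unchanged.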
TEST_F(ArithmeticOptimizerTest, FuseSquaredDiff) {
for (bool is_complex : {false, true}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
Output complex_x = ops::Complex(s.WithOpName("complex_x"), x, x);
Output complex_y = ops::Complex(s.WithOpName("complex_y"), y, y);
Output sub_x_y =
is_complex ? ops::Sub(s.WithOpName("sub_x_y"), complex_x, complex_y)
: ops::Sub(s.WithOpName("sub_x_y"), x, y);
Output square_sub_x_y = ops::Square(s.WithOpName("output"), sub_x_y);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyFuseSquaredDiff(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
const auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
if (is_complex) {
test::ExpectTensorNear<std::complex<float>>(tensors[0],
tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
} else {
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size() - 2);
}
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "output") {
EXPECT_EQ(node.op(), is_complex ? "Square" : "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "sub_x_y");
} else if (node.name() == "sub_x_y") {
EXPECT_EQ(node.op(), is_complex ? "Sub" : "SquaredDifference");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), is_complex ? "complex_x" : "x");
EXPECT_EQ(node.input(1), is_complex ? "complex_y" : "y");
}
}
}
}
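// The Sub output is fetched, so the SquaredDifference fusion must not fire.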
TEST_F(ArithmeticOptimizerTest, DoNotFuseSquaredDiffFetchNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
Output sub_x_y = ops::Sub(s.WithOpName("sub_x_y"), x, y);
Output square_sub_x_y = ops::Square(s.WithOpName("output"), sub_x_y);
GrapplerItem item;
item.fetch = {"output", "sub_x_y"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyFuseSquaredDiff(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
const auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 2);
for (int i = 0; i < tensors.size(); i++) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
EXPECT_EQ(output.node_size(), item.graph.node_size());
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "output") {
EXPECT_EQ(node.op(), "Square");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "sub_x_y");
} else if (node.name() == "sub_x_y") {
EXPECT_EQ(node.op(), "Sub");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
EXPECT_EQ(node.input(1), "y");
}
}
}
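// Log(Softmax(x)) is rewritten to the numerically more stable LogSoftmax(x).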
TEST_F(ArithmeticOptimizerTest, ConvertLogSoftmax) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output softmax = ops::Softmax(s.WithOpName("softmax"), x);
Output logsoftmax = ops::Log(s.WithOpName("output"), softmax);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyLogSoftmax(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
const auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size() - 1);
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "output") {
EXPECT_EQ(node.op(), "LogSoftmax");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "x");
}
}
}
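// The Softmax output is fetched, so the LogSoftmax rewrite must not fire.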
TEST_F(ArithmeticOptimizerTest, DoNotConvertLogSoftmaxArgFetchNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output floats = ops::Const(s.WithOpName("floats"),
{0.7423212f, 0.19757693f, 0.53124744f}, {1, 3});
Output softmax = ops::Softmax(s.WithOpName("softmax"), floats);
Output final_output = ops::Log(s.WithOpName("final_output"), softmax);
GrapplerItem item;
item.fetch = {"softmax", "final_output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyLogSoftmax(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
const auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 2);
VerifyGraphsMatch(item.graph, output, __LINE__);
for (int i = 0; i < tensors.size(); i++) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
}
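// Pow with special constant exponents is rewritten: 2 -> Square, 3 ->
// x * Square(x), 1 -> Identity, 0.5 -> Sqrt, 0 -> Const(1), -0.5 -> Rsqrt,
// -1 -> Reciprocal. Broadcasting cases and arbitrary exponents stay as Pow.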
TEST_F(ArithmeticOptimizerTest, ConvertPow) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
auto y2 = ops::Const(s.WithOpName("y2"), {2.0f, 2.0f}, {1, 2});
auto y3 = ops::Const(s.WithOpName("y3"), {3.0f, 3.0f}, {1, 2});
auto y1 = ops::Const(s.WithOpName("y1"), {1.0f, 1.0f}, {1, 2});
auto yPoint5 = ops::Const(s.WithOpName("y.5"), {0.5f, 0.5f}, {1, 2});
auto y0 = ops::Const(s.WithOpName("y0"), {0.0f, 0.0f}, {1, 2});
auto y_Point5 = ops::Const(s.WithOpName("y_.5"), {-0.5f, -0.5f}, {1, 2});
auto y_1 = ops::Const(s.WithOpName("y_1"), {-1.0f, -1.0f}, {1, 2});
auto y = ops::Const(s.WithOpName("y"), {3.0f, 4.0f}, {1, 2});
auto z = ops::Const(s.WithOpName("z"), {42.0f}, {});
auto ones = ops::Const(s.WithOpName("ones"), {1.0f, 1.0f, 1.0f}, {1, 3});
auto zeros = ops::Const(s.WithOpName("zeros"), {0.0f, 0.0f, 0.0f}, {1, 3});
Output out2 = ops::Pow(s.WithOpName("out2"), x, y2);
Output out3 =
ops::Pow(s.WithOpName("out3").WithDevice("/device:CPU:0"), x, y3);
Output out1 = ops::Pow(s.WithOpName("out1"), x, y1);
Output outPoint5 = ops::Pow(s.WithOpName("out.5"), x, yPoint5);
Output out0 = ops::Pow(s.WithOpName("out0"), x, y0);
Output out_Point5 = ops::Pow(s.WithOpName("out_.5"), x, y_Point5);
Output out_1 = ops::Pow(s.WithOpName("out_1"), x, y_1);
Output out = ops::Pow(s.WithOpName("out"), x, y);
Output out_bcast1 = ops::Pow(s.WithOpName("out_bcast1"), z, ones);
Output out_bcast2 = ops::Pow(s.WithOpName("out_bcast2"), z, zeros);
GrapplerItem item;
item.fetch = {"out2", "out3", "out1", "out.5", "out0",
"out_.5", "out_1", "out", "out_bcast1", "out_bcast2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 10);
GraphDef got;
ArithmeticOptimizer optimizer;
EnableOnlyConvertPow(&optimizer);
OptimizeAndPrune(&optimizer, &item, &got);
auto tensors = EvaluateNodes(got, item.fetch);
ASSERT_EQ(tensors.size(), 10);
for (int i = 0; i < tensors.size(); ++i) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
GraphDef want;
AddNode("x", "Const", {}, {}, &want);
AddNode("y", "Const", {}, {}, &want);
AddNode("z", "Const", {}, {}, &want);
AddNode("ones", "Const", {}, {}, &want);
AddNode("zeros", "Const", {}, {}, &want);
AddNode("out2", "Square", {"x"}, {}, &want);
AddNode("ArithmeticOptimizer/ConvertPow__inner_out3", "Square", {"x"}, {},
&want)
->set_device("/device:CPU:0");
AddNode("out3", "Mul", {"x", "ArithmeticOptimizer/ConvertPow__inner_out3"},
{}, &want)
->set_device("/device:CPU:0");
AddNode("out1", "Identity", {"x"}, {}, &want);
AddNode("out.5", "Sqrt", {"x"}, {}, &want);
AddNode("out0", "Const", {AsControlDependency("x")}, {}, &want);
AddNode("out_.5", "Rsqrt", {"x"}, {}, &want);
AddNode("out_1", "Reciprocal", {"x"}, {}, &want);
AddNode("out", "Pow", {"x", "y"}, {}, &want);
AddNode("out_bcast1", "Pow", {"z", "ones"}, {}, &want);
AddNode("out_bcast2", "Pow", {"z", "zeros"}, {}, &want);
CompareGraphs(want, got);
}
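// Log(1 + x) becomes Log1p(x) when one addend is the constant 1; the control
// dependency from the folded Add is preserved. out2 has no constant-1 addend
// and stays a plain Log.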
TEST_F(ArithmeticOptimizerTest, Log1p) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x1 = ops::Const(s.WithOpName("x1"), {1.0f, 1.0f}, {1, 2});
auto x2 = ops::Const(s.WithOpName("x2"), {2.0f, 2.0f}, {1, 2});
auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2});
auto a12 = ops::Add(s.WithOpName("a12").WithControlDependencies(x3), x1, x2);
auto a23 = ops::Add(s.WithOpName("a23"), x2, x3);
Output out1 = ops::Log(s.WithOpName("out1"), a12);
Output out2 = ops::Log(s.WithOpName("out2"), a23);
GrapplerItem item;
item.fetch = {"out1", "out2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef got;
ArithmeticOptimizer optimizer;
EnableOnlyLog1p(&optimizer);
OptimizeAndPrune(&optimizer, &item, &got);
auto tensors = EvaluateNodes(got, item.fetch);
ASSERT_EQ(tensors.size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
GraphDef want;
AddNode("x2", "Const", {}, {}, &want);
AddNode("x3", "Const", {}, {}, &want);
AddNode("a23", "Add", {"x2", "x3"}, {}, &want);
AddNode("out1", "Log1p", {"x2", AsControlDependency("x3")}, {}, &want);
AddNode("out2", "Log", {"a23"}, {}, &want);
CompareGraphs(want, got);
}
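// Expm1 rewrites Sub(Exp(x), 1) to Expm1(x) (out1); Sub(Exp(x), y) with
// y != 1 is left alone (out2).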
TEST_F(ArithmeticOptimizerTest, Expm1) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x1 = ops::Const(s.WithOpName("x1"), {2.0f, 2.0f}, {1, 2});
auto x2 = ops::Const(s.WithOpName("x2"), {1.0f, 1.0f}, {1, 2});
auto x3 = ops::Const(s.WithOpName("x3"), {3.0f, 3.0f}, {1, 2});
auto exp1 = ops::Exp(s.WithOpName("exp1").WithControlDependencies(x3), x1);
Output out1 = ops::Sub(s.WithOpName("out1"), exp1, x2);
Output out2 = ops::Sub(s.WithOpName("out2"), exp1, x3);
GrapplerItem item;
item.fetch = {"out1", "out2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 2);
GraphDef got;
ArithmeticOptimizer optimizer;
EnableOnlyExpm1(&optimizer);
OptimizeAndPrune(&optimizer, &item, &got);
auto tensors = EvaluateNodes(got, item.fetch);
ASSERT_EQ(tensors.size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
GraphDef want;
AddNode("x1", "Const", {}, {}, &want);
AddNode("x3", "Const", {}, {}, &want);
AddNode("exp1", "Exp", {"x1", AsControlDependency("x3")}, {}, &want);
AddNode("out1", "Expm1", {"x1", AsControlDependency("x3")}, {}, &want);
AddNode("out2", "Sub", {"exp1", "x3"}, {}, &want);
CompareGraphs(want, got);
}
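// MinimizeBroadcasts reorders a Mul chain so the small (broadcastable)
// operands are combined first and the large one last.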
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_SimpleSwap) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT);
auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c);
auto outputs = ops::Identity(s.WithOpName("outputs"), mul2);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyMinimizeBroadcasts(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
const NodeDef* mul1_node = node_map.GetNode("mul1");
ASSERT_NE(mul1_node, nullptr);
ASSERT_EQ(mul1_node->input_size(), 2);
EXPECT_EQ(mul1_node->input(0), "a");
EXPECT_EQ(mul1_node->input(1), "c");
const NodeDef* mul2_node = node_map.GetNode("mul2");
ASSERT_NE(mul2_node, nullptr);
ASSERT_EQ(mul2_node->input_size(), 2);
EXPECT_EQ(mul2_node->input(0), "mul1");
EXPECT_EQ(mul2_node->input(1), "b");
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
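// A tall Mul chain over four vectors and one matrix is rebalanced into a
// tree, deferring the broadcasting operand b to the final Mul.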
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {32}, DT_DOUBLE);
auto b = ops::Variable(s.WithOpName("b"), {32, 32}, DT_DOUBLE);
auto c = ops::Variable(s.WithOpName("c"), {32}, DT_DOUBLE);
auto d = ops::Variable(s.WithOpName("d"), {32}, DT_DOUBLE);
auto e = ops::Variable(s.WithOpName("e"), {32}, DT_DOUBLE);
auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
auto mul2 = ops::Mul(s.WithOpName("mul2"), mul1, c);
auto mul3 = ops::Mul(s.WithOpName("mul3"), mul2, d);
auto mul4 = ops::Mul(s.WithOpName("mul4"), mul3, e);
auto outputs = ops::Identity(s.WithOpName("outputs"), mul4);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
auto b_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32, 32}));
auto c_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
auto d_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
auto e_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({32}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}, {"d", d_t}, {"e", e_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyMinimizeBroadcasts(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
const NodeDef* mul1_node = node_map.GetNode("mul1");
ASSERT_NE(mul1_node, nullptr);
ASSERT_EQ(mul1_node->input_size(), 2);
EXPECT_EQ(mul1_node->input(0), "a");
EXPECT_EQ(mul1_node->input(1), "c");
const NodeDef* mul2_node = node_map.GetNode("mul2");
ASSERT_NE(mul2_node, nullptr);
ASSERT_EQ(mul2_node->input_size(), 2);
EXPECT_EQ(mul2_node->input(0), "d");
EXPECT_EQ(mul2_node->input(1), "e");
const NodeDef* mul3_node = node_map.GetNode("mul3");
ASSERT_NE(mul3_node, nullptr);
ASSERT_EQ(mul3_node->input_size(), 2);
EXPECT_EQ(mul3_node->input(0), "mul1");
EXPECT_EQ(mul3_node->input(1), "mul2");
const NodeDef* mul4_node = node_map.GetNode("mul4");
ASSERT_NE(mul4_node, nullptr);
ASSERT_EQ(mul4_node->input_size(), 2);
EXPECT_EQ(mul4_node->input(0), "mul3");
EXPECT_EQ(mul4_node->input(1), "b");
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
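// The Mul tree is rebuilt so the matrix operand D is multiplied last.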
TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {32}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {32}, DT_FLOAT);
auto d = ops::Variable(s.WithOpName("D"), {32, 32}, DT_FLOAT);
auto mul1 = ops::Mul(s.WithOpName("mul1"), a, b);
auto mul2 = ops::Mul(s.WithOpName("mul2"), c, d);
auto mul3 = ops::Mul(s.WithOpName("mul3"), mul1, mul2);
auto outputs = ops::Identity(s.WithOpName("outputs"), mul3);
GrapplerItem item;
item.fetch = {"outputs"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto a_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto b_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto c_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32}));
auto d_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({32, 32}));
std::vector<std::pair<string, Tensor>> feed = {
{"a", a_t}, {"b", b_t}, {"c", c_t}, {"D", d_t}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyMinimizeBroadcasts(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
NodeMap node_map(&output);
  const NodeDef* mul2_node = node_map.GetNode("mul2");
  ASSERT_NE(mul2_node, nullptr);
  ASSERT_EQ(mul2_node->input_size(), 2);
  EXPECT_EQ(mul2_node->input(0), "a");
  EXPECT_EQ(mul2_node->input(1), "b");
  const NodeDef* mul1_node = node_map.GetNode("mul1");
  ASSERT_NE(mul1_node, nullptr);
  ASSERT_EQ(mul1_node->input_size(), 2);
  EXPECT_EQ(mul1_node->input(0), "mul2");
  EXPECT_EQ(mul1_node->input(1), "c");
const NodeDef* mul3_node = node_map.GetNode("mul3");
ASSERT_NE(mul3_node, nullptr);
ASSERT_EQ(mul3_node->input_size(), 2);
EXPECT_EQ(mul3_node->input(0), "D");
EXPECT_EQ(mul3_node->input(1), "mul1");
auto tensors = EvaluateNodes(output, item.fetch, feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
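// Relus feeding a Concat must not be hoisted past it; both Relu nodes
// survive optimization.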
TEST_F(ArithmeticOptimizerTest, DoNotHoistReluFromConcat) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output weights1 = ops::Const(s.WithOpName("weights1"),
Input::Initializer(1.0f, {5, 5, 3, 4}));
Output weights2 = ops::Const(s.WithOpName("weights2"),
Input::Initializer(2.0f, {5, 5, 3, 4}));
Output biases =
ops::Const(s.WithOpName("biases"), Input::Initializer(2.0f, {4}));
Output axis = ops::Const(s.WithOpName("axis"), 3, {});
Output input = ops::Const(s.WithOpName("input"),
Input::Initializer(1.0f, {1, 28, 28, 3}));
Output branch1 =
ops::Conv2D(s.WithOpName("conv1"), input, weights1, {1, 1, 1, 1}, "SAME");
branch1 = ops::BiasAdd(s.WithOpName("biasadd1"), branch1, biases);
branch1 = ops::Relu(s.WithOpName("relu1"), branch1);
Output branch2 =
ops::Conv2D(s.WithOpName("conv2"), input, weights2, {1, 1, 1, 1}, "SAME");
branch2 = ops::BiasAdd(s.WithOpName("biasadd2"), branch2, biases);
branch2 = ops::Relu(s.WithOpName("relu2"), branch2);
Output concat = ops::Concat(s.WithOpName("concat"), {branch1, branch2}, axis);
Output output = ops::Identity(s.WithOpName("output"), concat);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef new_graph;
ArithmeticOptimizer optimizer;
OptimizeAndPrune(&optimizer, &item, &new_graph);
EXPECT_EQ(CountOpNodes(new_graph, "Relu"), 2);
auto tensors = EvaluateNodes(new_graph, item.fetch);
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
}
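// Identical element-wise unary chains applied to every Concat input are
// hoisted into a single chain applied to the Concat output.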
TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryFromConcat) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32});
Output b = ops::Const(s.WithOpName("b"), 1.0f, {32});
Output c = ops::Const(s.WithOpName("c"), 42.0f, {32});
Output axis = ops::Const(s.WithOpName("axis"), 0, {});
Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {});
Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, {});
Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {});
Output sin_a =
ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl3), a);
Output exp_a =
ops::Exp(s.WithOpName("exp_a").WithControlDependencies(ctrl1), sin_a);
Output exp_b = ops::Exp(s.WithOpName("exp_b"), b);
Output exp_c =
ops::Exp(s.WithOpName("exp_c").WithControlDependencies(ctrl2), c);
Output concat =
ops::Concat(s.WithOpName("concat"), {exp_a, exp_b, exp_c}, axis);
Output id = ops::Identity(s.WithOpName("id"), concat);
Output exp_a2 =
ops::Exp(s.WithOpName("exp_a2").WithControlDependencies(ctrl1), sin_a);
Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), b);
Output exp_c2 =
ops::Exp(s.WithOpName("exp_c2").WithControlDependencies(ctrl2), c);
Output cos_exp_a2 = ops::Cos(
s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl1), exp_a2);
Output cos_exp_b2 = ops::Cos(
s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2);
Output cos_exp_c2 = ops::Cos(s.WithOpName("cos_exp_c2"), exp_c2);
Output concat2 = ops::Concat(s.WithOpName("concat2"),
{cos_exp_a2, cos_exp_b2, cos_exp_c2}, axis);
Output id2 = ops::Identity(s.WithOpName("id2"), concat2);
GrapplerItem item;
item.fetch = {"id", "id2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyHoistCWiseUnaryChains(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "concat") {
ASSERT_EQ(node.input_size(), 4);
EXPECT_EQ(node.input(0), "sin_a");
EXPECT_EQ(node.input(1), "b");
EXPECT_EQ(node.input(2), "c");
EXPECT_EQ(node.input(3), "axis");
found++;
}
if (node.name() == "exp_a") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "concat");
found++;
}
if (node.name() == "id") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "exp_a");
found++;
}
if (node.name() == "concat2") {
ASSERT_EQ(node.input_size(), 4);
EXPECT_EQ(node.input(0), "sin_a");
EXPECT_EQ(node.input(1), "b");
EXPECT_EQ(node.input(2), "c");
EXPECT_EQ(node.input(3), "axis");
found++;
}
if (node.name() == "exp_a2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "concat2");
found++;
}
if (node.name() == "cos_exp_a2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "exp_a2");
found++;
}
if (node.name() == "id2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "cos_exp_a2");
found++;
}
}
EXPECT_EQ(found, 7);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
}
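// Identical element-wise unary chains applied to every Split/SplitV output
// are hoisted into a single chain applied to the Split input.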
TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryIntoSplit) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), 3.1415f, {32});
Output axis = ops::Const(s.WithOpName("axis"), 0, {});
Output ctrl1 = ops::Const(s.WithOpName("ctrl1"), 1, {});
Output ctrl2 = ops::Const(s.WithOpName("ctrl2"), 2, {});
Output ctrl3 = ops::Const(s.WithOpName("ctrl3"), 3, {});
ops::Split split1(s.WithOpName("split1"), axis, x, 2);
Output sin_a =
ops::Sin(s.WithOpName("sin_a").WithControlDependencies(ctrl1), split1[0]);
Output id_a = ops::Identity(s.WithOpName("id_a"), sin_a);
Output sin_b = ops::Sin(s.WithOpName("sin_b"), split1[1]);
Output exp_b = ops::Exp(s.WithOpName("exp_b"), sin_b);
Output id_b = ops::Identity(s.WithOpName("id_b"), exp_b);
Output size_splits2 = ops::Const(s.WithOpName("size_splits2"), {20, 12}, {2});
ops::SplitV split2(s.WithOpName("split2"), x, size_splits2, axis, 2);
Output exp_a2 = ops::Exp(
s.WithOpName("exp_a2").WithControlDependencies(ctrl1), split2[0]);
Output exp_b2 = ops::Exp(s.WithOpName("exp_b2"), split2[1]);
Output cos_exp_a2 = ops::Cos(
s.WithOpName("cos_exp_a2").WithControlDependencies(ctrl2), exp_a2);
Output cos_exp_b2 = ops::Cos(
s.WithOpName("cos_exp_b2").WithControlDependencies(ctrl3), exp_b2);
Output id_a2 = ops::Identity(s.WithOpName("id_a2"), cos_exp_a2);
Output id_b2 = ops::Identity(s.WithOpName("id_b2"), cos_exp_b2);
GrapplerItem item;
item.fetch = {"id_a", "id_b", "id_a2", "id_b2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyHoistCWiseUnaryChains(&optimizer);
OptimizeTwiceAndPrune(&optimizer, &item, &output);
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.name(), "sin_a");
EXPECT_NE(node.name(), "sin_b");
EXPECT_NE(node.name(), "exp_a2");
EXPECT_NE(node.name(), "exp_b2");
EXPECT_NE(node.name(), "cos_exp_a2");
EXPECT_NE(node.name(), "cos_exp_b2");
if (node.name() == "split1") {
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "axis");
EXPECT_EQ(node.input(1), "ArithmeticOptimizer/_sin_a_split1");
found++;
}
if (node.name() == "ArithmeticOptimizer/_sin_a_split1") {
EXPECT_EQ(node.op(), "Sin");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "x");
found++;
}
if (node.name() == "id_a") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "split1");
found++;
}
if (node.name() == "exp_b") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "split1:1");
found++;
}
if (node.name() == "id_b") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "exp_b");
found++;
}
if (node.name() == "ArithmeticOptimizer/_exp_a2_split2") {
EXPECT_EQ(node.op(), "Exp");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "x");
found++;
}
if (node.name() == "ArithmeticOptimizer/_cos_exp_a2_split2") {
EXPECT_EQ(node.op(), "Cos");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "ArithmeticOptimizer/_exp_a2_split2");
found++;
}
if (node.name() == "split2") {
ASSERT_EQ(node.input_size(), 3);
EXPECT_EQ(node.input(0), "ArithmeticOptimizer/_cos_exp_a2_split2");
EXPECT_EQ(node.input(1), "size_splits2");
EXPECT_EQ(node.input(2), "axis");
found++;
}
if (node.name() == "id_a2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "split2");
found++;
}
if (node.name() == "id_b2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "split2:1");
found++;
}
}
EXPECT_EQ(found, 10);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
}
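// Consecutive applications of an idempotent op (Snapshot, Identity) are
// collapsed into one.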
TEST_F(ArithmeticOptimizerTest, RemoveIdempotent) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32});
Output sn1 = ops::Snapshot(s.WithOpName("sn1"), a);
Output sn2 = ops::Snapshot(s.WithOpName("sn2"), sn1);
Output out1 = ops::Identity(s.WithOpName("out1"), sn2);
Output id1 = ops::Identity(s.WithOpName("id1"), a);
Output id2 = ops::Identity(s.WithOpName("id2"), id1);
Output out2 = ops::Identity(s.WithOpName("out2"), id2);
GrapplerItem item;
item.fetch = {"out1", "out2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveIdempotent(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
  EXPECT_EQ(output.node_size(), 7);
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "out1") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "sn1");
found++;
} else if (node.name() == "out2") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "id1");
found++;
} else if (node.name() == "sn1") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "a");
found++;
}
}
EXPECT_EQ(found, 3);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
}
}
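// LogicalNot of a comparison is removed by inverting the comparison itself:
// Equal <-> NotEqual, Less <-> GreaterEqual, LessEqual <-> Greater.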
TEST_F(ArithmeticOptimizerTest, RemoveLogicalNot) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32});
Output b = ops::Const(s.WithOpName("b"), -3.14f, {32});
Output eq = ops::Equal(s.WithOpName("eq"), a, b);
Output neq = ops::NotEqual(s.WithOpName("neq"), a, b);
Output lt = ops::Less(s.WithOpName("lt"), a, b);
Output le = ops::LessEqual(s.WithOpName("le"), a, b);
Output gt = ops::Greater(s.WithOpName("gt"), a, b);
Output ge = ops::GreaterEqual(s.WithOpName("ge"), a, b);
Output not_eq1 = ops::LogicalNot(s.WithOpName("not_eq1"), eq);
Output not_neq = ops::LogicalNot(s.WithOpName("not_neq"), neq);
Output not_lt = ops::LogicalNot(s.WithOpName("not_lt"), lt);
Output not_le = ops::LogicalNot(s.WithOpName("not_le"), le);
Output not_gt = ops::LogicalNot(s.WithOpName("not_gt"), gt);
Output not_ge = ops::LogicalNot(s.WithOpName("not_ge"), ge);
Output id_not_eq = ops::Identity(s.WithOpName("id_not_eq"), not_eq1);
Output id_not_neq = ops::Identity(s.WithOpName("id_not_neq"), not_neq);
Output id_not_lt = ops::Identity(s.WithOpName("id_not_lt"), not_lt);
Output id_not_le = ops::Identity(s.WithOpName("id_not_le"), not_le);
Output id_not_gt = ops::Identity(s.WithOpName("id_not_gt"), not_gt);
Output id_not_ge = ops::Identity(s.WithOpName("id_not_ge"), not_ge);
GrapplerItem item;
item.fetch = {"id_not_eq", "id_not_neq", "id_not_lt",
"id_not_le", "id_not_gt", "id_not_ge"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveLogicalNot(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "id_not_eq") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "eq");
++found;
}
if (node.name() == "id_not_neq") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "neq");
++found;
}
if (node.name() == "id_not_lt") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "lt");
++found;
}
if (node.name() == "id_not_le") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "le");
++found;
}
if (node.name() == "id_not_gt") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "gt");
++found;
}
if (node.name() == "id_not_ge") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "ge");
++found;
}
if (node.name() == "eq") {
EXPECT_EQ(node.op(), "NotEqual");
++found;
}
if (node.name() == "neq") {
EXPECT_EQ(node.op(), "Equal");
++found;
}
if (node.name() == "lt") {
EXPECT_EQ(node.op(), "GreaterEqual");
++found;
}
if (node.name() == "le") {
EXPECT_EQ(node.op(), "Greater");
++found;
}
if (node.name() == "gt") {
EXPECT_EQ(node.op(), "LessEqual");
++found;
}
if (node.name() == "ge") {
EXPECT_EQ(node.op(), "Less");
++found;
}
}
EXPECT_EQ(found, 12);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<bool>(tensors[i], tensors_expected[i]);
}
}
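// A monotonically increasing element-wise op is swapped with a following
// reduction: Max(Sqrt(x)) becomes Sqrt(Max(x)).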
TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0});
Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
GrapplerItem item;
item.fetch = {"final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
int required_node_count = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "sqrt") {
EXPECT_EQ(node.op(), "Sqrt");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "reduce_max");
++required_node_count;
} else if (node.name() == "reduce_max") {
EXPECT_EQ(node.op(), "Max");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
++required_node_count;
}
}
EXPECT_EQ(required_node_count, 2);
}
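// ArgMax only depends on the ordering, so ArgMax(Sqrt(x)) becomes ArgMax(x)
// and the Sqrt node is removed entirely.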
TEST_F(ArithmeticOptimizerTest, OptimizeArgMaxOrArgMinOfMonotonicElementWise) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
const auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
Output arg_max = ops::ArgMax(s.WithOpName("arg_max"), sqrt, 1);
Output final_out = ops::Identity(s.WithOpName("final_out"), arg_max);
GrapplerItem item;
item.fetch = {"final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
const auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int64_t>(tensors[0], tensors_expected[0]);
EXPECT_EQ(output.node_size(), item.graph.node_size() - 1);
int required_node_count = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "final_out") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "arg_max");
++required_node_count;
} else if (node.name() == "arg_max") {
EXPECT_EQ(node.op(), "ArgMax");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
++required_node_count;
}
}
EXPECT_EQ(required_node_count, 2);
}
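// The rewrite must be skipped when the inner monotonic op (sqrt) is itself
// a fetch node.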
TEST_F(ArithmeticOptimizerTest,
OptimizeMaxOrMinOfMonotonicElementWiseDoNotChangeFetchNode) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
Output reduce_max = ops::Max(s.WithOpName("reduce_max"), sqrt, {0});
Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
GrapplerItem item;
item.fetch = {"sqrt", "final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
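// The rewrite must also be skipped when the reduction itself is a fetch
// node; z keeps computing Max(Neg(reshape(x))) == -2.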
TEST_F(ArithmeticOptimizerTest,
OptimizeMaxOrMinOfMonotonicElementWiseDoNotChangeFetchNodeReduction) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {2, 3}, {1, 2});
Output reshape = ops::Reshape(s.WithOpName("reshape"), x, {-1});
Output y = ops::Neg(s.WithOpName("y"), reshape);
Output z = ops::Max(s.WithOpName("z"), y, {0});
GrapplerItem item;
item.fetch = {"z"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int>(tensors[0], tensors_expected[0]);
test::ExpectTensorEqual<int>(tensors[0], Tensor(-2));
}
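// Segment(Max|Min) and UnsortedSegment(Max|Min) reductions are left
// untouched by the monotonic-op rewrite.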
TEST_F(ArithmeticOptimizerTest,
OptimizeMaxOrMinOfMonotonicElementWiseDoNotChangeSegmentMaxOrMinOps) {
constexpr absl::string_view kSegmentMaxOpName = "SegmentMax";
constexpr absl::string_view kUnsortedSegmentMaxOpName = "UnsortedSegmentMax";
constexpr absl::string_view kSegmentMinOpName = "SegmentMin";
constexpr absl::string_view kUnsortedSegmentMinOpName = "UnsortedSegmentMin";
constexpr absl::string_view segment_max_or_min_op_names[] = {
kSegmentMaxOpName, kUnsortedSegmentMaxOpName, kSegmentMinOpName,
kUnsortedSegmentMinOpName};
for (const absl::string_view segment_op_name : segment_max_or_min_op_names) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Input x = ops::Const(s.WithOpName("x"), {-1.0f, 2.0f, -3.0f, 4.0f}, {2, 2});
    Input segment_ids =
        ops::Const(s.WithOpName("segment_ids"), {0, 2}, {2});
Output relu = ops::Relu(s.WithOpName("relu"), x);
Output segment_op;
if (segment_op_name == kSegmentMaxOpName) {
segment_op =
ops::SegmentMax(s.WithOpName(segment_op_name), relu, segment_ids);
} else if (segment_op_name == kUnsortedSegmentMaxOpName) {
segment_op = ops::UnsortedSegmentMax(s.WithOpName(segment_op_name), relu,
segment_ids, 3);
} else if (segment_op_name == kSegmentMinOpName) {
segment_op =
ops::SegmentMin(s.WithOpName(segment_op_name), relu, segment_ids);
} else {
segment_op = ops::UnsortedSegmentMin(s.WithOpName(segment_op_name), relu,
segment_ids, 3);
}
Output final_out = ops::Identity(s.WithOpName("final_out"), segment_op);
GrapplerItem item;
item.fetch = {"relu", "final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), 2);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
}
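// For a monotonically non-increasing op (Neg) the reduction is flipped:
// Max(Neg(x)) becomes Neg(Min(x)).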
TEST_F(ArithmeticOptimizerTest,
OptimizeMaxOrMinOfMonotonicElementWiseNonIncreasing) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output neg = ops::Neg(s.WithOpName("neg"), x);
Output reduce_max = ops::Max(s.WithOpName("reduce_max"), neg, {0});
Output final_out = ops::Identity(s.WithOpName("final_out"), reduce_max);
GrapplerItem item;
item.fetch = {"final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
int required_node_count = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "neg") {
EXPECT_EQ(node.op(), "Neg");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "reduce_max");
++required_node_count;
} else if (node.name() == "reduce_max") {
EXPECT_EQ(node.op(), "Min");
ASSERT_EQ(node.input_size(), 2);
EXPECT_EQ(node.input(0), "x");
++required_node_count;
}
}
  EXPECT_EQ(required_node_count, 2);
}
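// MaxPool over a non-increasing op is not flipped, since there is no
// corresponding MinPool op.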
TEST_F(ArithmeticOptimizerTest,
OptimizeMaxOrMinOfMonotonicElementWiseNonIncreasingDoNotChangeMaxPool) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 1.5f, {3, 3, 3, 1});
Output neg = ops::Neg(s.WithOpName("neg"), x);
Output max_pool = ops::MaxPool(s.WithOpName("max_pool"), neg, {1, 2, 2, 1},
{1, 2, 2, 1}, "VALID");
GrapplerItem item;
item.fetch = {"max_pool"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeTwice(&optimizer, &item, &output);
VerifyGraphsMatch(item.graph, output, __LINE__);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
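// MaxPool is not swapped with a Relu that follows a BiasAdd, keeping the
// Conv2D+BiasAdd+Relu pattern intact.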
TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicBiasAddReluMaxPool) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output weights = ops::Const(s.WithOpName("weights"),
Input::Initializer(1.0f, {5, 5, 3, 4}));
Output biases =
ops::Const(s.WithOpName("biases"), Input::Initializer(2.0f, {4}));
Output input = ops::Const(s.WithOpName("input"),
Input::Initializer(1.0f, {1, 28, 28, 3}));
Output output =
ops::Conv2D(s.WithOpName("conv"), input, weights, {1, 1, 1, 1}, "SAME");
output = ops::BiasAdd(s.WithOpName("biasadd"), output, biases);
output = ops::Relu(s.WithOpName("relu"), output);
output = ops::MaxPool(s.WithOpName("max_pool"), output, {1, 2, 2, 1},
{1, 2, 2, 1}, "VALID");
output = ops::Identity(s.WithOpName("output"), output);
GrapplerItem item;
item.fetch = {"output"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef new_graph;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeTwice(&optimizer, &item, &new_graph);
VerifyGraphsMatch(item.graph, new_graph, __LINE__);
auto tensors = EvaluateNodes(new_graph, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
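// For an increasing op, MaxPool(Sqrt(x)) becomes Sqrt(MaxPool(x)).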
TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWiseMaxPool) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 1.5f, {3, 3, 3, 1});
Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
Output max_pool = ops::MaxPool(s.WithOpName("max_pool"), sqrt, {1, 2, 2, 1},
{1, 2, 2, 1}, "VALID");
Output final_out = ops::Identity(s.WithOpName("final_out"), max_pool);
GrapplerItem item;
item.fetch = {"final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
EXPECT_EQ(output.node_size(), item.graph.node_size());
int required_node_count = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "sqrt") {
EXPECT_EQ(node.op(), "Sqrt");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "max_pool");
++required_node_count;
} else if (node.name() == "max_pool") {
EXPECT_EQ(node.op(), "MaxPool");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "x");
++required_node_count;
}
}
EXPECT_EQ(required_node_count, 2);
}
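// A chain of supported unary ops on CPU (Sqrt, Log, Relu) is fused into a
// single _UnaryOpsComposition node.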
TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
Output log = ops::Log(s.WithOpName("log"), sqrt);
Output relu = ops::Relu(s.WithOpName("relu"), log);
Output final_out = ops::Identity(s.WithOpName("final_out"), relu);
GrapplerItem item;
item.fetch = {"final_out"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyUnaryOpsComposition(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 3);
int required_node_count = 0;
for (int i = 0; i < output.node_size(); ++i) {
const NodeDef& node = output.node(i);
if (node.name() == "final_out") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "relu/unary_ops_composition");
++required_node_count;
} else if (node.name() == "relu/unary_ops_composition") {
EXPECT_EQ(node.op(), "_UnaryOpsComposition");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "x");
auto op_names = node.attr().at("op_names").list().s();
ASSERT_EQ(op_names.size(), 3);
EXPECT_EQ(op_names[0], "Sqrt");
EXPECT_EQ(op_names[1], "Log");
EXPECT_EQ(op_names[2], "Relu");
++required_node_count;
}
}
EXPECT_EQ(required_node_count, 2);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
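// A StridedSlice that selects exactly one input of a Pack (Stack) along the
// pack axis is replaced by that input directly (with a shrink mask) or by
// an ExpandDims of it (without one).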
TEST_F(ArithmeticOptimizerTest, RemoveStackStridedSliceSameAxis) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a_in =
ops::Const(s.WithOpName("a_in"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
auto b_in =
ops::Const(s.WithOpName("b_in"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2});
auto c_in =
ops::Const(s.WithOpName("c_in"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
auto a = ops::PlaceholderWithDefault(s.WithOpName("a"), a_in,
PartialTensorShape({-1, -1}));
auto b = ops::PlaceholderWithDefault(s.WithOpName("b"), b_in,
PartialTensorShape({-1, -1}));
auto c = ops::PlaceholderWithDefault(s.WithOpName("c"), c_in,
PartialTensorShape({-1, -1}));
auto stacked =
ops::Stack(s.WithOpName("stacked"), {a.output, b.output, c.output},
ops::Stack::Axis(1));
auto expanded_a = ops::ExpandDims(s.WithOpName("expanded_a"), a, {1});
auto expanded_b = ops::ExpandDims(s.WithOpName("expanded_b"), b, {1});
auto expanded_c = ops::ExpandDims(s.WithOpName("expanded_c"), c, {1});
auto begin_a = ops::Const(s.WithOpName("begin_a"), {0, 0, 0}, {3});
auto end_a = ops::Const(s.WithOpName("end_a"), {0, 1, 0}, {3});
auto begin_b = ops::Const(s.WithOpName("begin_b"), {0, 1, 0}, {3});
auto end_b = ops::Const(s.WithOpName("end_b"), {0, 2, 0}, {3});
auto begin_c = ops::Const(s.WithOpName("begin_c"), {0, 2, 0}, {3});
auto end_c = ops::Const(s.WithOpName("end_c"), {0, 3, 0}, {3});
auto end_c_1to = ops::Const(s.WithOpName("begin_c_2to"), {0, 0, 0}, {3});
auto strides = ops::Const(s.WithOpName("strides"), {1, 1, 1}, {3});
using SS = ops::StridedSlice;
auto pa_slice = ops::Identity(
s.WithOpName("pa_slice_out"),
SS(s.WithOpName("pa_slice"), stacked, begin_a, end_a, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0b0010)));
auto pb_slice = ops::Identity(
s.WithOpName("pb_slice_out"),
SS(s.WithOpName("pb_slice"), stacked, begin_b, end_b, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0b0010)));
auto pc_slice = ops::Identity(
s.WithOpName("pc_slice_out"),
SS(s.WithOpName("pc_slice"), stacked, begin_c, end_c, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0b0010)));
auto pa_slice_01 = ops::Identity(
s.WithOpName("pa_slice_01_out"),
SS(s.WithOpName("pa_slice_01"), stacked, begin_a, end_a, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0)));
auto pa_slice_to1 = ops::Identity(
s.WithOpName("pa_slice_to1_out"),
SS(s.WithOpName("pa_slice_to1"), stacked, begin_a, end_a, strides,
SS::BeginMask(0b0111)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0)));
auto pb_slice_12 = ops::Identity(
s.WithOpName("pb_slice_12_out"),
SS(s.WithOpName("pb_slice_12"), stacked, begin_b, end_b, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0101)
.NewAxisMask(0)
.ShrinkAxisMask(0)));
auto pc_slice_2to = ops::Identity(
s.WithOpName("pc_slice_2to_out"),
SS(s.WithOpName("pc_slice_2to"), stacked, begin_c, end_c_1to, strides,
SS::BeginMask(0b0101)
.EllipsisMask(0)
.EndMask(0b0111)
.NewAxisMask(0)
.ShrinkAxisMask(0)));
GrapplerItem item;
item.fetch = {"a",
"b",
"c",
"pa_slice_out",
"pb_slice_out",
"pc_slice_out",
"expanded_a",
"expanded_b",
"expanded_c",
"pa_slice_01_out",
"pa_slice_to1_out",
"pb_slice_12_out",
"pc_slice_2to_out"};
enum FetchItem {
fA,
fB,
fC,
fASliceOut,
fBSliceOut,
fCSliceOut,
fExpandedA,
fExpandedB,
fExpandedC,
fASlice01Out,
fASliceTo1Out,
fBSlice12Out,
fCSlice2ToOut,
};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
test::ExpectTensorEqual<float>(tensors_expected[fASliceOut],
tensors_expected[fA]);
test::ExpectTensorEqual<float>(tensors_expected[fBSliceOut],
tensors_expected[fB]);
test::ExpectTensorEqual<float>(tensors_expected[fCSliceOut],
tensors_expected[fC]);
test::ExpectTensorEqual<float>(tensors_expected[fASlice01Out],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors_expected[fASliceTo1Out],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors_expected[fBSlice12Out],
tensors_expected[fExpandedB]);
test::ExpectTensorEqual<float>(tensors_expected[fCSlice2ToOut],
tensors_expected[fExpandedC]);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveStackSliceSameAxis(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
for (const auto& node : output.node()) {
if (node.name() == "pa_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "a");
} else if (node.name() == "pb_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "b");
} else if (node.name() == "pc_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "c");
} else if (absl::EndsWith(node.name(), "_out")) {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(
absl::StrCat(node.input(0), "_out"),
absl::StrCat("ArithmeticOptimizer/RemoveStackStridedSliceSameAxis_",
node.name()));
}
}
auto tensors = EvaluateNodes(output, item.fetch);
test::ExpectTensorEqual<float>(tensors[fASliceOut], tensors_expected[fA]);
test::ExpectTensorEqual<float>(tensors[fBSliceOut], tensors_expected[fB]);
test::ExpectTensorEqual<float>(tensors[fCSliceOut], tensors_expected[fC]);
test::ExpectTensorEqual<float>(tensors[fASlice01Out],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors[fASliceTo1Out],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors[fBSlice12Out],
tensors_expected[fExpandedB]);
test::ExpectTensorEqual<float>(tensors[fCSlice2ToOut],
tensors_expected[fExpandedC]);
}
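// A plain Slice selecting one input of a Pack along the pack axis is
// replaced by an ExpandDims of the corresponding original input.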
TEST_F(ArithmeticOptimizerTest, RemoveStackSimpleSliceSameAxis) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a_in =
ops::Const(s.WithOpName("a_in"), {1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
auto b_in =
ops::Const(s.WithOpName("b_in"), {-1.0f, -2.0f, -3.0f, -4.0f}, {2, 2});
auto c_in =
ops::Const(s.WithOpName("c_in"), {5.0f, 6.0f, 7.0f, 8.0f}, {2, 2});
auto a = ops::PlaceholderWithDefault(s.WithOpName("a"), a_in,
PartialTensorShape({-1, -1}));
auto b = ops::PlaceholderWithDefault(s.WithOpName("b"), b_in,
PartialTensorShape({-1, -1}));
auto c = ops::PlaceholderWithDefault(s.WithOpName("c"), c_in,
PartialTensorShape({-1, -1}));
auto stacked =
ops::Stack(s.WithOpName("stacked"), {a.output, b.output, c.output},
ops::Stack::Axis(1));
auto expanded_a = ops::ExpandDims(s.WithOpName("expanded_a"), a, {1});
auto expanded_b = ops::ExpandDims(s.WithOpName("expanded_b"), b, {1});
auto expanded_c = ops::ExpandDims(s.WithOpName("expanded_c"), c, {1});
auto begin_a = ops::Const(s.WithOpName("begin_a"), {0, 0, 0}, {3});
auto begin_b = ops::Const(s.WithOpName("begin_b"), {0, 1, 0}, {3});
auto begin_c = ops::Const(s.WithOpName("begin_c"), {0, 2, 0}, {3});
auto sizes_to_end = ops::Const(s.WithOpName("size"), {-1, 1, -1}, {3});
auto pa_slice = ops::Identity(
s.WithOpName("pa_slice_out"),
ops::Slice(s.WithOpName("pa_slice"), stacked, begin_a, sizes_to_end));
auto pb_slice = ops::Identity(
s.WithOpName("pb_slice_out"),
ops::Slice(s.WithOpName("pb_slice"), stacked, begin_b, sizes_to_end));
auto pc_slice = ops::Identity(
s.WithOpName("pc_slice_out"),
ops::Slice(s.WithOpName("pc_slice"), stacked, begin_c, sizes_to_end));
GrapplerItem item;
item.fetch = {"a",
"b",
"c",
"pa_slice_out",
"pb_slice_out",
"pc_slice_out",
"expanded_a",
"expanded_b",
"expanded_c"};
enum FetchItem {
fA,
fB,
fC,
fASliceOut,
fBSliceOut,
fCSliceOut,
fExpandedA,
fExpandedB,
fExpandedC,
};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
test::ExpectTensorEqual<float>(tensors_expected[fASliceOut],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors_expected[fBSliceOut],
tensors_expected[fExpandedB]);
test::ExpectTensorEqual<float>(tensors_expected[fCSliceOut],
tensors_expected[fExpandedC]);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveStackSliceSameAxis(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
const string kExpandDimsNamePrefix(
"ArithmeticOptimizer/RemoveStackStridedSliceSameAxis_p");
for (const auto& node : output.node()) {
if (node.name() == "pa_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), absl::StrCat(kExpandDimsNamePrefix, "a_slice"));
} else if (node.name() == "pb_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), absl::StrCat(kExpandDimsNamePrefix, "b_slice"));
} else if (node.name() == "pc_slice_out") {
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), absl::StrCat(kExpandDimsNamePrefix, "c_slice"));
} else if (absl::StartsWith(node.name(), kExpandDimsNamePrefix)) {
EXPECT_EQ(node.op(), "ExpandDims");
EXPECT_EQ(node.input(0),
node.name().substr(kExpandDimsNamePrefix.size(), 1));
}
}
auto tensors = EvaluateNodes(output, item.fetch);
test::ExpectTensorEqual<float>(tensors[fASliceOut],
tensors_expected[fExpandedA]);
test::ExpectTensorEqual<float>(tensors[fBSliceOut],
tensors_expected[fExpandedB]);
test::ExpectTensorEqual<float>(tensors[fCSliceOut],
tensors_expected[fExpandedC]);
}
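// AddN of the same tensor twice is rewritten to a scalar multiplication,
// and the rewrite works for bfloat16 with bit-identical results.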
TEST_F(ArithmeticOptimizerTest, SimplifyAggregationBFloat16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
Output cast = ops::Cast(s.WithOpName("cast"), x, DT_BFLOAT16);
Output add = ops::AddN(s.WithOpName("add"), {cast, cast});
Output id = ops::Identity(s.WithOpName("id"), add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySimplifyAggregation(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
EXPECT_EQ(output.node_size(), 5);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<bfloat16>(tensors[0], tensors_expected[0]);
}
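// Gather(embeddings, unique.y) followed by SparseSegmentSum over unique.idx
// collapses to SparseSegmentSum over the raw indices; Unique and Gather
// disappear.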
TEST_F(ArithmeticOptimizerTest, SimplifyEmbeddingLookup) {
for (DataType unique_idx_type : {DT_INT32, DT_INT64}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output embeddings = ops::Const(s.WithOpName("embeddings"),
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output segment_ids =
ops::Const(s.WithOpName("segment_ids"), {0, 1, 1, 2, 2, 2, 2});
Output indices = ops::Const(s.WithOpName("indices"), {0, 0, 1, 0, 1, 0, 1});
auto unique = ops::Unique(s.WithOpName("unique"), indices,
{unique_idx_type});
Output ids = unique.y;
Output idx = unique.idx;
Output gathered_rows =
ops::Gather(s.WithOpName("gathered_rows"), embeddings, ids);
Output result = ops::SparseSegmentSum(s.WithOpName("result"), gathered_rows,
idx, segment_ids);
Output id = ops::Identity(s.WithOpName("id"), result);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySimplifyEmbeddingLookup(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
for (const auto& node : output.node()) {
if (node.name() == "result") {
EXPECT_EQ(node.input(0), "embeddings");
EXPECT_EQ(node.input(1), "indices");
}
EXPECT_NE(node.op(), "Unique");
EXPECT_NE(node.op(), "Gather");
}
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
}
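// Same rewrite for ResourceGather on a variable handle: a ReadVariableOp
// (inheriting the _class attribute) replaces the gather.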
TEST_F(ArithmeticOptimizerTest, SimplifyResourceEmbeddingLookup) {
for (DataType unique_idx_type : {DT_INT32, DT_INT64}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output embeddings = ops::Const(s.WithOpName("embeddings"),
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output segment_ids =
ops::Const(s.WithOpName("segment_ids"), {0, 1, 1, 2, 2, 2, 2});
Output indices = ops::Const(s.WithOpName("indices"), {0, 0, 1, 0, 1, 0, 1});
auto unique = ops::Unique(s.WithOpName("unique"), indices,
{unique_idx_type});
Output ids = unique.y;
Output idx = unique.idx;
auto var =
ops::VarHandleOp(s.WithOpName("var"), DT_FLOAT, TensorShape({2, 2}));
ops::AssignVariableOp assign_op(s.WithOpName("assign_var_handle"), var,
embeddings);
Output gathered_rows = ops::ResourceGather(
s.WithOpName("gathered_rows")
.WithControlDependencies(std::vector<Operation>{assign_op}),
var, ids, DT_FLOAT);
gathered_rows.node()->AddAttr("_class", {"test_class"});
Output result =
ops::SparseSegmentSum(s.WithOpName("result").WithControlDependencies(
std::vector<Operation>{assign_op}),
gathered_rows, idx, segment_ids);
Output id = ops::Identity(s.WithOpName("id"), result);
GrapplerItem item;
item.init_ops.push_back("assign_var_handle");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlySimplifyEmbeddingLookup(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
bool read_var_node_found = false;
for (const auto& node : output.node()) {
if (node.name() == "result") {
EXPECT_EQ(
node.input(0),
"ArithmeticOptimizer/SimplifyEmbeddingLookupStage_ReadVar_result");
EXPECT_EQ(node.input(1), "indices");
}
if (node.op() == "ReadVariableOp") {
read_var_node_found = true;
EXPECT_EQ(node.attr().at("_class").list().s(0), "test_class");
}
EXPECT_NE(node.op(), "Unique");
EXPECT_NE(node.op(), "Gather");
}
EXPECT_TRUE(read_var_node_found);
for (int i = 0; i < output.node_size(); ++i) {
if (output.node(i).name() ==
"ArithmeticOptimizer/SimplifyEmbeddingLookupStage_ReadVar_result") {
output.mutable_node(i)->add_input("^assign_var_handle");
}
}
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
}
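// Casts on the indices and segment_ids feeding SparseSegmentSum are
// removed, since the op accepts int32 and int64 directly.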
TEST_F(ArithmeticOptimizerTest, RemoveCastIntoSegmentReduction) {
for (DataType indices_type : {DT_INT32, DT_INT64}) {
for (DataType segment_ids_type : {DT_INT32, DT_INT64}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output embeddings = ops::Const(s.WithOpName("embeddings"),
{1.0f, 2.0f, 3.0f, 4.0f}, {2, 2});
Output indices =
ops::Cast(s.WithOpName("cast_indices"),
ops::Const(s.WithOpName("indices"), {0, 0, 1, 0, 1, 0, 1}),
indices_type);
Output segment_ids = ops::Cast(
s.WithOpName("cast_segment_ids"),
ops::Const(s.WithOpName("segment_ids"), {0, 1, 1, 2, 2, 2, 2}),
segment_ids_type);
Output result = ops::SparseSegmentSum(s.WithOpName("result"), embeddings,
indices, segment_ids);
Output id = ops::Identity(s.WithOpName("id"), result);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"id"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
GraphDef output;
ArithmeticOptimizer optimizer;
EnableOnlyRemoveCastIntoSegmentReduction(&optimizer);
OptimizeAndPrune(&optimizer, &item, &output);
for (const auto& node : output.node()) {
if (node.name() == "result") {
EXPECT_EQ(node.input(1), "indices");
EXPECT_EQ(node.input(2), "segment_ids");
}
EXPECT_NE(node.op(), "Cast");
}
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54e4611a-f874-4b76-b0bf-4a081ea79e25 | cpp | tensorflow/tensorflow | auto_parallel | tensorflow/core/grappler/optimizers/auto_parallel.cc | tensorflow/core/grappler/optimizers/auto_parallel_test.cc | #include "tensorflow/core/grappler/optimizers/auto_parallel.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace grappler {
const char kAutoParallelPrefix[] = "AutoParallel";
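// Creates a float Const holding the replica count; each gradient is divided
// by it so the averaged update matches the single-replica one.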
NodeDef* AutoParallel::AddNodeDivConst() {
NodeDef* node = graph_.add_node();
node->set_name(strings::StrCat(kAutoParallelPrefix, "-Div-Const"));
node->set_op("Const");
AttrValue attr_data_type;
attr_data_type.set_type(DT_FLOAT);
node->mutable_attr()->insert({"dtype", attr_data_type});
AttrValue attr_tensor;
auto tensor = attr_tensor.mutable_tensor();
tensor->add_float_val(static_cast<float>(num_replicas_));
tensor->set_dtype(DT_FLOAT);
node->mutable_attr()->insert({"value", attr_tensor});
return node;
}
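// Creates a RealDiv node that scales the given gradient tensor by the
// replica-count constant.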
NodeDef* AutoParallel::AddNodeDiv(const string& name, const string& input_a,
const string& input_b) {
NodeDef* node = graph_.add_node();
node->set_name(strings::StrCat(kAutoParallelPrefix, "-Div-", name));
node->set_op("RealDiv");
node->add_input(input_a);
node->add_input(input_b);
AttrValue attr_type;
attr_type.set_type(DT_FLOAT);
node->mutable_attr()->insert({"T", attr_type});
return node;
}
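// Creates a NoOp node with control dependencies on all nodes in `deps`.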
NodeDef* AutoParallel::AddNodeControl(const string& name,
const std::set<string>& deps,
GraphDef* graph) {
NodeDef* node = graph->add_node();
node->set_name(name);
node->set_op("NoOp");
for (const auto& dep : deps) {
node->add_input(strings::StrCat("^", dep));
}
return node;
}
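// Scans the item: finds the apply-gradient nodes, inserts the gradient
// scaling divs, and partitions the graph into replicated nodes versus nodes
// shared across replicas (variables, init ops, and the input pipeline up to
// the dequeue).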
Status AutoParallel::Initialize(const GrapplerItem& item) {
num_gpus_ = GetNumAvailableGPUs();
LOG(INFO) << "Number of GPUs: " << num_gpus_;
item_ = &item;
graph_ = item.graph;
LOG(INFO) << "Original graph size: " << graph_.node_size();
if (item.fetch.empty()) {
return Status(absl::StatusCode::kInvalidArgument,
"No fetch nodes provided.");
}
if (item.MainVariables().empty()) {
return Status(absl::StatusCode::kInvalidArgument, "No variables provided.");
}
for (const auto& init : item.init_ops) {
VLOG(1) << "Init node: " << init;
}
for (const auto& fetch : item.fetch) {
VLOG(1) << "Fetch node: " << fetch;
}
for (const auto& var : item.MainVariables()) {
VLOG(2) << "Variable: " << var->name();
}
const std::set<string> apply_gradients_ops = {"ApplyGradientDescent",
"ApplyProximalGradientDescent",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyProximalAdagrad",
"ApplyAdagradDA",
"ApplyFtrl",
"ApplyMomentum",
"ApplyAdam",
"ApplyRMSProp",
"ApplyCenteredRMSProp"};
for (int i = 0; i < graph_.node_size(); i++) {
all_nodes_.insert(
std::make_pair(graph_.node(i).name(), graph_.mutable_node(i)));
if (apply_gradients_ops.find(graph_.node(i).op()) !=
apply_gradients_ops.end()) {
apply_gradients_nodes_.insert(graph_.node(i).name());
VLOG(2) << "Apply gradients node: " << graph_.node(i).name();
}
}
auto div_const_node = AddNodeDivConst();
all_nodes_.insert(std::make_pair(div_const_node->name(), div_const_node));
std::map<string, int> gradient_pos = {{"ApplyGradientDescent", 2},
{"ApplyProximalGradientDescent", 4},
{"ApplyAdadelta", 6},
{"ApplyAdagrad", 3},
{"ApplyProximalAdagrad", 5},
{"ApplyAdagradDA", 3},
{"ApplyFtrl", 3},
{"ApplyMomentum", 3},
{"ApplyAdam", 9},
{"ApplyRMSProp", 7},
{"ApplyCenteredRMSProp", 8}};
for (const auto& apply_gradient_node_name : apply_gradients_nodes_) {
auto apply_gradients_op = all_nodes_[apply_gradient_node_name]->op();
auto apply_gradients_node = all_nodes_[apply_gradient_node_name];
auto div_node = AddNodeDiv(
apply_gradient_node_name,
apply_gradients_node->input(gradient_pos[apply_gradients_op]),
div_const_node->name());
all_nodes_.insert(std::make_pair(div_node->name(), div_node));
*apply_gradients_node->mutable_input(gradient_pos[apply_gradients_op]) =
div_node->name();
}
LOG(INFO) << "Graph size after adding div nodes: " << all_nodes_.size();
std::vector<const NodeDef*> train_nodes;
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, item.fetch, &train_nodes));
LOG(INFO) << "Number of training nodes: " << train_nodes.size();
const NodeDef* dequeue_node = nullptr;
for (const auto& train_node : train_nodes) {
if (IsDequeueOp(*train_node)) {
dequeue_node = train_node;
break;
}
}
std::vector<const NodeDef*> input_nodes;
if (dequeue_node) {
LOG(INFO) << "Dequeue node: " << dequeue_node->name();
TF_RETURN_IF_ERROR(ComputeTransitiveFanin(graph_, {dequeue_node->name()},
{}, &input_nodes));
}
LOG(INFO) << "Number of input nodes: " << input_nodes.size();
std::set<string> dont_replicate_nodes;
for (const auto& variable : item.MainVariables()) {
dont_replicate_nodes.insert(variable->name());
}
for (const auto& init : item.init_ops) {
dont_replicate_nodes.insert(NodeName(init));
}
for (const auto& input_node : input_nodes) {
if (input_node->name() != dequeue_node->name()) {
dont_replicate_nodes.insert(input_node->name());
}
}
for (const auto& node : train_nodes) {
if (dont_replicate_nodes.find(node->name()) == dont_replicate_nodes.end()) {
replica_nodes_.insert(node->name());
}
}
LOG(INFO) << "Number of replica nodes: " << replica_nodes_.size();
for (const auto& node : all_nodes_) {
if (replica_nodes_.find(node.first) == replica_nodes_.end()) {
shared_nodes_.insert(node.first);
}
}
LOG(INFO) << "Number of shared nodes: " << shared_nodes_.size();
return absl::OkStatus();
}
bool AutoParallel::NotSharedNode(const string& name) {
return shared_nodes_.find(name) == shared_nodes_.end();
}
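// Emits the shared nodes once, rewiring inputs that come from replicated
// nodes to the replica-0 copies.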
void AutoParallel::AddSharedNodes(GraphDef* graph) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", 0);
for (const auto& node : shared_nodes_) {
auto new_node = graph->add_node();
*new_node = *all_nodes_[node];
for (int i = 0; i < new_node->input_size(); i++) {
if (NotSharedNode(NodeName(new_node->input(i)))) {
string new_name = AddPrefixToNodeName(new_node->input(i), prefix);
*new_node->mutable_input(i) = new_name;
}
}
}
}
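// Emits one prefixed copy of every replica node, round-robining the replicas
// across the available GPUs.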
void AutoParallel::AddOneReplica(GraphDef* graph, int number) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", number);
for (const auto& node : replica_nodes_) {
auto new_node = graph->add_node();
*new_node = *all_nodes_[node];
if (NotSharedNode(new_node->name())) {
new_node->set_name(AddPrefixToNodeName(new_node->name(), prefix));
if (num_gpus_ > 0) {
new_node->set_device(strings::StrCat("/gpu:", number % num_gpus_));
}
for (int i = 0; i < new_node->input_size(); i++) {
if (NotSharedNode(NodeName(new_node->input(i)))) {
string new_name = AddPrefixToNodeName(new_node->input(i), prefix);
*new_node->mutable_input(i) = new_name;
}
}
}
}
}
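// Assembles the output graph: shared nodes, then one copy of the replica
// nodes per replica, then a control node that makes each original fetch name
// depend on every replica's fetch.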
void AutoParallel::BuildGraph(GraphDef* graph) {
AddSharedNodes(graph);
for (int i = 0; i < num_replicas_; i++) {
AddOneReplica(graph, i);
}
std::set<string> fetches;
for (size_t i = 0; i < item_->fetch.size(); i++) {
for (int j = 0; j < num_replicas_; j++) {
string prefix = strings::StrCat(kAutoParallelPrefix, "-Replica-", j);
string fetch = AddPrefixToNodeName(item_->fetch[i], prefix);
fetches.insert(fetch);
}
}
string name_control =
strings::StrCat(kAutoParallelPrefix, "-Control-", "Fetch");
auto control = AddNodeControl(name_control, fetches, graph);
for (const auto& fetch : item_->fetch) {
AddNodeControl(fetch, {control->name()}, graph);
}
*graph->mutable_library() = item_->graph.library();
*graph->mutable_versions() = item_->graph.versions();
LOG(INFO) << "Parallelized graph size: " << graph->node_size();
}
Status AutoParallel::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
TF_RETURN_IF_ERROR(Initialize(item));
BuildGraph(output);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/auto_parallel.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class AutoParallelTest : public ::testing::Test {};
TEST_F(AutoParallelTest, SimpleParallel) {
tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope();
Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1});
Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1});
Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a});
Output identity = ops::Identity(s.WithOpName("identity"), {var});
Output fifo_queue = ops::FIFOQueue(s.WithOpName("fifo_queue"), {DT_FLOAT});
auto dequeue = ops::QueueDequeueMany(s.WithOpName("dequeue"), {fifo_queue},
{constant_b}, {DT_FLOAT});
Output add = ops::AddN(s.WithOpName("add"), {constant_a, dequeue[0]});
Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});
Output apply_gradient = ops::ApplyGradientDescent(
s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add});
GrapplerItem item;
item.init_ops.push_back("assign");
item.fetch.push_back("apply_gradient");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AutoParallel parallel(2);
GraphDef output;
Status status = parallel.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(21, output.node_size());
const NodeDef& node_assign = output.node(0);
EXPECT_EQ("assign", node_assign.name());
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_assign.input(1));
const NodeDef& node_constant_b = output.node(1);
EXPECT_EQ("constant_b", node_constant_b.name());
const NodeDef& node_fifo_queue = output.node(2);
EXPECT_EQ("fifo_queue", node_fifo_queue.name());
const NodeDef& node_identity = output.node(3);
EXPECT_EQ("identity", node_identity.name());
EXPECT_EQ("var", node_identity.input(0));
const NodeDef& node_var = output.node(4);
EXPECT_EQ("var", node_var.name());
const NodeDef& node_div_const0 = output.node(5);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-Const",
node_div_const0.name());
const NodeDef& node_div0 = output.node(6);
EXPECT_EQ("AutoParallel-Replica-0/AutoParallel-Div-apply_gradient",
node_div0.name());
const NodeDef& node_add0 = output.node(7);
EXPECT_EQ("AutoParallel-Replica-0/add", node_add0.name());
const NodeDef& node_gradient0 = output.node(8);
EXPECT_EQ("AutoParallel-Replica-0/apply_gradient", node_gradient0.name());
const NodeDef& node_constant_a0 = output.node(9);
EXPECT_EQ("AutoParallel-Replica-0/constant_a", node_constant_a0.name());
const NodeDef& node_dequeue0 = output.node(10);
EXPECT_EQ("AutoParallel-Replica-0/dequeue", node_dequeue0.name());
const NodeDef& node_learning_rate0 = output.node(11);
EXPECT_EQ("AutoParallel-Replica-0/learning_rate", node_learning_rate0.name());
const NodeDef& node_div_const1 = output.node(12);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-Const",
node_div_const1.name());
const NodeDef& node_div1 = output.node(13);
EXPECT_EQ("AutoParallel-Replica-1/AutoParallel-Div-apply_gradient",
node_div1.name());
const NodeDef& node_add1 = output.node(14);
EXPECT_EQ("AutoParallel-Replica-1/add", node_add1.name());
const NodeDef& node_gradient1 = output.node(15);
EXPECT_EQ("AutoParallel-Replica-1/apply_gradient", node_gradient1.name());
const NodeDef& node_constant_a1 = output.node(16);
EXPECT_EQ("AutoParallel-Replica-1/constant_a", node_constant_a1.name());
const NodeDef& node_dequeue1 = output.node(17);
EXPECT_EQ("AutoParallel-Replica-1/dequeue", node_dequeue1.name());
const NodeDef& node_learning_rate1 = output.node(18);
EXPECT_EQ("AutoParallel-Replica-1/learning_rate", node_learning_rate1.name());
const NodeDef& node_fetch = output.node(19);
EXPECT_EQ("AutoParallel-Control-Fetch", node_fetch.name());
EXPECT_EQ("^AutoParallel-Replica-0/apply_gradient", node_fetch.input(0));
EXPECT_EQ("^AutoParallel-Replica-1/apply_gradient", node_fetch.input(1));
const NodeDef& node_gradient = output.node(20);
EXPECT_EQ("apply_gradient", node_gradient.name());
EXPECT_EQ("^AutoParallel-Control-Fetch", node_gradient.input(0));
}
TEST_F(AutoParallelTest, SimpleParallelNoDequeue) {
tensorflow::Scope s = tensorflow::Scope::DisabledShapeInferenceScope();
Output constant_a = ops::Const(s.WithOpName("constant_a"), 1.0f, {1});
Output constant_c = ops::Const(s.WithOpName("constant_c"), 1.0f, {1});
Output constant_b = ops::Const(s.WithOpName("constant_b"), 1, {1});
Output var = ops::Variable(s.WithOpName("var"), {1}, DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), {var}, {constant_a});
Output add = ops::AddN(s.WithOpName("add"), {constant_a, constant_c});
Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1});
Output apply_gradient = ops::ApplyGradientDescent(
s.WithOpName("apply_gradient"), {var}, {learning_rate}, {add});
GrapplerItem item;
item.init_ops.push_back("assign");
item.fetch.push_back("apply_gradient");
TF_CHECK_OK(s.ToGraphDef(&item.graph));
AutoParallel parallel(2);
GraphDef output;
Status status = parallel.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_parallel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_parallel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d46b1e94-dd76-416b-ac17-f2a57bb63e60 | cpp | tensorflow/tensorflow | batch_op_rewriter | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.cc | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter_test.cc | #include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <functional>
#include <string>
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/map.h"
#include "google/protobuf/repeated_field.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchFunction[] = "BatchFunction";
constexpr char kBatchOpRewriteConfigParamKey[] = "batch_op_rewrite_config";
constexpr char kNumBatchThreadsAttr[] = "num_batch_threads";
constexpr char kMaxBatchSizeAttr[] = "max_batch_size";
constexpr char kBatchTimeoutMicrosAttr[] = "batch_timeout_micros";
constexpr char kAllowedBatchSizesAttr[] = "allowed_batch_sizes";
constexpr char kMaxEnqueuedBatchesAttr[] = "max_enqueued_batches";
constexpr char kEnableLargeBatchSplitting[] = "enable_large_batch_splitting";
constexpr int64 kBoostMicrosNotSet = -1;
using BatchOpRewriteFunction = std::function<void(NodeDef* batch_op)>;
}
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::Status;
using ::tensorflow::grappler::Cluster;
using ::tensorflow::grappler::GrapplerItem;
namespace {
struct AdaptiveBatchSchedulerParams {
int32 initial_inflight_batches;
int32 min_inflight_batches;
int32 max_inflight_batches;
int32 batches_to_average_over;
int64_t full_batch_scheduling_boost_micros;
};
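// Reads the scheduler knobs from the config, falling back to the default
// constants when a field is unset.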
AdaptiveBatchSchedulerParams GetAdaptiveBatchSchedulerParams(
const BatchOpRewriteConfig::AdaptiveBatchSchedulerOption& option) {
AdaptiveBatchSchedulerParams params;
params.min_inflight_batches =
option.has_min_inflight_batches_limit()
? option.min_inflight_batches_limit().value()
: kMinInflightBatches;
params.initial_inflight_batches =
option.has_initial_inflight_batches_limit()
? option.initial_inflight_batches_limit().value()
: kInitialInflightBatches;
params.max_inflight_batches =
option.has_max_inflight_batches_limit()
? option.max_inflight_batches_limit().value()
: kMaxInflightBatches;
params.batches_to_average_over =
option.has_batches_to_average_over()
? option.batches_to_average_over().value()
: kBatchesToAverageOver;
params.full_batch_scheduling_boost_micros =
option.has_full_batch_scheduling_boost_micros()
? option.full_batch_scheduling_boost_micros().value()
: kBoostMicrosNotSet;
return params;
}
void SetNodeAttrs(const AdaptiveBatchSchedulerParams& params, NodeDef* node) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMaxInflightBatchesAttr, params.max_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMinInflightBatchesAttr, params.min_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kInitialInflightBatchesAttr, params.initial_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kBatchesToAverageOverAttr, params.batches_to_average_over, node);
  if (params.full_batch_scheduling_boost_micros != kBoostMicrosNotSet) {
::tensorflow::graph_transforms::SetNodeAttr(
kFullBatchSchedulingBoostMicros,
params.full_batch_scheduling_boost_micros, node);
}
}
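// Applies `rewrite_fn` to every BatchFunction op, both in the top-level graph
// and inside every function of the graph's function library.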
void UpdateBatchOps(GraphDef* graph, BatchOpRewriteFunction rewrite_fn) {
for (int i = 0; i < graph->node_size(); ++i) {
NodeDef* node = graph->mutable_node(i);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
for (int i = 0; i < graph->library().function_size(); i++) {
FunctionDef* function_def = graph->mutable_library()->mutable_function(i);
for (int j = 0; j < function_def->node_def_size(); j++) {
NodeDef* node = function_def->mutable_node_def(j);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
}
}
}
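// The rewrite config arrives as a base64-encoded, serialized
// BatchOpRewriteConfig proto in the rewriter's parameter map.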
Status BatchOpRewriter::Init(
const ::tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (config->parameter_map().find(kBatchOpRewriteConfigParamKey) ==
config->parameter_map().end()) {
return absl::InternalError(
"batch_op_rewrite_config param must be set in the rewriter config "
"with a serialized/encoded BatchOpRewriteConfig.");
}
const auto& params =
config->parameter_map().at(kBatchOpRewriteConfigParamKey);
std::string unencoded;
if (params.s().empty()) {
VLOG(2) << "Empty batch-op rewrite config";
return absl::OkStatus();
}
if (!absl::Base64Unescape(params.s(), &unencoded)) {
    return absl::InternalError(
        "Failed to base64-decode batch_op_rewrite_config from params.");
}
if (!config_.ParseFromString(unencoded)) {
return absl::InternalError(
"Failed to parse batch_op_rewrite_config from params.");
}
VLOG(2) << "BatchOp Rewrite config is " << config_.DebugString();
return absl::OkStatus();
}
Status BatchOpRewriter::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
VLOG(2) << "Running BatchOp Rewriter";
*optimized_graph = item.graph;
bool asbs_overridden = false;
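  // Per-model batch options are keyed by the session metadata name, so they
  // only take effect when the ConfigProto carries session metadata.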
if (config_proto_.has_experimental() &&
config_proto_.experimental().has_session_metadata()) {
const string model_name =
config_proto_.experimental().session_metadata().name();
if (!config_.model_scheduler_options().empty()) {
return absl::InvalidArgumentError(
"model_scheduler_options is deprecated. Please use the "
"adaptive_batch_scheduler_option field in batch_options instead.");
}
auto model_batch_options = config_.batch_options().find(model_name);
if (model_batch_options != config_.batch_options().end()) {
auto& batch_options = model_batch_options->second;
VLOG(2) << "Rewriting batch_options for " << model_name << " to "
<< batch_options.DebugString();
if (batch_options.has_adaptive_batch_scheduler_option()) {
AdaptiveBatchSchedulerParams params = GetAdaptiveBatchSchedulerParams(
batch_options.adaptive_batch_scheduler_option());
if ((params.min_inflight_batches > params.max_inflight_batches) ||
(params.initial_inflight_batches < params.min_inflight_batches) ||
(params.initial_inflight_batches > params.max_inflight_batches)) {
return absl::InvalidArgumentError(absl::StrCat(
"Requires min_inflight_batches <= initial_inflight_batches "
"and initial_inflight_batches <= max_inflight_batches; Got "
"{min_inflight_batches : ",
params.min_inflight_batches,
", initial_inflight_batches : ", params.initial_inflight_batches,
", max_inflight_batches : ", params.max_inflight_batches, "}."));
}
asbs_overridden = true;
UpdateBatchOps(optimized_graph, [¶ms](NodeDef* batch_op) {
SetNodeAttrs(params, batch_op);
});
}
if (config_.enable_adaptive_shared_batching_thread_pool() &&
!asbs_overridden && batch_options.has_num_batch_threads() &&
batch_options.num_batch_threads() != 0) {
return absl::InvalidArgumentError(
"Unable to enable adapative shared batching because it requires "
"num_batch_threads=0 but the BatchOpRewriteConfig is also trying "
"to set num_batch_threads. Set either set "
"enable_adaptive_shared_batching_thread_pool or num_batch_threads "
"but not both.");
}
UpdateBatchOps(optimized_graph, [&batch_options](NodeDef* batch_op) {
if (batch_options.has_num_batch_threads()) {
::tensorflow::graph_transforms::SetNodeAttr(
kNumBatchThreadsAttr, batch_options.num_batch_threads(),
batch_op);
}
if (batch_options.has_max_batch_size()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxBatchSizeAttr, batch_options.max_batch_size(), batch_op);
}
if (batch_options.has_batch_timeout_micros()) {
::tensorflow::graph_transforms::SetNodeAttr(
kBatchTimeoutMicrosAttr, batch_options.batch_timeout_micros(),
batch_op);
}
if (!batch_options.allowed_batch_sizes().empty()) {
::tensorflow::graph_transforms::SetNodeAttr(
kAllowedBatchSizesAttr, batch_options.allowed_batch_sizes(),
batch_op);
}
if (batch_options.has_max_enqueued_batches()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxEnqueuedBatchesAttr, batch_options.max_enqueued_batches(),
batch_op);
}
if (batch_options.has_disable_large_batch_splitting()) {
::tensorflow::graph_transforms::SetNodeAttr(
kEnableLargeBatchSplitting,
!batch_options.disable_large_batch_splitting(), batch_op);
}
});
}
}
if (asbs_overridden) {
return absl::OkStatus();
}
if (config_.enable_adaptive_shared_batching_thread_pool()) {
UpdateBatchOps(optimized_graph, [](NodeDef* batch_op) {
::tensorflow::graph_transforms::SetNodeAttr(kNumBatchThreadsAttr, 0,
batch_op);
});
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchOpRewriter, "batch_op_rewrite");
}
} | #include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/escaping.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::RewriterConfig_CustomGraphOptimizer;
using ::tensorflow::Status;
using ::tensorflow::grappler::GrapplerItem;
using ::tensorflow::serving::BatchOpRewriteConfig;
void AddBatchOp(GraphDef* graph, int num_batch_threads = 16,
const absl::flat_hash_map<string, int>& reserved_int_attrs = {},
int max_batch_size = 16, int batch_timeout_micros = 10000,
const std::vector<int32>& allowed_batch_sizes = {8, 16},
int max_enqueued_batches = 1000,
bool disable_large_batch_splitting = false) {
auto set_batch_node_attribute = [&](const int32_t num_batch_threads,
NodeDef* batch_op) {
batch_op->set_name("cond/batch/BatchFunction");
batch_op->set_op("BatchFunction");
::tensorflow::graph_transforms::SetNodeAttr("num_batch_threads",
num_batch_threads, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_batch_size",
max_batch_size, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("batch_timeout_micros",
batch_timeout_micros, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("allowed_batch_sizes",
allowed_batch_sizes, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_enqueued_batches",
max_enqueued_batches, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("enable_large_batch_splitting",
!disable_large_batch_splitting,
batch_op);
if (!reserved_int_attrs.empty()) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, batch_op);
for (const auto& reserved_int_attr : reserved_int_attrs) {
::tensorflow::graph_transforms::SetNodeAttr(
reserved_int_attr.first, reserved_int_attr.second, batch_op);
}
}
};
set_batch_node_attribute(num_batch_threads, graph->add_node());
FunctionDefLibrary* function_def_lib = graph->mutable_library();
FunctionDef* function_def = function_def_lib->add_function();
set_batch_node_attribute(num_batch_threads, function_def->add_node_def());
}
RewriterConfig_CustomGraphOptimizer MakeConfig(
const BatchOpRewriteConfig& config) {
RewriterConfig_CustomGraphOptimizer rewriter_config;
(*rewriter_config.mutable_parameter_map())["batch_op_rewrite_config"].set_s(
absl::Base64Escape(config.SerializeAsString()));
return rewriter_config;
}
class BatchOpRewriterTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(RewriteNumBatchThreads, BatchOpRewriterTest,
::testing::Bool());
TEST_P(BatchOpRewriterTest, Basic) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
GraphDef optimized_graph;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
AddBatchOp(&expected_graph, GetParam() ? 0 : 16);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_P(BatchOpRewriterTest, InvalidArgumentForAdaptiveBatchScheduler) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_version(123);
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_name("model_with_override");
GraphDef optimized_graph;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(errors::IsInvalidArgument(status));
}
TEST_P(BatchOpRewriterTest, AdaptiveBatchScheduler) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_max_inflight_batches_limit()
->set_value(32);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_full_batch_scheduling_boost_micros()
->set_value(12345);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
  AddBatchOp(&expected_graph, 16 /* num_batch_threads */,
{{kBatchesToAverageOverAttr, 1000},
{kInitialInflightBatchesAttr, 16},
{kMinInflightBatchesAttr, 8},
{kMaxInflightBatchesAttr, 32},
{kFullBatchSchedulingBoostMicros, 12345}});
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest, UpdateModelSchedulerOptions) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
TEST_F(BatchOpRewriterTest, UpdateBatchOptions) {
BatchOpRewriteConfig config;
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
(*config.mutable_batch_options())["model_with_override"].set_max_batch_size(
128);
(*config.mutable_batch_options())["model_with_override"]
.set_batch_timeout_micros(5000);
const std::vector<int32> allowed_batch_sizes{4, 32};
(*config.mutable_batch_options())["model_with_override"]
.mutable_allowed_batch_sizes()
->Add(allowed_batch_sizes.begin(), allowed_batch_sizes.end());
(*config.mutable_batch_options())["model_with_override"]
.set_max_enqueued_batches(500);
(*config.mutable_batch_options())["model_with_override"]
.set_disable_large_batch_splitting(true);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
  AddBatchOp(&expected_graph, 2 /* num_batch_threads */,
             {} /* reserved_int_attrs */, 128 /* max_batch_size */,
             5000 /* batch_timeout_micros */, allowed_batch_sizes,
             500 /* max_enqueued_batches */,
             true /* disable_large_batch_splitting */);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest,
UpdateAdaptiveSharedBatchSchedulerAndNumBatchThreads) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/inference/batch_op_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68fb818f-c069-4546-9b9c-dcb7b2294c41 | cpp | tensorflow/tensorflow | seq_interleave_prefetch | tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.cc | tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch_test.cc | #include "tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kInterleaveDatasetOpName[] = "InterleaveDataset";
constexpr char kParallelInterleaveDatasetV2OpName[] =
"ParallelInterleaveDatasetV2";
constexpr char kParallelInterleaveDatasetV3OpName[] =
"ParallelInterleaveDatasetV3";
constexpr char kParallelInterleaveDatasetV4OpName[] =
"ParallelInterleaveDatasetV4";
constexpr char kParallelInterleaveDatasetOpName[] = "ParallelInterleaveDataset";
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kDatasetStr[] = "Dataset";
constexpr char kConstOpName[] = "Const";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kConstNodeOutputSuffix[] = ":output:0";
constexpr char kDatasetNodeOutputSuffix[] = ":handle:0";
constexpr char kDeterministicAttr[] = "deterministic";
constexpr char kFunctionAttr[] = "f";
constexpr char kDTypeAttr[] = "dtype";
constexpr char kValueAttr[] = "value";
constexpr char kTArgumentsAttr[] = "Targuments";
constexpr char kOutputTypesAttr[] = "output_types";
constexpr char kMetadataAttr[] = "metadata";
constexpr char kOutputShapesAttr[] = "output_shapes";
constexpr char kTOutputTypesAttr[] = "Toutput_types";
constexpr char kSeqInterleavePrefetchRewritePrefix[] =
"inject/seq_interleave_prefetch_rewrite_";
bool IsParallelInterleave(const std::string& op) {
return data::MatchesAnyVersion(kParallelInterleaveDatasetOpName, op);
}
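// Returns the number of inputs other than `other_arguments` for each supported
// version: V2 and V3 take (input_dataset, cycle_length, block_length,
// num_parallel_calls); V4 additionally takes buffer_output_elements and
// prefetch_input_elements.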
int GetNumInputsForParallelInterleaveOp(const std::string& op) {
if (op == kParallelInterleaveDatasetV2OpName) {
return 4;
} else if (op == kParallelInterleaveDatasetV3OpName) {
return 4;
} else if (op == kParallelInterleaveDatasetV4OpName) {
return 6;
}
return 0;
}
bool NodeOpHasDatasetSuffix(const NodeDef& node) {
return absl::EndsWith(node.op(), kDatasetStr);
}
bool DatasetOpInFunction(const NodeDef& node, const FunctionDef* fn) {
for (const auto& node : fn->node_def()) {
if (NodeOpHasDatasetSuffix(node)) {
return true;
}
}
return false;
}
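// The rewrite only applies to deterministic parallel interleaves whose map
// function has a single output and actually constructs a dataset.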
bool RewritePossibleForNode(const NodeDef& node,
const FunctionLibraryDefinition& fld) {
auto is_deterministic_parallel_interleave_node = [&]() -> bool {
if (!IsParallelInterleave(node.op())) return false;
auto determinism_value = node.attr().find(kDeterministicAttr);
return (determinism_value != node.attr().end()) &&
(determinism_value->second.s() == "true");
};
if (node.attr().count(kFunctionAttr) == 0) return false;
const FunctionDef* fn = fld.Find(node.attr().at(kFunctionAttr).func().name());
if (fn == nullptr) return false;
if (fn->signature().output_arg_size() != 1) return false;
if (is_deterministic_parallel_interleave_node()) {
return DatasetOpInFunction(node, fn);
}
return false;
}
NodeDef CreateBufferSizeNode(DataType dtype,
const std::function<void(TensorProto*)>& add_value,
MutableGraphView* graph, FunctionDef& fdef) {
NodeDef node;
node.set_op(kConstOpName);
function_utils::SetUniqueFunctionNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix, "buffer_size"), &fdef,
&node);
(*node.mutable_attr())[kDTypeAttr].set_type(dtype);
auto tensor = std::make_unique<tensorflow::TensorProto>();
auto tensor_shape = std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())[kValueAttr].set_allocated_tensor(tensor.release());
return node;
}
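// Appends a PrefetchDataset with an autotuned buffer size after the last
// dataset-producing node inside the interleave's map function, so the
// sequential interleave still overlaps input work.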
Status CreateAndAppendPrefetchNode(MutableGraphView* graph, FunctionDef& fdef) {
auto get_last_dataset_op_node = [&]() -> const NodeDef* {
const auto& output_arg = fdef.signature().output_arg(0).name();
const auto& ret_val = fdef.ret().at(output_arg);
auto input = function_utils::FunctionDefTensorDesc(ret_val);
const NodeDef* dataset_op_node = nullptr;
while (
function_utils::ContainsFunctionNodeWithName(input.node_name, fdef)) {
int idx = function_utils::FindFunctionNodeWithName(input.node_name, fdef);
const NodeDef& node = fdef.node_def(idx);
if (NodeOpHasDatasetSuffix(node)) {
dataset_op_node = &node;
break;
}
input = function_utils::FunctionDefTensorDesc(node.input(0));
}
return dataset_op_node;
};
const NodeDef* add_after = get_last_dataset_op_node();
if (add_after == nullptr) {
    return errors::NotFound(
        "Could not find a dataset node after which to append `Prefetch` in "
        "the `seq_interleave_prefetch` rewrite");
}
NodeDef prefetch_node;
prefetch_node.set_op(kPrefetchDatasetOpName);
function_utils::SetUniqueFunctionNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
fdef.signature().name()),
&fdef, &prefetch_node);
const auto input_dataset =
absl::StrCat(add_after->name(), kDatasetNodeOutputSuffix);
NodeDef buffer_size_node = CreateBufferSizeNode(
DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(data::model::kAutotune); },
graph, fdef);
prefetch_node.add_input(input_dataset);
prefetch_node.add_input(
absl::StrCat(buffer_size_node.name(), kConstNodeOutputSuffix));
if (add_after->attr().count(kOutputShapes) > 0) {
graph_utils::CopyAttribute(kOutputShapes, *add_after, &prefetch_node);
} else {
tensorflow::TensorShapeProto* shape =
(*(prefetch_node.mutable_attr()))[kOutputShapes]
.mutable_list()
->add_shape();
shape->set_unknown_rank(true);
}
if (add_after->attr().count(kOutputTypes) > 0) {
graph_utils::CopyAttribute(kOutputTypes, *add_after, &prefetch_node);
} else if (add_after->attr().count(kTOutputTypesAttr) > 0) {
(*(prefetch_node.mutable_attr()))[kOutputTypes] =
add_after->attr().at(kTOutputTypesAttr);
} else {
(*(prefetch_node.mutable_attr()))[kOutputTypes].mutable_list()->add_type(
tensorflow::DataType::DT_STRING);
}
std::string old_input = input_dataset;
std::string new_input =
absl::StrCat(prefetch_node.name(), kDatasetNodeOutputSuffix);
function_utils::ReplaceReferences(old_input, new_input, &fdef);
*fdef.add_node_def() = std::move(prefetch_node);
*fdef.add_node_def() = std::move(buffer_size_node);
return absl::OkStatus();
}
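// Replaces the parallel interleave node with a sequential InterleaveDataset
// that reuses its inputs and (rewritten) map function, then marks the original
// node for deletion.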
Status AddInterleaveNode(MutableGraphView* graph,
const NodeDef& parallel_interleave_node,
const std::string& interleave_map_func_name,
absl::flat_hash_set<string>& nodes_to_delete) {
NodeDef interleave_node;
interleave_node.set_op(kInterleaveDatasetOpName);
graph_utils::SetUniqueGraphNodeName(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
parallel_interleave_node.name()),
graph->graph(), &interleave_node);
int num_other_args =
parallel_interleave_node.input_size() -
GetNumInputsForParallelInterleaveOp(parallel_interleave_node.op());
  int inputs_from_parallel_interleave = 1 /* input dataset */ +
                                        num_other_args +
                                        1 /* cycle_length */ +
                                        1 /* block_length */;
for (int i = 0; i < inputs_from_parallel_interleave; ++i) {
interleave_node.add_input(parallel_interleave_node.input(i));
}
if (parallel_interleave_node.attr().contains(kTArgumentsAttr)) {
graph_utils::CopyAttribute(kTArgumentsAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kOutputTypesAttr)) {
graph_utils::CopyAttribute(kOutputTypesAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kOutputShapesAttr)) {
graph_utils::CopyAttribute(kOutputShapesAttr, parallel_interleave_node,
&interleave_node);
}
if (parallel_interleave_node.attr().contains(kMetadataAttr)) {
graph_utils::CopyAttribute(kMetadataAttr, parallel_interleave_node,
&interleave_node);
}
const auto& parallel_interleave_fn_attr =
parallel_interleave_node.attr().at(kFunctionAttr);
(*interleave_node.mutable_attr())[kFunctionAttr] =
parallel_interleave_fn_attr;
(*interleave_node.mutable_attr())[kFunctionAttr].mutable_func()->set_name(
interleave_map_func_name);
graph_utils::CopyShapesAndTypesAttrs(parallel_interleave_node,
&interleave_node);
*interleave_node.mutable_experimental_type() =
parallel_interleave_node.experimental_type();
NodeDef* new_node_graph = graph->AddNode(std::move(interleave_node));
TF_RETURN_IF_ERROR(graph->UpdateFanouts(parallel_interleave_node.name(),
new_node_graph->name()));
nodes_to_delete.insert(parallel_interleave_node.name());
return absl::OkStatus();
}
}
Status SeqInterleavePrefetch::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition fld(OpRegistry::Global(), item.graph.library());
for (const NodeDef& node : item.graph.node()) {
if (!RewritePossibleForNode(node, fld)) continue;
const FunctionDef* parallel_interleave_fn =
fld.Find(node.attr().at("f").func().name());
FunctionDef interleave_fn(*parallel_interleave_fn);
interleave_fn.mutable_signature()->set_name(
absl::StrCat(kSeqInterleavePrefetchRewritePrefix,
parallel_interleave_fn->signature().name()));
TF_RETURN_IF_ERROR(AddInterleaveNode(
&graph, node, interleave_fn.signature().name(), nodes_to_delete));
TF_RETURN_IF_ERROR(CreateAndAppendPrefetchNode(&graph, interleave_fn));
TF_RETURN_IF_ERROR(fld.ReplaceFunction(
parallel_interleave_fn->signature().name(), interleave_fn));
stats->num_changes++;
}
*output->mutable_library() = fld.ToProto();
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(SeqInterleavePrefetch, "seq_interleave_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::GDef;
using test::function::NDef;
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kInterleaveDatasetOpName[] = "InterleaveDataset";
constexpr char kParallelInterleaveDatasetOpName[] =
"ParallelInterleaveDatasetV4";
constexpr char kSeqInterleavePrefetchRewritePrefix[] =
"inject/seq_interleave_prefetch_rewrite_";
constexpr char kFdefProtoStr[] =
R"pb(signature {
name: "parallel_interleave_fdef"
input_arg { name: "args_0" type: DT_STRING }
output_arg { name: "identity" type: DT_VARIANT }
is_stateful: true
control_output: "SSTableDataset"
}
node_def {
name: "key_prefix"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "start_key"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "stop_key"
op: "Const"
attr {
key: "dtype"
value { type: DT_STRING }
}
attr {
key: "value"
value {
tensor {
dtype: DT_STRING
tensor_shape {}
string_val: ""
}
}
}
}
node_def {
name: "SSTableDataset"
op: "SSTableDataset"
input: "args_0"
input: "key_prefix:output:0"
input: "start_key:output:0"
input: "stop_key:output:0"
attr {
key: "metadata"
value { s: "" }
}
attr {
key: "split_size"
value { i: 0 }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_TENSOR
args { type_id: TFT_STRING }
}
}
}
}
node_def {
name: "Identity"
op: "Identity"
input: "SSTableDataset:handle:0"
input: "^NoOp"
attr {
key: "T"
value { type: DT_VARIANT }
}
}
node_def { name: "NoOp" op: "NoOp" input: "^SSTableDataset" }
ret { key: "identity" value: "Identity:output:0" }
attr {
key: "_construction_context"
value { s: "kEagerRuntime" }
}
attr {
key: "_tf_data_function"
value { b: true }
}
control_ret { key: "SSTableDataset" value: "SSTableDataset" }
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value { list { shape {} } }
}
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
})pb";
GraphDef ParallelInterleaveCase(bool deterministic) {
FunctionDef fdef;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef);
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"parallel_interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "parallel_interleave_fdef",
deterministic ? "true" : "false")},
{
fdef,
});
}
GraphDef MultipleParallelInterleaveCase(bool deterministic) {
FunctionDef fdef_1, fdef_2, fdef_3;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_1);
fdef_1.mutable_signature()->set_name("parallel_interleave_fdef_1");
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_2);
fdef_2.mutable_signature()->set_name("parallel_interleave_fdef_2");
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef_3);
fdef_3.mutable_signature()->set_name("parallel_interleave_fdef_3");
auto make_parallel_interleave_node =
[&deterministic](const int node_num, const FunctionDef &fdef) {
return graph_tests_utils::MakeParallelInterleaveV4Node(
absl::StrCat("parallel_interleave_", node_num), "range",
"cycle_length", "block_length", "num_parallel_calls",
fdef.signature().name(), deterministic ? "true" : "false");
};
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
make_parallel_interleave_node(1, fdef_1),
make_parallel_interleave_node(2, fdef_2),
make_parallel_interleave_node(3, fdef_3)},
{
fdef_1,
fdef_2,
fdef_3,
});
}
GraphDef InterleaveCase(bool deterministic) {
FunctionDef fdef;
protobuf::TextFormat::ParseFromString(kFdefProtoStr, &fdef);
return GDef(
{NDef("stop", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"stop"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeInterleaveNode(
"sequential_interleave", "range", "cycle_length", "block_length",
"parallel_interleave_fdef", deterministic ? "true" : "false")},
{
fdef,
});
}
bool PrefetchInFunction(const NodeDef &node,
const FunctionLibraryDefinition &flib) {
auto f_attr_it = node.attr().find("f");
if (f_attr_it == node.attr().end()) return false;
const FunctionDef *func = flib.Find(f_attr_it->second.func().name());
if (func == nullptr) {
return false;
}
for (int i = 0; i < func->node_def_size(); i++) {
NodeDef node_in_func = func->node_def(i);
if (tensorflow::data::MatchesAnyVersion(
kPrefetchDatasetOpName,
node_in_func.op())) {
return true;
}
}
return false;
}
bool IsInterleaveNode(const NodeDef &node) {
return (node.op() == kInterleaveDatasetOpName);
}
}
Status OptimizeWithInjectInterleavePrefetch(const GrapplerItem &item,
GraphDef *output) {
SeqInterleavePrefetch optimizer;
return optimizer.Optimize(nullptr, item, output);
}
class SeqInterleavePrefetchParameterizedTest
: public ::testing::TestWithParam<bool> {};
TEST_P(SeqInterleavePrefetchParameterizedTest,
ParallelInterleaveHasConditionalInjection) {
GrapplerItem item;
bool deterministic = GetParam();
item.graph = ParallelInterleaveCase(deterministic);
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
const std::string ¶llel_interleave_fdef_name = "parallel_interleave_fdef";
const std::string &interleave_fdef_name = absl::StrCat(
kSeqInterleavePrefetchRewritePrefix, parallel_interleave_fdef_name);
if (deterministic) {
EXPECT_TRUE(
!graph_utils::ContainsGraphNodeWithName("parallel_interleave", output));
EXPECT_TRUE(!graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_TRUE(PrefetchInFunction(node, lib_def));
}
const FunctionDef *parallel_interleave_fdef =
lib_def.Find(parallel_interleave_fdef_name);
const FunctionDef *interleave_fdef = lib_def.Find(interleave_fdef_name);
EXPECT_EQ(parallel_interleave_fdef, nullptr);
EXPECT_NE(interleave_fdef, nullptr);
EXPECT_EQ(lib_def.ListFunctionNames().at(0), interleave_fdef_name);
EXPECT_TRUE(function_utils::FindFunctionNodeWithOp(kPrefetchDatasetOpName,
*interleave_fdef));
} else {
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
!graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("parallel_interleave", output));
EXPECT_NE(lib_def.Find(parallel_interleave_fdef_name), nullptr);
}
EXPECT_EQ(lib_def.num_functions(), 1);
}
TEST_P(SeqInterleavePrefetchParameterizedTest,
MultipleParallelInterleavesHaveConditionalInjection) {
GrapplerItem item;
bool deterministic = GetParam();
item.graph = MultipleParallelInterleaveCase(deterministic);
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
if (deterministic) {
EXPECT_TRUE(!graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (int i = 1; i <= 3; ++i) {
EXPECT_TRUE(!graph_utils::ContainsGraphNodeWithName(
absl::StrCat("parallel_interleave_", std::to_string(i)), output));
}
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_TRUE(PrefetchInFunction(node, lib_def));
}
} else {
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(
kParallelInterleaveDatasetOpName, output));
EXPECT_TRUE(
!graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
for (int i = 1; i <= 3; ++i) {
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
absl::StrCat("parallel_interleave_", std::to_string(i)), output));
}
}
}
TEST_P(SeqInterleavePrefetchParameterizedTest,
SequentialInterleaveHasNoInjection) {
GrapplerItem item;
item.graph = InterleaveCase(GetParam());
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectInterleavePrefetch(item, &output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp(kInterleaveDatasetOpName, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("sequential_interleave", output));
FunctionLibraryDefinition lib_def(OpRegistry::Global(), output.library());
for (auto node : output.node()) {
if (!IsInterleaveNode(node)) continue;
EXPECT_FALSE(PrefetchInFunction(node, lib_def));
}
}
INSTANTIATE_TEST_SUITE_P(Determinism, SeqInterleavePrefetchParameterizedTest,
::testing::Values(false, true));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/seq_interleave_prefetch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f6be64f7-9c91-427c-bf2e-79578886c590 | cpp | tensorflow/tensorflow | remove_compression_map | tensorflow/core/grappler/optimizers/data/remove_compression_map.cc | tensorflow/core/grappler/optimizers/data/remove_compression_map_test.cc | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
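// The compression map is identified by its map function containing a
// CompressElement op.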
absl::StatusOr<std::string> GetCompressionFunctionName(const GraphDef& graph) {
for (const auto& function : graph.library().function()) {
for (const auto& node : function.node_def()) {
if (node.op() == "CompressElement") {
return function.signature().name();
}
}
}
return errors::Internal("Compression function not found.");
}
absl::StatusOr<NodeDef> GetCompressionMapNode(const GraphDef& graph) {
TF_ASSIGN_OR_RETURN(std::string compression_function_name,
GetCompressionFunctionName(graph));
for (const auto& node : graph.node()) {
if (node.op() != "ParallelMapDatasetV2") {
continue;
}
if (auto it = node.attr().find("f");
it != node.attr().end() && it->second.has_func() &&
it->second.func().name() == compression_function_name) {
return node;
}
}
return errors::Internal("Compression map node not found.");
}
}
Status RemoveCompressionMap::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
TF_ASSIGN_OR_RETURN(NodeDef compression_map_node,
GetCompressionMapNode(*output));
MutableGraphView graph(output);
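  // Bypass the compression map: rewire each of its consumers directly to the
  // map's input dataset.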
for (const auto& compression_map_output :
graph.GetFanout(graph.GetOutputPort(compression_map_node.name(), 0))) {
compression_map_output.node->clear_input();
compression_map_output.node->add_input(compression_map_node.input().Get(0));
++stats->num_changes;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(RemoveCompressionMap, "remove_compression_map");
}
} | #include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::HasSubstr;
TEST(RemoveCompressionMap, Success) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("Const/_4",
"Const",
{},
{{"dtype", DT_INT64},
{"value", -1}}),
graph_tests_utils::MakeParallelMapV2Node(
"ParallelMapDatasetV2/_5",
"RangeDataset/_3",
"Const/_4",
"__inference_Dataset_map_lambda_10",
"default",
false),
NDef("dataset",
"_Retval",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"ParallelMapDatasetV2/_5"},
{{"T", DT_VARIANT}})},
{FunctionDefHelper::Create(
"__inference_Dataset_map_lambda_10",
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"CompressElement"},
"CompressElement",
{"args_0"},
{{"input_types", DT_INT64}}},
{{"Identity"},
"Identity",
{"CompressElement:compressed:0"},
{{"T", DT_VARIANT}}},
},
{})});
RemoveCompressionMap optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("dataset", output);
EXPECT_EQ(output.node(index).input(0), "RangeDataset/_3");
}
TEST(RemoveCompressionMap, FailureNoMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef({NDef("Const/_0",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 0}}),
NDef("Const/_1",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 10}}),
NDef("Const/_2",
"Const",
{},
{{"dtype", DT_INT64},
{"value", 1}}),
NDef("RangeDataset/_3",
"RangeDataset",
{"Const/_0",
"Const/_1",
"Const/_2"},
{}),
NDef("dataset",
"_Retval",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}}),
NDef("Sink",
"Identity",
{"RangeDataset/_3"},
{{"T", DT_VARIANT}})});
RemoveCompressionMap optimizer;
GraphDef output;
ASSERT_THAT(optimizer.Optimize(nullptr, item, &output),
testing::StatusIs(error::INTERNAL,
HasSubstr("Compression function not found.")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/remove_compression_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/remove_compression_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
007ea024-5d57-428a-86ad-a1f23ced3e5b | cpp | tensorflow/tensorflow | make_deterministic | tensorflow/core/grappler/optimizers/data/make_deterministic.cc | tensorflow/core/grappler/optimizers/data/make_deterministic_test.cc | #include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include <algorithm>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/split_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kInterleaveOp[] = "InterleaveDataset";
constexpr char kParallelInterleaveOp[] = "ParallelInterleaveDataset";
constexpr char kLegacyParallelInterleaveOp[] =
"LegacyParallelInterleaveDatasetV2";
constexpr char kMapOp[] = "MapDataset";
constexpr char kParallelMapOp[] = "ParallelMapDataset";
constexpr char kParallelMapOpV2[] = "ParallelMapDatasetV2";
constexpr char kMapAndBatchOp[] = "MapAndBatchDataset";
constexpr char kBatchOp[] = "BatchDataset";
constexpr char kBatchV2Op[] = "BatchDatasetV2";
constexpr char kParallelBatchOp[] = "ParallelBatchDataset";
constexpr char kPrefetchOp[] = "PrefetchDataset";
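// Stateful ops that remain deterministic even when the function containing
// them runs with parallelism.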
constexpr std::array<const char*, 9> kDeterministicStatefulOps = {
"TextLineDataset", "FixedLengthRecordDataset", "TFRecordDataset",
"TensorSliceDataset", "RangeDataset", "SSTableDataset", "RecordIODataset",
"Print", "Assert"};
constexpr std::array<const char*, 13> kDeterministicStatefulOpsWhenAsync = {
"RandomUniform",
"RandomUniformInt",
"RandomStandardNormal",
"ParameterizedTruncatedNormal",
"TruncatedNormal",
"RandomShuffle",
"Multinomial",
"RandomGamma",
"RandomGammaGrad",
"RandomPoisson",
"RandomCrop",
"SampleDistortedBoundingBox",
"SampleDistortedBoundingBoxV2"};
bool IsDeterministicWhenRunInParallel(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsDeterministicWhenRunAsynchronously(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
for (auto op_in_array : kDeterministicStatefulOpsWhenAsync) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsParallelInterleave(const std::string& op) {
return data::MatchesAnyVersion(kParallelInterleaveOp, op) ||
op == kLegacyParallelInterleaveOp;
}
bool IsParallelMap(const std::string& op) {
return data::MatchesAnyVersion(kParallelMapOp, op);
}
bool IsParallelBatch(const std::string& op) {
return data::MatchesAnyVersion(kParallelBatchOp, op);
}
bool IsMapAndBatch(const std::string& op) {
return data::MatchesAnyVersion(kMapAndBatchOp, op);
}
bool IsPrefetch(const std::string& op) {
return data::MatchesAnyVersion(kPrefetchOp, op);
}
bool IntroducesFunctionParallelism(const std::string& op) {
return IsParallelInterleave(op) || IsParallelMap(op) || IsMapAndBatch(op);
}
bool IntroducesAsynchrony(const std::string& op) {
return IntroducesFunctionParallelism(op) || IsPrefetch(op) ||
IsParallelBatch(op);
}
absl::flat_hash_map<absl::string_view, const NodeDef*> NameToNode(
const FunctionDef& function) {
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node;
for (const NodeDef& node : function.node_def()) {
name_to_node.insert({node.name(), &node});
}
return name_to_node;
}
NodeDef* GetMutableNode(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
return graph->graph()->mutable_node(index);
}
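// Converts a ParallelMap or ParallelInterleave node to its sequential
// Map/Interleave counterpart. Inputs that the sequential op does not accept
// (e.g. num_parallel_calls) are demoted to control inputs, and the
// "deterministic"/"sloppy" attributes are dropped.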
Status ConvertMapOrInterleave(const string& node_name,
MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
auto Targuments = node->attr().find("Targuments");
if (Targuments == node->attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
int num_inputs_after_rewrite;
if (IsParallelInterleave(node->op())) {
node->set_op(kInterleaveOp);
num_inputs_after_rewrite = 3 + Targuments->second.list().type_size();
} else {
DCHECK(IsParallelMap(node->op()));
node->set_op(kMapOp);
num_inputs_after_rewrite = 1 + Targuments->second.list().type_size();
}
int inputs_processed = 0;
for (int i = 0; i < node->input_size(); i++) {
std::string input = node->input(i);
if (IsControlInput(input)) {
continue;
}
if (inputs_processed >= num_inputs_after_rewrite) {
node->set_input(i, absl::StrCat("^", input));
}
inputs_processed++;
}
if (inputs_processed < num_inputs_after_rewrite) {
return errors::Internal("Found only ", inputs_processed, " inputs to node ",
node_name, ", but expected to find at least ",
num_inputs_after_rewrite);
}
node->mutable_attr()->erase("deterministic");
node->mutable_attr()->erase("sloppy");
return absl::OkStatus();
}
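// Returns `nodes` together with every node of `function_def` they
// transitively depend on, following both data and control inputs.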
absl::flat_hash_set<absl::string_view> GetAllTransitiveDependencies(
const FunctionDef& function_def,
const absl::flat_hash_set<absl::string_view>& nodes) {
std::vector<absl::string_view> nodes_to_process;
std::copy(nodes.begin(), nodes.end(), std::back_inserter(nodes_to_process));
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node =
NameToNode(function_def);
absl::flat_hash_set<absl::string_view> dependencies;
while (!nodes_to_process.empty()) {
absl::string_view node_name = nodes_to_process.back();
nodes_to_process.pop_back();
if (dependencies.contains(node_name)) {
continue;
}
dependencies.insert(node_name);
auto iter = name_to_node.find(node_name);
if (iter == name_to_node.end()) {
continue;
}
for (absl::string_view inp : iter->second->input()) {
absl::string_view inp_node = inp.substr(0, inp.find(':'));
if (inp_node.at(0) == '^') {
inp_node = inp_node.substr(1);
}
if (name_to_node.contains(inp_node)) {
nodes_to_process.push_back(inp_node);
}
}
}
return dependencies;
}
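// Splits the function of a ParallelMapV2 or MapAndBatch node in two: a new
// sequential MapDataset runs the nondeterministic nodes (and everything they
// depend on), and the original parallel op is recreated to run the remaining,
// deterministic part of the function.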
Status SplitMap(
const FunctionLibraryDefinition& library, const string& map_node_name,
MutableGraphView* graph,
const absl::flat_hash_set<absl::string_view>& nondeterministic_nodes) {
NodeDef* map_node = GetMutableNode(map_node_name, graph);
NameAttrList func = map_node->attr().at("f").func();
const FunctionDef* function_def = library.Find(func.name());
if (!function_def) {
return errors::Internal("Could not look up function ", func.name(),
" in FunctionLibraryDefinition");
}
absl::flat_hash_set<absl::string_view> nodes_to_move =
GetAllTransitiveDependencies(*function_def, nondeterministic_nodes);
VLOG(2) << "Will move nodes to nonparallel function: "
<< absl::StrJoin(nodes_to_move, ", ");
int64_t num_captured_arguments =
map_node->attr().find("Targuments")->second.list().type_size();
TF_ASSIGN_OR_RETURN(
split_utils::SplitResults split_results,
split_utils::SplitFunction(*function_def, nodes_to_move,
num_captured_arguments, library));
if (split_results.first_function_output_types.empty()) {
return errors::Unimplemented(
"The case where the first function has no outputs is unimplemented.");
}
bool is_map_and_batch = map_node->op() == kMapAndBatchOp;
NodeDef* first_map_node_ptr;
{
NodeDef first_map_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_sequential_map/", map_node->name()),
graph->graph(), &first_map_node);
first_map_node.set_op(kMapOp);
int num_control_deps = NumControlInputs(*map_node);
int num_extra_inputs = is_map_and_batch ? 3 : 1;
int control_deps_index = map_node->input_size() - num_control_deps;
int extra_inputs_index = control_deps_index - num_extra_inputs;
for (int i = 0; i < extra_inputs_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
for (int i = extra_inputs_index; i < control_deps_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(absl::StrCat("^", map_node->input(i)));
}
for (int i = control_deps_index; i < map_node->input_size(); i++) {
DCHECK(IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*first_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.first_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &first_map_node);
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &first_map_node);
}
}
AddNodeAttr("output_types", split_results.first_function_output_types,
&first_map_node);
TensorShapeProto unknown_shape;
unknown_shape.set_unknown_rank(true);
std::vector<TensorShapeProto> output_shapes(
split_results.first_function_output_types.size(), unknown_shape);
AddNodeAttr("output_shapes", output_shapes, &first_map_node);
first_map_node_ptr = graph->AddNode(std::move(first_map_node));
}
NodeDef* second_map_node_ptr;
{
NodeDef second_map_node;
string node_name =
map_node->op() == kMapAndBatchOp ? "map_and_batch" : "parallel_map";
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_parallel_", node_name, "/",
map_node->name()),
graph->graph(), &second_map_node);
second_map_node.set_op(map_node->op());
second_map_node.add_input(first_map_node_ptr->name());
for (int i = 1; i < map_node->input_size(); i++) {
second_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*second_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.second_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_types", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_shapes", *map_node, &second_map_node);
if (!is_map_and_batch) {
AddNodeAttr("deterministic", "true", &second_map_node);
}
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &second_map_node);
}
}
second_map_node_ptr = graph->AddNode(std::move(second_map_node));
}
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(map_node->name(), second_map_node_ptr->name()));
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.first_function;
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.second_function;
return absl::OkStatus();
}
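// Converts a ParallelBatchDataset node to BatchDatasetV2, which takes
// (input_dataset, batch_size, drop_remainder); num_parallel_calls is kept as
// a control input.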
Status ConvertBatch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
node->set_op(kBatchV2Op);
std::string num_parallel_calls_input = node->input(2);
node->set_input(2, node->input(3));
node->set_input(3, absl::StrCat("^", num_parallel_calls_input));
node->mutable_attr()->erase("deterministic");
return absl::OkStatus();
}
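// Rewrites a MapAndBatchDataset node as a sequential MapDataset followed by
// BatchDatasetV2. The map's output shapes are the original output shapes with
// the leading batch dimension stripped.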
Status ConvertMapAndBatch(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
const NodeDef& orig_node = graph->graph()->node(index);
auto Targuments = orig_node.attr().find("Targuments");
if (Targuments == orig_node.attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
NodeDef new_map_node;
new_map_node.set_op(kMapOp);
graph_utils::SetUniqueGraphNodeName(kMapOp, graph->graph(), &new_map_node);
int num_map_inputs = 1 + Targuments->second.list().type_size();
for (int i = 0; i < num_map_inputs; i++) {
new_map_node.add_input(orig_node.input(i));
}
for (int i = num_map_inputs; i < orig_node.input_size(); i++) {
if (IsControlInput(orig_node.input(i))) {
new_map_node.add_input(orig_node.input(i));
} else {
new_map_node.add_input(absl::StrCat("^", orig_node.input(i)));
}
}
for (auto key : {"f", "Targuments", "output_types"}) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(new_map_node.attr(), key)) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
}
auto orig_output_shapes = orig_node.attr().find("output_shapes");
if (orig_output_shapes == orig_node.attr().end()) {
return errors::Internal("Failed to find output_shapes attribute for node ",
node_name);
}
AttrValue& map_output_shapes =
(*new_map_node.mutable_attr())["output_shapes"];
for (const TensorShapeProto& orig_shape :
orig_output_shapes->second.list().shape()) {
TensorShapeProto* new_shape = map_output_shapes.mutable_list()->add_shape();
if (orig_shape.unknown_rank()) {
new_shape->set_unknown_rank(true);
} else if (orig_shape.dim_size() == 0) {
return errors::Internal(
"Output shape of MapAndBatch node cannot be scalar");
} else {
for (int i = 1; i < orig_shape.dim_size(); i++) {
*new_shape->add_dim() = orig_shape.dim(i);
}
}
}
NodeDef new_batch_node;
new_batch_node.set_op(kBatchV2Op);
graph_utils::SetUniqueGraphNodeName(kBatchOp, graph->graph(),
&new_batch_node);
new_batch_node.add_input(new_map_node.name());
new_batch_node.add_input(orig_node.input(num_map_inputs));
new_batch_node.add_input(
orig_node.input(num_map_inputs + 2));
graph_utils::CopyShapesAndTypesAttrs(orig_node, &new_batch_node);
graph->AddNode(std::move(new_map_node));
NodeDef* graph_batch_node = graph->AddNode(std::move(new_batch_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(orig_node.name(), graph_batch_node->name()));
return absl::OkStatus();
}
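// Makes a PrefetchDataset synchronous by replacing its buffer size with a
// constant 0; the original buffer_size tensor is kept as a control input.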
Status ConvertPrefetch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
constexpr int buffer_size_index = 1;
node->add_input(absl::StrCat("^", node->input(buffer_size_index)));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(0, graph);
node->set_input(buffer_size_index, tmp->name());
return absl::OkStatus();
}
enum class NondeterminismType { PARALLELISM, ASYNCHRONY };
bool IsDeterministicStatefulOp(NondeterminismType type,
const std::string& stateful_op) {
return type == NondeterminismType::PARALLELISM
? IsDeterministicWhenRunInParallel(stateful_op)
: IsDeterministicWhenRunAsynchronously(stateful_op);
}
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed);
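// Returns true if `function_name` may introduce nondeterminism of the given
// type, recursing into any functions it calls. If `nondeterministic_nodes` is
// non-null, all offending nodes are collected instead of returning at the
// first one found.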
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed,
absl::flat_hash_set<absl::string_view>* nondeterministic_nodes) {
if (functions_processed->contains(function_name)) {
return false;
}
functions_processed->insert(function_name);
const FunctionDef* function_def = library.Find(function_name);
if (!function_def) {
VLOG(2) << "Could not look up function " << function_name
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool found = false;
for (const NodeDef& node_def : function_def->node_def()) {
bool nondeterministic = FunctionNodeMayIntroduceNondeterminism(
library, node_def, nondeterminism_type, functions_processed);
if (nondeterministic) {
if (nondeterministic_nodes) {
nondeterministic_nodes->insert(node_def.name());
found = true;
} else {
return true;
}
}
}
return found;
}
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type) {
absl::flat_hash_set<string> functions_processed;
return FunctionMayIntroduceNondeterminism(library, function_name,
nondeterminism_type,
&functions_processed, nullptr);
}
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = library.LookUp(node_def.op(), &op_reg_data);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool is_function_op = op_reg_data->is_function_op;
bool is_stateful = false;
if (!is_function_op) {
const OpDef* op_def;
s = OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in OpRegistry, so rewriting op to be safe";
return true;
}
is_stateful = op_def->is_stateful();
}
  if (is_stateful && !IsStatefulPartitionedCall(node_def) &&
!IsIf(node_def) && !IsWhile(node_def) &&
!IsDeterministicStatefulOp(nondeterminism_type, node_def.op())) {
VLOG(2) << "Will rewrite due to op: " << node_def.op();
return true;
}
std::vector<std::string> attr_func_names;
for (const auto& attr : node_def.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
if (is_function_op) {
attr_func_names.push_back(node_def.op());
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
nondeterminism_type,
functions_processed, nullptr)) {
return true;
}
}
return false;
}
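// Returns true if `node` is a dataset op and any function attached to it may
// introduce nondeterminism when run asynchronously. An op missing from the
// registry is treated as safe (NOT_FOUND), while any other lookup failure is
// conservatively treated as potentially nondeterministic.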
bool NodeMayIntroduceNondeterminismWhenAsync(
const FunctionLibraryDefinition& library, const NodeDef& node) {
const OpDef* op_def;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (s.code() == error::NOT_FOUND) {
return false;
} else if (!s.ok()) {
return true;
}
if (data::DatasetOpKernel::IsDatasetOp(*op_def)) {
std::vector<std::string> attr_func_names;
for (const auto& attr : node.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
NondeterminismType::ASYNCHRONY)) {
return true;
}
}
}
return false;
}
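// Returns true if any node in the graph or in its function library may
// introduce nondeterminism when run asynchronously, in which case all
// asynchrony-introducing dataset ops are rewritten.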
bool GraphMayHaveAsyncNondeterminism(const FunctionLibraryDefinition& library,
const GraphDef& graph) {
for (const NodeDef& node : graph.node()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
for (const string& function_name : library.ListFunctionNames()) {
const FunctionDef* function_def = library.Find(function_name);
CHECK(function_def);
for (const NodeDef& node : function_def->node_def()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
}
return false;
}
}  // namespace
Status MakeDeterministic::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
absl::flat_hash_set<string> nodes_to_delete;
bool remove_async_nodes =
GraphMayHaveAsyncNondeterminism(function_library, item.graph);
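  // First force the "sloppy"/"deterministic" attributes to deterministic
  // settings, then rewrite any op whose function is nondeterministic under
  // parallelism, and any asynchrony-introducing op if the graph has
  // asynchronous nondeterminism.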
for (const NodeDef& node : item.graph.node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["sloppy"].set_b(false);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["deterministic"].set_s("true");
stats->num_changes++;
}
bool rewrite_due_to_async =
IntroducesAsynchrony(node.op()) && remove_async_nodes;
absl::flat_hash_set<std::string> functions_processed;
absl::flat_hash_set<absl::string_view> nondeterministic_nodes;
bool rewrite_due_to_parallelism =
IntroducesFunctionParallelism(node.op()) &&
FunctionMayIntroduceNondeterminism(
function_library, node.attr().at("f").func().name(),
NondeterminismType::PARALLELISM, &functions_processed,
&nondeterministic_nodes);
if (!rewrite_due_to_async && !rewrite_due_to_parallelism) {
continue;
}
VLOG(1) << "Rewriting node " << node.name() << " (" << node.op()
<< ") because it introduces nondeterminism through "
<< (rewrite_due_to_async ? "asynchrony" : "parallelism");
bool maybe_can_split =
!rewrite_due_to_async &&
(node.op() == kParallelMapOpV2 || IsMapAndBatch(node.op()));
if (maybe_can_split) {
Status s = SplitMap(function_library, node.name(), &graph,
nondeterministic_nodes);
if (s.ok()) {
VLOG(1) << "Split node " << node.name() << " (" << node.op()
<< ") into two map nodes: a nonparallel version and a "
"parallel version.";
nodes_to_delete.insert(node.name());
continue;
} else if (s.code() == error::UNIMPLEMENTED) {
VLOG(1) << "Could not move stateful ops to their own function, so will "
"convert node "
<< node.name()
<< " to a nonparallel version instead. Reason: " << s;
} else {
return s;
}
}
if (IsPrefetch(node.op())) {
TF_RETURN_IF_ERROR(ConvertPrefetch(node.name(), &graph));
} else if (IsMapAndBatch(node.op())) {
TF_RETURN_IF_ERROR(ConvertMapAndBatch(node.name(), &graph));
nodes_to_delete.insert(node.name());
} else if (IsParallelBatch(node.op())) {
TF_RETURN_IF_ERROR(ConvertBatch(node.name(), &graph));
} else {
DCHECK(IsParallelInterleave(node.op()) || IsParallelMap(node.op()));
TF_RETURN_IF_ERROR(ConvertMapOrInterleave(node.name(), &graph));
}
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MakeDeterministic, "make_deterministic");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<string> GetNodeNames(const FunctionDef& func) {
std::vector<string> node_names;
for (const NodeDef& node : func.node_def()) {
node_names.push_back(node.name());
}
return node_names;
}
class SplitMapTest : public ::testing::TestWithParam<std::tuple<bool, bool>> {};
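// Verifies that a map function containing a stateful op
// (SampleDistortedBoundingBox) is split: the stateful op and its dependencies
// move into a sequential MapDataset, while the remaining nodes stay in the
// parallel op.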
TEST_P(SplitMapTest, SplitMapFunction) {
using test::function::NDef;
GrapplerItem item;
bool deterministic, rewrite_map_and_batch;
std::tie(deterministic, rewrite_map_and_batch) = GetParam();
if (deterministic && rewrite_map_and_batch) {
LOG(INFO) << "Skipping test because MapAndBatch does not have "
"'deterministic' attribute";
return;
}
FunctionDef orig_func_def = FunctionDefHelper::Create(
"MyFunction",
{"a1: float", "a2: float", "a3: double"},
{"o1: float", "o2: double"},
{},
{
{{"i1"}, "Identity", {"a2"}, {{"T", DT_FLOAT}}},
{{"i2"}, "Identity", {"i1:output"}, {{"T", DT_FLOAT}}},
{{"stateful"},
"SampleDistortedBoundingBox",
{"a1", "i2:output"},
{{"T", DT_FLOAT}}},
{{"i3"}, "Identity", {"stateful:bboxes:0"}, {{"T", DT_FLOAT}}},
{{"i4"}, "Identity", {"a3"}, {{"T", DT_DOUBLE}}},
},
{{"o1", "i3:output"}, {"o2", "i4:output"}});
NodeDef orig_map_node_def;
if (rewrite_map_and_batch) {
orig_map_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map", "range", "batch_size", "num_parallel_calls", "drop_remainder",
"MyFunction");
} else {
orig_map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "MyFunction",
deterministic ? "true" : "false", false);
}
orig_map_node_def.add_input("^start");
AttrValue* attr_val = &(*orig_map_node_def.mutable_attr())["Targuments"];
SetAttrValue(std::vector<DataType>{DT_DOUBLE}, attr_val);
(*orig_map_node_def.mutable_attr())["preserve_cardinality"].set_b(true);
attr_val = &(*orig_map_node_def.mutable_attr())["output_types"];
SetAttrValue(std::vector<DataType>{DT_FLOAT, DT_DOUBLE}, attr_val);
attr_val = &(*orig_map_node_def.mutable_attr())["output_shapes"];
SetAttrValue(std::vector<TensorShape>{{1}, {1}}, attr_val);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
orig_map_node_def},
{orig_func_def});
MakeDeterministic optimizer;
GraphDef output;
VLOG(1) << "GraphDef before optimization:\n"
<< item.graph.DebugString() << "\n\n";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
VLOG(1) << "GraphDef after optimization:\n" << output.DebugString() << "\n\n";
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef first_map_node_def = output.node(index);
if (rewrite_map_and_batch) {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^batch_size", "^num_parallel_calls",
"^drop_remainder", "^start"));
} else {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^num_parallel_calls", "^start"));
}
std::vector<DataType> t_arguments;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
std::vector<DataType> output_types;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT));
std::vector<TensorShapeProto> output_shapes;
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_TRUE(shape.unknown_rank());
}
bool preserve_cardinality;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef first_func = output.library().function(index);
ASSERT_TRUE(first_func.signature().is_stateful());
ASSERT_THAT(GetNodeNames(first_func),
::testing::UnorderedElementsAre("i1", "i2", "stateful"));
NodeDef second_map_node_def;
if (rewrite_map_and_batch) {
index = graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(), "batch_size",
"num_parallel_calls", "drop_remainder",
"^start"));
} else {
index = graph_utils::FindGraphNodeWithOp("ParallelMapDatasetV2", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(),
"num_parallel_calls", "^start"));
ASSERT_EQ(second_map_node_def.attr().at("deterministic").s(), "true");
}
t_arguments.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
output_types.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT, DT_DOUBLE));
output_shapes.clear();
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_EQ(shape.dim_size(), 0);
}
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef second_func = output.library().function(index);
ASSERT_THAT(GetNodeNames(second_func),
::testing::UnorderedElementsAre("i3", "i4"));
}
INSTANTIATE_TEST_SUITE_P(Test, SplitMapTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
FunctionDef OuterXTimesTwo() {
return FunctionDefHelper::Define(
"OuterXTimesTwo",
{"x: float"},
{"y: float"},
{},
{{{"y"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterRandomUniform() {
return FunctionDefHelper::Define(
"OuterRandomUniform",
{"x: float"},
{"random_uniform: int64"},
{},
{{{"random_uniform"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_INT64}},
{"f", FunctionDefHelper::FunctionRef("RandomUniformFn",
{{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterReadResourceVariable() {
return FunctionDefHelper::Define(
"OuterReadResourceVariable",
{"x: resource"},
{"y: float"},
{},
{{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("ReadResourceVariable", {})}}}});
}
class MakeDeterministicTest
: public ::testing::TestWithParam<std::tuple<bool, bool>> {};
TEST_P(MakeDeterministicTest, NoRewriteInterleave) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic)},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelInterleaveDatasetV2");
ASSERT_EQ(node_def.attr().at("sloppy").b(), false);
}
TEST_P(MakeDeterministicTest, NoRewriteMap) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false)},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelMapDatasetV2");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelBatchDataset");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 2);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(1), "buffer_size");
NodeDef buffer_size =
output.node(graph_utils::FindGraphNodeWithName("buffer_size", output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 1);
}
TEST_P(MakeDeterministicTest, RewriteInterleave) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
NodeDef interleave_node_def = graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic);
interleave_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
interleave_node_def},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("InterleaveDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "cycle_length");
ASSERT_EQ(node_def.input(2), "block_length");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
}
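// Why a map function cannot be split in two: the function carries attributes
// (which the split utility appears not to support), or the rewrite is
// triggered by asynchronous nondeterminism, in which case no split is
// attempted.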
enum CannotSplitReason { FUNC_HAS_ATTR, ASYNC_NONDETERMINISM };
class RewriteMapWithoutSplitTest
: public ::testing::TestWithParam<
std::tuple<bool, bool, CannotSplitReason>> {};
TEST_P(RewriteMapWithoutSplitTest, RewriteMapWithoutSplit) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
CannotSplitReason reason;
std::tie(nest, deterministic, reason) = GetParam();
FunctionDef func;
FunctionDef outer_func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
outer_func = OuterRandomUniform();
(*outer_func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
outer_func = OuterReadResourceVariable();
}
std::string func_name =
nest ? outer_func.signature().name() : func.signature().name();
NodeDef map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false", false);
map_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
map_node_def},
{func, outer_func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "^num_parallel_calls");
ASSERT_EQ(node_def.input(2), "^start");
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(node_def, "f", &f));
ASSERT_EQ(f.name(), func_name);
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
}
TEST_P(MakeDeterministicTest, RewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
NodeDef batch_node_def = graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false");
batch_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
batch_node_def},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "map");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "drop_remainder");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
ASSERT_EQ(node_def.attr().count("deterministic"), 0);
}
TEST_P(MakeDeterministicTest, RewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(2), "^buffer_size");
NodeDef buffer_size = output.node(
graph_utils::FindGraphNodeWithName(node_def.input(1), output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, MakeDeterministicTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapWithoutSplitTest,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
TEST(NoRewriteMapAndBatchTest, NoRewriteMapAndBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{test::function::XTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map_and_batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 4);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "num_parallel_calls");
ASSERT_EQ(node_def.input(3), "drop_remainder");
}
class RewriteMapAndBatchWithoutSplitTest
: public ::testing::TestWithParam<std::tuple<bool, CannotSplitReason>> {};
TEST_P(RewriteMapAndBatchWithoutSplitTest, RewriteMapAndBatchWithoutSplit) {
using test::function::NDef;
GrapplerItem item;
bool nest;
CannotSplitReason reason;
std::tie(nest, reason) = GetParam();
FunctionDef func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
}
NodeDef map_and_batch_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", func.signature().name());
SetAttrValue(
absl::Span<const PartialTensorShape>{
{2}, {-1, 3, -1}, PartialTensorShape()},
&(*map_and_batch_node_def.mutable_attr())["output_shapes"]);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
map_and_batch_node_def},
{func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef map_node_def = output.node(index);
ASSERT_EQ(map_node_def.input_size(), 4);
ASSERT_EQ(map_node_def.input(0), "range");
ASSERT_EQ(map_node_def.input(1), "^batch_size");
ASSERT_EQ(map_node_def.input(2), "^num_parallel_calls");
ASSERT_EQ(map_node_def.input(3), "^drop_remainder");
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("f"),
map_node_def.attr().at("f")));
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("Targuments"),
map_node_def.attr().at("Targuments")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
map_node_def.attr().at("output_types")));
ASSERT_EQ(map_node_def.attr().at("output_shapes").list().shape_size(), 3);
ASSERT_TRUE(PartialTensorShape({}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(0)));
ASSERT_TRUE(PartialTensorShape({3, -1}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(1)));
ASSERT_TRUE(PartialTensorShape().IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(2)));
index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef batch_node_def = output.node(index);
ASSERT_EQ(batch_node_def.input_size(), 3);
ASSERT_EQ(batch_node_def.input(0), map_node_def.name());
ASSERT_EQ(batch_node_def.input(1), "batch_size");
ASSERT_EQ(batch_node_def.input(2), "drop_remainder");
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
batch_node_def.attr().at("output_types")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_shapes"),
batch_node_def.attr().at("output_shapes")));
}
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapAndBatchWithoutSplitTest,
::testing::Combine(::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_deterministic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_deterministic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
477f035e-6214-4586-947e-0ba30d6c6af9 | cpp | tensorflow/tensorflow | replicate_on_split | tensorflow/core/grappler/optimizers/data/replicate_on_split.cc | tensorflow/core/grappler/optimizers/data/replicate_on_split_test.cc | #include "tensorflow/core/grappler/optimizers/data/replicate_on_split.h"
#include "absl/log/log.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
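// Sets the "replicate_on_split" attribute on every op that defines it. This
// presumably directs such datasets to replicate their data rather than split
// it when splits are requested (e.g. under tf.data service auto-sharding).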
Status ReplicateOnSplit::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
VLOG(1) << "Running replicate on split optimization";
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (graph_utils::HasReplicateOnSplitAttr(node.op())) {
(*node.mutable_attr())["replicate_on_split"].set_b(true);
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ReplicateOnSplit, "replicate_on_split");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/replicate_on_split.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(ReplicateOnSplit, TensorSliceDataset) {
using test::function::NDef;
GrapplerItem item;
Tensor tensor = test::AsTensor<int32>({32, 32});
item.graph = test::function::GDef(
{NDef("tensor", "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeTensorSliceNode("tensor_slice_dataset", "tensor",
false)});
ReplicateOnSplit optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName("tensor_slice_dataset", output));
int index =
graph_utils::FindGraphNodeWithName("tensor_slice_dataset", output);
EXPECT_TRUE(output.node(index).attr().at("replicate_on_split").b());
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/replicate_on_split.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/replicate_on_split_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33c87f73-931a-4cca-94c8-eb111cc28f6c | cpp | tensorflow/tensorflow | noop_elimination | tensorflow/core/grappler/optimizers/data/noop_elimination.cc | tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc | #include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
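// This optimizer removes dataset transformations that have no effect, such as
// take(-1), skip(0), repeat(1), shard(1, ...), and maps whose function is an
// identity.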
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kIdentity[] = "Identity";
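// A TakeDataset whose count is a negative constant takes the entire input and
// is therefore a no-op.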
bool IsTakeAll(const NodeDef& take_node, const MutableGraphView& graph) {
if (take_node.op() != "TakeDataset") return false;
const auto& count_node = *graph.GetNode(take_node.input(1));
if (count_node.op() != "Const") return false;
const auto& tensor = count_node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) < 0;
return false;
}
bool IsConstNodeWithValue(const NodeDef& node, int value) {
if (node.op() != "Const") return false;
const auto& tensor = node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) == value;
return value == 0;
}
bool IsSkipNone(const NodeDef& skip_node, const MutableGraphView& graph) {
if (skip_node.op() != "SkipDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(skip_node.input(1)), 0);
}
bool IsRepeatOne(const NodeDef& repeat_node, const MutableGraphView& graph) {
if (repeat_node.op() != "RepeatDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(repeat_node.input(1)), 1);
}
bool IsShardOne(const NodeDef& shard_node, const MutableGraphView& graph) {
if (shard_node.op() != "ShardDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(shard_node.input(1)), 1);
}
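// Returns true if `output_arg` of the function is `input_arg`, possibly
// routed through a chain of Identity nodes.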
bool IsOutputIdentityOfInput(const FunctionDef& fdef, const string& output_arg,
const string& input_arg) {
if (!fdef.ret().contains(output_arg)) {
LOG(WARNING)
<< "Malformed FunctionDef: ret dict does not contain output arg key.";
return false;
}
const auto& ret_val = fdef.ret().at(output_arg);
auto input = function_utils::FunctionDefTensorDesc(ret_val);
while (function_utils::ContainsFunctionNodeWithName(input.node_name, fdef)) {
int idx = function_utils::FindFunctionNodeWithName(input.node_name, fdef);
const NodeDef& node = fdef.node_def(idx);
if (node.op() != kIdentity) {
return false;
}
input = function_utils::FunctionDefTensorDesc(node.input(0));
}
return input.node_name == input_arg;
}
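// A map is a no-op if it captures no arguments, its function is stateless,
// and every output is an identity of the corresponding input.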
bool IsMapIdentity(const NodeDef& map_node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
if (map_node.op() != "MapDataset" && map_node.op() != "ParallelMapDataset" &&
map_node.op() != "ParallelMapDatasetV2") {
return false;
}
if (map_node.attr().at("Targuments").list().type_size() != 0) return false;
const FunctionDef* fdef =
function_library.Find(map_node.attr().at("f").func().name());
if (function_utils::IsFunctionStateful(function_library, *fdef)) {
return false;
}
const auto& sig = fdef->signature();
if (sig.input_arg_size() != sig.output_arg_size()) return false;
for (int i = 0; i < sig.input_arg_size(); ++i) {
if (!IsOutputIdentityOfInput(*fdef, sig.output_arg(i).name(),
sig.input_arg(i).name())) {
return false;
}
}
return true;
}
bool IsNoOp(const NodeDef& node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
return IsTakeAll(node, graph) || IsSkipNone(node, graph) ||
IsRepeatOne(node, graph) || IsShardOne(node, graph) ||
IsMapIdentity(node, graph, function_library);
}
}  // namespace
Status NoOpElimination::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.graph()->library());
for (const NodeDef& node : item.graph.node()) {
if (!IsNoOp(node, graph, function_library)) continue;
NodeDef* const parent = graph_utils::GetInputNode(node, graph);
TF_RETURN_IF_ERROR(graph.UpdateFanouts(node.name(), parent->name()));
nodes_to_delete.insert(node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(NoOpElimination, "noop_elimination");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
#include <tuple>
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<std::pair<string, AttrValue>> GetCommonAttributes() {
AttrValue shapes_attr, types_attr;
SetAttrValue("output_shapes", &shapes_attr);
SetAttrValue("output_types", &types_attr);
std::vector<std::pair<string, AttrValue>> commonAttributes = {
{"output_shapes", shapes_attr}, {"output_types", types_attr}};
return commonAttributes;
}
NodeDef *MakeNode(StringPiece node_type, std::vector<int> params,
string input_node, MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (int param : params) {
node_params.push_back(
graph_utils::AddScalarConstNode<int64_t>(param, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeNonConstNode(StringPiece node_type,
std::vector<DataType> param_dtypes, string input_node,
MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (DataType dtype : param_dtypes) {
node_params.push_back(graph_utils::AddScalarPlaceholder(dtype, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeCacheNode(string input_node, MutableGraphView *graph) {
NodeDef *node_filename =
graph_utils::AddScalarConstNode<StringPiece>("", graph);
return graph_utils::AddNode("", "CacheDataset",
{std::move(input_node), node_filename->name()},
GetCommonAttributes(), graph);
}
NodeDef *MakeRangeNode(MutableGraphView *graph) {
auto *start_node = graph_utils::AddScalarConstNode<int64_t>(0, graph);
auto *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, graph);
auto *step_node = graph_utils::AddScalarConstNode<int64_t>(1, graph);
std::vector<string> range_inputs = {start_node->name(), stop_node->name(),
step_node->name()};
return graph_utils::AddNode("", "RangeDataset", range_inputs,
GetCommonAttributes(), graph);
}
struct NoOpLastEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpLastEliminationTest, EliminateLastNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
}
INSTANTIATE_TEST_SUITE_P(
BasicRemovalTest, NoOpLastEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
struct NoOpMiddleEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpMiddleEliminationTest, EliminateMiddleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NodeDef *cache_node = MakeCacheNode(node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
auto last_node_input = (should_keep_node ? node : range_node)->name();
EXPECT_EQ(cache_node_out.input(0), last_node_input);
}
INSTANTIATE_TEST_SUITE_P(
BasicRemovalTest, NoOpMiddleEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
using NodesTypes = std::tuple<std::pair<string, std::vector<int>>,
std::pair<string, std::vector<int>>>;
struct NoOpMultipleEliminationTest : ::testing::TestWithParam<NodesTypes> {};
TEST_P(NoOpMultipleEliminationTest, EliminateMultipleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<int>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *previous = range_node;
std::vector<string> nodes_to_remove;
nodes_to_remove.reserve(noop_nodes.size());
for (const auto &noop_node : noop_nodes) {
NodeDef *node =
MakeNode(noop_node.first, noop_node.second, previous->name(), &graph);
nodes_to_remove.push_back(node->name());
previous = node;
}
NodeDef *cache_node = MakeCacheNode(previous->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_remove)
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
EXPECT_EQ(cache_node_out.input(0), range_node->name());
}
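// The test-case constants are heap-allocated and intentionally leaked so the
// globals have no destructors to run at program exit, in line with the usual
// style rule against non-trivially-destructible static objects.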
const auto *const kTakeNode =
new std::pair<string, std::vector<int>>{"TakeDataset", {-1}};
const auto *const kSkipNode =
new std::pair<string, std::vector<int>>{"SkipDataset", {0}};
const auto *const kRepeatNode =
new std::pair<string, std::vector<int>>{"RepeatDataset", {1}};
const auto *const kShardNode =
new std::pair<string, std::vector<int>>{"ShardDataset", {1, 0}};
INSTANTIATE_TEST_SUITE_P(
BasicRemovalTest, NoOpMultipleEliminationTest,
::testing::Combine(
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode),
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode)));
struct NoOpPlaceholdersTest
: ::testing::TestWithParam<
std::tuple<std::pair<string, std::vector<DataType>>,
std::pair<string, std::vector<DataType>>>> {};
TEST_P(NoOpPlaceholdersTest, NonConstNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<DataType>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
std::vector<string> nodes_to_keep;
nodes_to_keep.reserve(noop_nodes.size());
NodeDef *previous = range_node;
for (const auto &noop_node : noop_nodes) {
NodeDef *node = MakeNonConstNode(noop_node.first, noop_node.second,
previous->name(), &graph);
nodes_to_keep.push_back(node->name());
previous = node;
}
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_keep)
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
}
const auto *const kNonConstTakeNode =
new std::pair<string, std::vector<DataType>>{"TakeDataset", {DT_INT32}};
const auto *const kNonConstSkipNode =
new std::pair<string, std::vector<DataType>>{"SkipDataset", {DT_INT32}};
const auto *const kNonConstRepeatNode =
new std::pair<string, std::vector<DataType>>{"RepeatDataset", {DT_INT32}};
const auto *const kNonConstShardNode =
new std::pair<string, std::vector<DataType>>{"ShardDataset",
{DT_INT32, DT_INT32}};
INSTANTIATE_TEST_SUITE_P(
DoNotRemovePlaceholders, NoOpPlaceholdersTest,
::testing::Combine(::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode),
::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/noop_elimination.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/noop_elimination_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8b78d62-c279-4dac-875d-cb2ea61040db | cpp | tensorflow/tensorflow | inject_io_prefetch | tensorflow/core/grappler/optimizers/data/inject_io_prefetch.cc | tensorflow/core/grappler/optimizers/data/inject_io_prefetch_test.cc | #include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h"
#include <array>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAutotune[] = "autotune";
constexpr char kFunctionAttrKey[] = "f";
constexpr char kParallelInterleave[] = "ParallelInterleaveDataset";
constexpr char kParallelMap[] = "ParallelMapDataset";
constexpr char kPrefetch[] = "PrefetchDataset";
constexpr std::array<const char*, 5> kAsync = {
"MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset",
"ParallelMapDataset", "PrefetchDataset"};
constexpr std::array<const char*, 6> kIo = {
"ArrayRecordDataset", "FixedLengthRecordDataset", "RecordIODataset",
"SSTableDataset", "TextLineDataset", "TFRecordDataset"};
bool IsAsync(const NodeDef* node) {
if (!node) {
return false;
}
return absl::c_any_of(kAsync, [&](const char* dataset) {
return data::MatchesAnyVersion(dataset, node->op());
});
}
bool IsIo(const NodeDef* node) {
if (!node) {
return false;
}
return absl::c_any_of(kIo, [&](const char* dataset) {
return data::MatchesAnyVersion(dataset, node->op());
});
}
bool IsIo(const FunctionDef& function) {
for (const auto& node : function.node_def()) {
if (IsIo(&node)) {
return true;
}
}
return false;
}
bool IsIoFunction(const std::string& function_name,
const MutableGraphView& graph) {
for (const auto& function : graph.graph()->library().function()) {
if (function.signature().name() == function_name) {
return IsIo(function);
}
}
return false;
}
bool HasIoFunction(const NodeDef* node, const MutableGraphView& graph) {
if (auto it = node->attr().find(kFunctionAttrKey); it != node->attr().end()) {
return IsIoFunction(it->second.func().name(), graph);
}
return false;
}
bool IsParallelInterleaveWithIo(const NodeDef* node,
const MutableGraphView& graph) {
if (!node || !data::MatchesAnyVersion(kParallelInterleave, node->op())) {
return false;
}
return HasIoFunction(node, graph);
}
bool IsParallelMap(const NodeDef* node) {
if (!node) {
return false;
}
return data::MatchesAnyVersion(kParallelMap, node->op());
}
bool IsPrefetch(const NodeDef* node) {
if (!node) {
return false;
}
return node->op() == kPrefetch;
}
struct Edge {
NodeDef* input;
NodeDef* output;
template <typename H>
friend H AbslHashValue(H h, const Edge& e) {
return H::combine(std::move(h), e.input, e.output);
}
friend bool operator==(const Edge& lhs, const Edge& rhs) {
return lhs.input == rhs.input && lhs.output == rhs.output;
}
};
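// Adds a PrefetchDataset with an autotuned buffer size immediately after
// `edge.input`, copying its shape/type attrs, and redirects the former
// consumers of `edge.input` to read from the new prefetch node instead.
// Returns false if the shape/type attrs could not be copied.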
absl::StatusOr<bool> InjectPrefetch(const Edge& edge, MutableGraphView& graph) {
NodeDef prefetch;
graph_utils::SetUniqueGraphNodeName(
absl::StrCat("inject/io_prefetch", edge.input->name()), graph.graph(),
&prefetch);
prefetch.set_op(kPrefetch);
*prefetch.mutable_input()->Add() = edge.input->name();
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
*prefetch.mutable_input()->Add() = autotune_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*edge.input, &prefetch)) {
return false;
}
TF_RETURN_IF_ERROR(graph_utils::SetMetadataName(prefetch.name(), &prefetch));
NodeDef* added_prefetch = graph.AddNode(std::move(prefetch));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(edge.input->name(), added_prefetch->name()));
return true;
}
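// Walks the dataset graph upstream from `node` (whose downstream consumer is
// `output`), tracking the most recent asynchronous transformations seen on
// the path. An injection edge is recorded either (a) above a ParallelMap
// that ultimately feeds from an IO source dataset with no prefetch already
// behind it, or (b) directly above a ParallelInterleave whose mapped
// function reads from an IO dataset, again only when no prefetch is already
// present.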
void GetPrefetchInjectionEdges(
const MutableGraphView& graph, NodeDef* node, NodeDef* output,
NodeDef* output_output, NodeDef* last_async, NodeDef* last_async_output,
NodeDef* last_last_async,
absl::flat_hash_set<Edge>& prefetch_injection_edges) {
if (!node) {
return;
}
if (IsAsync(output)) {
last_last_async = last_async;
last_async_output = output_output;
last_async = output;
}
if (IsIo(node)) {
if (IsParallelMap(last_async) && !IsPrefetch(last_last_async)) {
prefetch_injection_edges.insert({last_async, last_async_output});
}
return;
}
if (IsParallelInterleaveWithIo(node, graph)) {
if (!IsPrefetch(last_async)) {
prefetch_injection_edges.insert({node, output});
}
return;
}
for (int64_t i = 0; i < node->input_size(); ++i) {
NodeDef* input = graph_utils::GetInputNode(*node, graph, i);
    GetPrefetchInjectionEdges(graph, /*node=*/input, /*output=*/node,
                              /*output_output=*/output, last_async,
                              last_async_output, last_last_async,
                              prefetch_injection_edges);
}
}
absl::StatusOr<absl::flat_hash_set<Edge>> GetPrefetchInjectionEdges(
const GrapplerItem& item, const MutableGraphView& graph) {
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) {
return absl::flat_hash_set<Edge>();
}
if (item.fetch.size() != 1) {
return absl::InvalidArgumentError(
absl::StrCat("Expected only one fetch node but there were ",
item.fetch.size(), ": ", absl::StrJoin(item.fetch, ", ")));
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
absl::flat_hash_set<Edge> prefetch_injection_edges;
  GetPrefetchInjectionEdges(
      graph, /*node=*/last_node, /*output=*/sink_node,
      /*output_output=*/nullptr,
      /*last_async=*/nullptr, /*last_async_output=*/nullptr,
      /*last_last_async=*/nullptr, prefetch_injection_edges);
return prefetch_injection_edges;
}
}
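// The "eligible" variant only counts how many prefetches would be injected,
// surfacing the number through OptimizationStats without rewriting the
// graph; the full InjectIoPrefetch pass below performs the actual rewrite.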
absl::Status InjectIoPrefetchEligible::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
return absl::OkStatus();
}
MutableGraphView graph(output);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges,
GetPrefetchInjectionEdges(item, graph));
stats->num_changes += prefetch_injection_edges.size();
return absl::OkStatus();
}
absl::Status InjectIoPrefetch::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
return absl::OkStatus();
}
MutableGraphView graph(output);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges,
GetPrefetchInjectionEdges(item, graph));
for (const auto& edge : prefetch_injection_edges) {
TF_ASSIGN_OR_RETURN(bool success, InjectPrefetch(edge, graph));
stats->num_changes += success;
}
return absl::OkStatus();
}
absl::Status InjectIoPrefetch::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) {
return absl::OkStatus();
}
const std::string& autotune = config->parameter_map().at(kAutotune).s();
if (autotune == "true") {
autotune_ = true;
} else if (autotune == "false") {
autotune_ = false;
} else {
return absl::InvalidArgumentError(absl::StrCat(
"Received an invalid value for parameter ", kAutotune, ": ", autotune));
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(InjectIoPrefetch, "inject_io_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::GDef;
using test::function::NDef;
FunctionDef InterleaveIoFunction(const std::string& name) {
return FunctionDefHelper::Create(
name,
{"args_0: int64"},
{"identity: variant"},
{},
{
{{"key_prefix"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"start_key"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"stop_key"}, "Const", {}, {{"dtype", DT_STRING}}},
{{"SSTableDataset"},
"SSTableDataset",
{"args_0", "key_prefix:output:0", "start_key:output:0",
"stop_key:output:0"},
{}},
},
{});
}
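// The mapped function above contains an SSTableDataset, one of the IO source
// datasets the pass looks for, so interleaves invoking it are eligible for
// prefetch injection.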
GraphDef EligibleInterleaveCase() {
return GDef(
{NDef("files_string_1", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("files_tensor_1", "TensorSliceDataset", {"files_1_string"},
{{"is_files", true}}),
NDef("cycle_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls_1", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave_1", "files_tensor_1", "cycle_length_1", "block_length_1",
"num_parallel_calls_1", "io_1", "default"),
NDef("files_string_2", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("files_tensor_2", "TensorSliceDataset", {"files_2_string"},
{{"is_files", true}}),
NDef("cycle_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls_2", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave_2", "files_tensor_2", "cycle_length_2", "block_length_2",
"num_parallel_calls_2", "io_2", "default"),
NDef("zip", "ZipDataset", {"interleave_1", "interleave_2"}, {}),
NDef("Sink", "Identity", {"zip"}, {})},
{InterleaveIoFunction("io_1"), InterleaveIoFunction("io_2")});
}
GraphDef EligibleMapCase() {
return GDef(
{NDef("files_1", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("key_prefix_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("start_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("stop_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("io_1", "SSTableDataset",
{"files_1", "key_prefix_1", "start_key_1", "stop_key_1"}, {}),
NDef("num_parallel_calls_1", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map_1", "io_1", "num_parallel_calls_1", "noop_1",
"default", false),
NDef("files_2", "Const", {},
{{"value", "file1file2"}, {"dtype", DT_STRING}}),
NDef("key_prefix_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("start_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("stop_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}),
NDef("io_2", "SSTableDataset",
{"files_2", "key_prefix_2", "start_key_2", "stop_key_2"}, {}),
NDef("num_parallel_calls_2", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map_2", "io_2", "num_parallel_calls_2", "noop_2",
"default", false),
NDef("zip", "ZipDataset", {"map_1", "map_2"}, {}),
NDef("Sink", "Identity", {"zip"}, {})},
{});
}
TEST(InjectIoPrefetchEligible, EligibleInterleaveCaseHasNoInjection) {
GrapplerItem item;
item.graph = EligibleInterleaveCase();
item.fetch.push_back("Sink");
InjectIoPrefetchEligible optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_NE(input_node.op(), "PrefetchDataset");
}
EXPECT_EQ(item.graph.DebugString(), output.DebugString());
}
TEST(InjectIoPrefetchEligible, EligibleMapCaseHasNoInjection) {
GrapplerItem item;
item.graph = EligibleMapCase();
item.fetch.push_back("Sink");
InjectIoPrefetchEligible optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_NE(input_node.op(), "PrefetchDataset");
}
EXPECT_EQ(item.graph.DebugString(), output.DebugString());
}
TEST(InjectIoPrefetch, InterleaveCaseHasInjection) {
GrapplerItem item;
item.graph = EligibleInterleaveCase();
item.fetch.push_back("Sink");
InjectIoPrefetch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_EQ(input_node.op(), "PrefetchDataset");
}
}
TEST(InjectIoPrefetch, MapCaseHasInjection) {
GrapplerItem item;
item.graph = EligibleMapCase();
item.fetch.push_back("Sink");
InjectIoPrefetch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
NodeDef zip_node =
output.node(graph_utils::FindGraphNodeWithName("zip", output));
for (const auto& input_node_name : zip_node.input()) {
NodeDef input_node = output.node(
graph_utils::FindGraphNodeWithName(input_node_name, output));
EXPECT_EQ(input_node.op(), "PrefetchDataset");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_io_prefetch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_io_prefetch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b8548cf-f615-4eb1-8311-cc3d750d5b78 | cpp | tensorflow/tensorflow | fusion_utils | tensorflow/core/grappler/optimizers/data/fusion_utils.cc | tensorflow/core/grappler/optimizers/data/fusion_utils_test.cc | #include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
constexpr char kControlInputPrefix[] = "^";
bool IsControlInput(const string& node_input) {
return absl::StartsWith(node_input, kControlInputPrefix);
}
string StripControlInputNotation(const string& node_input) {
return string(absl::StripPrefix(node_input, kControlInputPrefix));
}
string AddControlInputNotation(const string& node_input) {
return absl::StrCat(kControlInputPrefix, node_input);
}
string ParseNodeConnection(const string& name) {
return name.substr(0, name.find(':'));
}
string ParseOutputNode(const string& name) {
if (name.find(':') == string::npos) return {};
return name.substr(name.find(':'), string::npos);
}
string GetOutputNode(const FunctionDef& function, int output_idx) {
const auto& ret_output_name =
function.signature().output_arg(output_idx).name();
return function.ret().at(ret_output_name);
}
string& GetMutableOutputNode(FunctionDef* function, int output_idx) {
const auto& ret_output_name =
function->signature().output_arg(output_idx).name();
return function->mutable_ret()->at(ret_output_name);
}
template <typename Iterable>
StringCollection GetNames(const Iterable& iterable, int allocate_size) {
StringCollection names;
names.reserve(allocate_size);
for (auto& arg : iterable) names.push_back(arg.name());
return names;
}
template <typename Iterable>
gtl::FlatSet<string> GetNodeNamesSet(const Iterable& nodes) {
gtl::FlatSet<string> names;
for (const auto& node : nodes) {
CHECK(gtl::InsertIfNotPresent(&names, node.name()))
<< "Functions should have unique node names. Node with name "
<< node.name() << " already exists";
}
return names;
}
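// Computes new names for nodes in `second_iterable` whose names collide with
// names in `first_iterable` (or with names already assigned), by appending
// "/_<id>" until the name is unique. Returns a map from old to new name.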
template <typename Iterable>
gtl::FlatMap<string, string> GetUniqueNames(const Iterable& first_iterable,
const Iterable& second_iterable) {
gtl::FlatMap<string, string> changed_node_names;
const auto first_names = GetNodeNamesSet(first_iterable);
  auto second_names = GetNodeNamesSet(second_iterable);
int id = second_iterable.size();
for (const auto& node : second_iterable) {
string name_before = node.name();
string name = name_before;
bool changed_name = false;
while (first_names.count(name) ||
(changed_name && second_names.count(name))) {
name = strings::StrCat(name_before, "/_", id);
changed_name = true;
++id;
}
if (changed_name) {
changed_node_names[name_before] = name;
second_names.insert(std::move(name));
}
}
return changed_node_names;
}
void RenameFunctionNodes(
const FunctionDef& first_function,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse,
protobuf::Map<string, string>* rets_to_fuse,
protobuf::Map<string, string>* control_rets_to_fuse,
protobuf::RepeatedPtrField<string>* control_outputs_to_fuse) {
const gtl::FlatMap<string, string> changed_node_names =
GetUniqueNames(first_function.node_def(), *nodes_to_fuse);
auto updated_name = [&changed_node_names](const string& input) {
string input_node = ParseNodeConnection(input);
auto iter = changed_node_names.find(input_node);
if (iter != changed_node_names.end()) {
return iter->second + ParseOutputNode(input);
}
return input;
};
for (NodeDef& function_node : *nodes_to_fuse) {
if (const string* new_name =
gtl::FindOrNull(changed_node_names, function_node.name())) {
function_node.set_name(*new_name);
}
for (string& input : *function_node.mutable_input()) {
input = updated_name(input);
}
}
for (auto& [unused, ret_node] : *rets_to_fuse) {
ret_node = updated_name(ret_node);
}
protobuf::Map<string, string> new_control_rets_to_fuse;
protobuf::RepeatedPtrField<string> new_control_outputs_to_fuse;
for (const auto& [unused, control_ret_node] : *control_rets_to_fuse) {
string updated_control_ret_node = updated_name(control_ret_node);
new_control_rets_to_fuse.insert(
{updated_control_ret_node, updated_control_ret_node});
*new_control_outputs_to_fuse.Add() = updated_control_ret_node;
}
*control_rets_to_fuse = new_control_rets_to_fuse;
*control_outputs_to_fuse = new_control_outputs_to_fuse;
}
StringCollection GetFunctionInputs(const FunctionDef& function) {
return GetNames(function.signature().input_arg(),
function.signature().input_arg_size());
}
OpDef GetUniqueSignature(const OpDef& first_signature,
const OpDef& second_signature,
protobuf::Map<string, string>* rets_to_fuse,
protobuf::Map<string, string>* control_rets_to_fuse,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
const gtl::FlatMap<string, string> changed_input_names =
GetUniqueNames(first_signature.input_arg(), second_signature.input_arg());
OpDef signature;
signature.set_name(second_signature.name());
for (const auto& input_arg : second_signature.input_arg()) {
auto& input = *signature.add_input_arg();
input = input_arg;
if (const string* new_name =
gtl::FindOrNull(changed_input_names, input.name())) {
input.set_name(*new_name);
}
}
const gtl::FlatMap<string, string> changed_output_names = GetUniqueNames(
first_signature.output_arg(), second_signature.output_arg());
for (const auto& output_arg : second_signature.output_arg()) {
auto& output = *signature.add_output_arg();
output = output_arg;
if (const string* new_name =
gtl::FindOrNull(changed_output_names, output.name())) {
output.set_name(*new_name);
}
}
auto new_rets = [&](const protobuf::Map<string, string>& old_rets) {
protobuf::Map<string, string> new_rets;
for (const auto& ret : old_rets) {
const auto& key = changed_output_names.count(ret.first)
? changed_output_names.at(ret.first)
: ret.first;
const auto& input = ParseNodeConnection(ret.second);
const auto& value =
changed_input_names.count(input)
? changed_input_names.at(input) + ParseOutputNode(ret.second)
: ret.second;
new_rets[key] = value;
}
return new_rets;
};
*rets_to_fuse = new_rets(*rets_to_fuse);
*control_rets_to_fuse = new_rets(*control_rets_to_fuse);
for (NodeDef& function_node : *nodes_to_fuse) {
for (auto& node_input : *function_node.mutable_input()) {
bool is_control_input = IsControlInput(node_input);
const auto& input =
ParseNodeConnection(StripControlInputNotation(node_input));
if (const string* new_name =
gtl::FindOrNull(changed_input_names, input)) {
node_input = *new_name + ParseOutputNode(node_input);
if (is_control_input) {
node_input = AddControlInputNotation(node_input);
}
}
}
}
if (second_signature.is_stateful()) {
signature.set_is_stateful(true);
}
return signature;
}
void FuseFunctionNodes(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs,
const SetInputFn& set_input,
protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
for (NodeDef& function_node : *nodes_to_fuse) {
for (auto& node_input : *function_node.mutable_input()) {
bool is_control_input = IsControlInput(node_input);
auto parsed_name =
ParseNodeConnection(StripControlInputNotation(node_input));
auto input_it =
std::find(second_inputs.begin(), second_inputs.end(), parsed_name);
if (input_it == second_inputs.end()) continue;
auto arg_num = std::distance(second_inputs.begin(), input_it);
node_input =
set_input(first_inputs, second_inputs, first_outputs, arg_num);
if (is_control_input) {
node_input = AddControlInputNotation(node_input);
}
}
}
}
void FuseReturns(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs,
const SetInputFn& set_input,
protobuf::Map<string, string>* fused_ret) {
for (auto& ret : *fused_ret) {
auto return_input = ParseNodeConnection(ret.second);
auto input_it =
std::find(second_inputs.begin(), second_inputs.end(), return_input);
if (input_it == second_inputs.end()) continue;
auto input_idx = std::distance(second_inputs.begin(), input_it);
ret.second =
set_input(first_inputs, second_inputs, first_outputs, input_idx);
}
}
StringCollection GetFunctionOutputs(const FunctionDef& function) {
const auto number_of_outputs = function.signature().output_arg_size();
StringCollection outputs;
outputs.reserve(number_of_outputs);
for (int output_idx = 0; output_idx < number_of_outputs; output_idx++)
outputs.push_back(GetOutputNode(function, output_idx));
return outputs;
}
FunctionDef* CreateFalsePredicate(
const protobuf::RepeatedPtrField<OpDef_ArgDef>& fake_args,
FunctionDefLibrary* library) {
GraphDef graph;
MutableGraphView graph_view(&graph);
auto* node = graph_utils::AddScalarConstNode(false, &graph_view);
auto* false_predicate = library->add_function();
graph_utils::SetUniqueGraphFunctionName("false_predicate", library,
false_predicate);
int num = 0;
for (const auto& fake_arg : fake_args) {
auto* arg = false_predicate->mutable_signature()->add_input_arg();
arg->set_type(fake_arg.type());
arg->set_name(strings::StrCat("fake_arg", num));
num++;
}
auto* output = false_predicate->mutable_signature()->add_output_arg();
output->set_name("false_out");
output->set_type(DT_BOOL);
(*false_predicate->mutable_ret())["false_out"] = node->name() + ":output:0";
*false_predicate->mutable_node_def() = std::move(*graph.mutable_node());
return false_predicate;
}
void CheckIfCanCompose(const OpDef& first_signature,
const OpDef& second_signature) {
CHECK(CanCompose(first_signature, second_signature))
<< "The number of input arguments of function " << second_signature.name()
<< " should be the same as the number of output arguments of function "
<< first_signature.name() << ".";
}
}
void MergeNodes(const FunctionDef& first_function,
const FunctionDef& second_function, FunctionDef* fused_function,
FunctionDefLibrary* library) {
fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
fused_function->mutable_node_def()->MergeFrom(second_function.node_def());
}
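// The Compose* helpers implement sequential fusion (e.g. map-then-map): the
// fused function keeps the first function's inputs, feeds each output of the
// first function into the corresponding input of the second, and exposes the
// second function's outputs.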
bool CanCompose(const OpDef& first_signature, const OpDef& second_signature) {
return first_signature.output_arg_size() == second_signature.input_arg_size();
}
string ComposeInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num) {
return first_outputs.at(arg_num);
}
void ComposeSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature) {
CheckIfCanCompose(first_signature, second_signature);
*fused_signature->mutable_input_arg() = first_signature.input_arg();
*fused_signature->mutable_output_arg() = second_signature.output_arg();
if (first_signature.is_stateful() || second_signature.is_stateful()) {
if (!(first_signature.is_stateful() && second_signature.is_stateful())) {
metrics::RecordTFDataDebug("fused_with_mixed_statefulness");
}
fused_signature->set_is_stateful(true);
}
fused_signature->mutable_control_output()->Add(
first_signature.control_output().begin(),
first_signature.control_output().end());
fused_signature->mutable_control_output()->Add(
second_signature.control_output().begin(),
second_signature.control_output().end());
}
void ComposeOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
*fused_ret = second_ret;
}
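// The Combine* helpers implement fusion that preserves both result sets
// (used for map-and-filter style fusion): the fused signature keeps the
// first function's inputs and concatenates the output args and returns of
// both functions.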
void CombineSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature) {
CheckIfCanCompose(first_signature, second_signature);
*fused_signature = first_signature;
fused_signature->mutable_output_arg()->MergeFrom(
second_signature.output_arg());
}
void CombineOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
*fused_ret = first_ret;
fused_ret->insert(second_ret.begin(), second_ret.end());
}
string SameInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num) {
return first_inputs.at(arg_num);
}
bool HasSameSignature(const OpDef& first_signature,
const OpDef& second_signature) {
return first_signature.input_arg_size() ==
second_signature.input_arg_size() &&
first_signature.output_arg_size() ==
second_signature.output_arg_size();
}
void SameSignature(const OpDef& first_signature, const OpDef& second_signature,
OpDef* fused_signature) {
CHECK(HasSameSignature(first_signature, second_signature))
<< "Functions do not have the same signature";
*fused_signature = first_signature;
}
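// Builds a short-circuit conjunction of two predicates, roughly:
//   out = first(x) ? second(x) : false
// realized with an "If" node whose else-branch is a generated constant-false
// function, so the second predicate only runs when the first returns true.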
void LazyConjunctionNodes(const FunctionDef& first_function,
const FunctionDef& second_function,
FunctionDef* fused_function,
FunctionDefLibrary* library) {
fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
NodeDefBuilder if_builder("", "If");
if_builder.Input(GetOutputNode(first_function, 0), 0, DT_BOOL);
DataTypeVector in_arg_types;
std::vector<NodeDefBuilder::NodeOut> inputs;
for (const auto& input_arg : first_function.signature().input_arg()) {
inputs.push_back({input_arg.name(), 0, input_arg.type()});
in_arg_types.push_back(input_arg.type());
}
if_builder.Attr("Tin", in_arg_types);
if_builder.Attr("Tcond", DT_BOOL);
if_builder.Attr("Tout", DataTypeVector{DT_BOOL});
if_builder.Attr("_lower_using_switch_merge", true);
NameAttrList then_branch;
then_branch.set_name(second_function.signature().name());
if_builder.Attr("then_branch", then_branch);
auto* false_predicate =
CreateFalsePredicate(first_function.signature().input_arg(), library);
NameAttrList else_branch;
else_branch.set_name(false_predicate->signature().name());
if_builder.Attr("else_branch", else_branch);
if_builder.Input(inputs);
auto* if_node = fused_function->add_node_def();
TF_CHECK_OK(if_builder.Finalize(if_node));
function_utils::SetUniqueFunctionNodeName("cond", fused_function, if_node);
GetMutableOutputNode(fused_function, 0) = if_node->name() + ":output:0";
}
void LazyConjunctionOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret) {
CHECK_EQ(first_ret.size(), 1);
CHECK_EQ(second_ret.size(), 1);
*fused_ret = first_ret;
}
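// Fuses `second_function` onto `first_function` and adds the result to
// `library`. The customization points (set_signature / set_input /
// set_output / set_nodes) select the fusion flavor: compose, combine,
// same-signature, or lazy conjunction. Returns nullptr when either function
// carries attributes the fusion logic does not understand, since those could
// not be preserved correctly.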
FunctionDef* FuseFunctions(
const FunctionDef& first_function, const FunctionDef& second_function,
StringPiece fused_name_prefix, const SetFunctionSignatureFn& set_signature,
const SetInputFn& set_input, const SetOutputFn& set_output,
const SetNodesFn& set_nodes, FunctionDefLibrary* library) {
auto has_unknown_attrs = [](const FunctionDef& func) {
int known_attribute_size = 0;
if (data::IsTFDataFunction(func)) known_attribute_size += 1;
if (func.attr().contains("_construction_context"))
known_attribute_size += 1;
return func.attr_size() > known_attribute_size;
};
if (has_unknown_attrs(first_function) || has_unknown_attrs(second_function)) {
return nullptr;
}
FunctionDef setup_function = second_function;
*setup_function.mutable_signature() = GetUniqueSignature(
first_function.signature(), setup_function.signature(),
setup_function.mutable_ret(), setup_function.mutable_control_ret(),
setup_function.mutable_node_def());
FunctionDef* fused_function = library->add_function();
RenameFunctionNodes(
first_function, setup_function.mutable_node_def(),
setup_function.mutable_ret(), setup_function.mutable_control_ret(),
setup_function.mutable_signature()->mutable_control_output());
set_output(first_function.ret(), setup_function.ret(),
fused_function->mutable_ret());
CombineOutput(first_function.control_ret(), setup_function.control_ret(),
fused_function->mutable_control_ret());
set_signature(first_function.signature(), setup_function.signature(),
fused_function->mutable_signature());
graph_utils::SetUniqueGraphFunctionName(fused_name_prefix, library,
fused_function);
CHECK(fused_function->signature().output_arg_size() ==
fused_function->ret_size())
<< "Fused function must have the same number of returns as output "
"args. Output size: "
<< fused_function->signature().output_arg_size()
<< ", ret size: " << fused_function->ret_size();
const auto first_inputs = GetFunctionInputs(first_function);
const auto second_inputs = GetFunctionInputs(setup_function);
const auto first_outputs = GetFunctionOutputs(first_function);
FuseFunctionNodes(first_inputs, second_inputs, first_outputs, set_input,
setup_function.mutable_node_def());
FuseReturns(first_inputs, second_inputs, first_outputs, set_input,
fused_function->mutable_ret());
set_nodes(first_function, setup_function, fused_function, library);
(*fused_function->mutable_attr())[data::kTFDataFunction].set_b(true);
auto get_construction_context = [](const FunctionDef& func) {
auto iter = func.attr().find("_construction_context");
if (iter == func.attr().cend()) return std::string();
return iter->second.s();
};
std::string first_construction_context =
get_construction_context(first_function);
std::string second_construction_context =
get_construction_context(second_function);
if (first_construction_context != second_construction_context) {
LOG(ERROR) << "_construction_context attribute mismatch during fused "
"function optimization pass. First function: "
<< first_construction_context
<< " Second function: " << first_construction_context;
}
if (!first_construction_context.empty()) {
(*fused_function->mutable_attr())["_construction_context"].set_s(
first_construction_context);
}
return fused_function;
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
string ParseNodeConnection(const string& name) {
return name.substr(0, name.find(':'));
}
void CheckUniqueNames(const FunctionDef& function) {
std::unordered_set<string> inputs;
for (const auto& input_arg : function.signature().input_arg())
inputs.insert(input_arg.name());
EXPECT_EQ(inputs.size(), function.signature().input_arg_size());
std::unordered_set<string> outputs;
for (const auto& output_arg : function.signature().output_arg())
outputs.insert(output_arg.name());
EXPECT_EQ(outputs.size(), function.signature().output_arg_size());
std::unordered_set<string> nodes;
for (const auto& node : function.node_def()) nodes.insert(node.name());
EXPECT_EQ(nodes.size(), function.node_def_size());
}
TEST(FusionUtilsTest, FuseFunctionsByComposition) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(), "fused_maps");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
EXPECT_EQ(fused_function->ret_size(), 1);
std::cerr << fused_function->DebugString();
CheckUniqueNames(*fused_function);
const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
for (const auto& fused_node : fused_function->node_def()) {
if (fused_node.op() == "Mul") {
if (fused_node.name() == "y")
parent_mul = &fused_node;
else
output_mul = &fused_node;
}
}
ASSERT_NE(parent_mul, nullptr);
ASSERT_NE(output_mul, nullptr);
EXPECT_EQ(ParseNodeConnection(output_mul->input(0)), parent_mul->name());
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(0).name());
EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
TEST(FusionUtilsTest, FuseFunctionsWithControlInputs) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwoWithControlInput();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwoWithControlInput();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(), "fused_maps");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
EXPECT_EQ(fused_function->ret_size(), 1);
CheckUniqueNames(*fused_function);
const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
for (const auto& fused_node : fused_function->node_def()) {
if (fused_node.op() == "Mul") {
if (fused_node.name() == "y")
parent_mul = &fused_node;
else
output_mul = &fused_node;
}
}
ASSERT_NE(parent_mul, nullptr);
ASSERT_NE(output_mul, nullptr);
EXPECT_EQ(ParseNodeConnection(output_mul->input(1)),
absl::StrCat("^", parent_mul->name()));
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(0).name());
EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
TEST(FusionUtilsTest, FuseFunctionWithControlOutputs) {
GraphDef graph;
auto *f1 = graph.mutable_library()->add_function();
*f1 = test::function::XTimesTwoWithControlOutput();
f1->mutable_signature()->set_name("f1");
auto *f2 = graph.mutable_library()->add_function();
*f2 = test::function::XTimesTwoWithControlOutput();
f2->mutable_signature()->set_name("f2");
auto *fused_function =
FuseFunctions(*f1, *f2, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().control_output_size(), 2);
string control_output_1 = fused_function->signature().control_output(0);
string control_output_2 = fused_function->signature().control_output(1);
EXPECT_NE(control_output_1, control_output_2);
EXPECT_EQ(fused_function->control_ret_size(), 2);
EXPECT_TRUE(fused_function->control_ret().contains(control_output_1));
EXPECT_TRUE(fused_function->control_ret().contains(control_output_2));
EXPECT_EQ(fused_function->control_ret().at(control_output_1),
control_output_1);
EXPECT_EQ(fused_function->control_ret().at(control_output_2),
control_output_2);
}
struct StatefulnessTestCase {
bool is_stateful_a, is_stateful_b;
};
using FusionUtilsTest_Statefulness =
::testing::TestWithParam<StatefulnessTestCase>;
TEST_P(FusionUtilsTest_Statefulness, FuseFunctionStatefulness) {
const StatefulnessTestCase &test_case = GetParam();
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
if (test_case.is_stateful_a) {
parent_function->mutable_signature()->set_is_stateful(true);
}
if (test_case.is_stateful_b) {
function->mutable_signature()->set_is_stateful(true);
}
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().is_stateful(),
test_case.is_stateful_a || test_case.is_stateful_b);
}
INSTANTIATE_TEST_SUITE_P(
StatefulnessTests, FusionUtilsTest_Statefulness,
::testing::ValuesIn<StatefulnessTestCase>(
{{false, false}, {false, true}, {true, false}, {true, true}}));
TEST(FusionUtilsTest, FuseFunctionWithPredicate) {
GraphDef graph;
auto *xtimes_two = graph.mutable_library()->add_function();
*xtimes_two = test::function::XTimesTwo();
auto *is_zero = graph.mutable_library()->add_function();
*is_zero = test::function::IsZero();
auto *fused_function =
FuseFunctions(*xtimes_two, *is_zero, "fused_map_and_filter_function",
fusion_utils::CombineSignature, fusion_utils::ComposeInput,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
graph.mutable_library());
EXPECT_EQ(fused_function->signature().name(),
"fused_map_and_filter_function");
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
ASSERT_TRUE(
function_utils::ContainsFunctionNodeWithOp("Equal", *fused_function));
const auto& equal_node = fused_function->node_def(
function_utils::FindFunctionNodeWithOp("Equal", *fused_function));
EXPECT_EQ(xtimes_two->signature().output_arg(0).name(),
fused_function->signature().output_arg(0).name());
EXPECT_EQ(fused_function->signature().output_arg(1).name(),
equal_node.name());
EXPECT_EQ(ParseNodeConnection(equal_node.input(0)),
fused_function->signature().output_arg(0).name());
auto output_value = fused_function->ret().at(
fused_function->signature().output_arg(1).name());
EXPECT_EQ(ParseNodeConnection(output_value), equal_node.name());
}
TEST(FusionUtilsTest, FuseSameFunctionWithExtraOutput) {
GraphDef graph;
auto *parent_function = graph.mutable_library()->add_function();
*parent_function = test::function::XTimesTwo();
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto *fused_function = FuseFunctions(
*parent_function, *function, "fused_maps", fusion_utils::CombineSignature,
fusion_utils::ComposeInput, fusion_utils::CombineOutput,
fusion_utils::MergeNodes, graph.mutable_library());
EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
}
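// Zip-style fusion: unlike composition, the two functions stay independent;
// the fused function takes the union of their (renamed) inputs and returns
// the union of their outputs.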
TEST(FusionUtilsTest, ZipFusion) {
GraphDef graph;
auto *function = graph.mutable_library()->add_function();
*function = test::function::XTimesTwo();
auto zip_signature = [](const OpDef& parent_function_signature,
const OpDef& function_signature,
OpDef *fused_function_signature) {
*fused_function_signature = parent_function_signature;
fused_function_signature->mutable_input_arg()->MergeFrom(
function_signature.input_arg());
fused_function_signature->mutable_output_arg()->MergeFrom(
function_signature.output_arg());
};
auto zip_input = [](const StringCollection& parent_inputs,
const StringCollection& function_inputs,
const StringCollection& parent_outputs, int arg_num) {
return function_inputs.at(arg_num);
};
auto *fused_function =
FuseFunctions(*function, *function, "zip_maps", zip_signature, zip_input,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
graph.mutable_library());
EXPECT_EQ(fused_function->signature().input_arg_size(), 2);
EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
EXPECT_EQ(fused_function->ret_size(), 2);
CheckUniqueNames(*fused_function);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/fusion_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/fusion_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5178b33-ed64-441e-8417-32d1894954ac | cpp | tensorflow/tensorflow | slack | tensorflow/core/grappler/optimizers/data/slack.cc | tensorflow/core/grappler/optimizers/data/slack_test.cc | #include "tensorflow/core/grappler/optimizers/data/slack.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrefetchDatasetOp[] = "PrefetchDataset";
template <std::size_t SIZE>
bool IsDatasetNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& arr) {
for (const auto& dataset_op_name : arr) {
if (node.op() == dataset_op_name) return true;
}
return false;
}
constexpr std::array<const char*, 2> kMultipleInputsDatasetOps = {
"ZipDataset", "ConcatenateDataset"};
constexpr std::array<const char*, 22> kPassThroughOps = {
"CacheDataset",
"CacheDatasetV2",
"ExperimentalMaxIntraOpParallelismDataset",
"ExperimentalPrivateThreadPoolDataset",
"FilterDataset",
"Identity",
"MapDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"OptimizeDataset",
"ParallelMapDataset",
"PrivateThreadPoolDataset",
"ReduceDataset",
"RepeatDataset",
"ShardDataset",
"ShuffleAndRepeatDataset",
"ShuffleDataset",
"ShuffleDatasetV2",
"ShuffleDatasetV3",
"SkipDataset",
"TakeDataset",
"WindowDataset",
};
}
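// Walks upstream from the fetch node, through ops that merely pass elements
// along (and through every input of Zip/Concatenate), until it reaches the
// terminal PrefetchDataset, on which it sets the "slack_period" attribute.
// Logs a warning if no such prefetch is found.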
Status Slack::RecursivelyHandleOp(const MutableGraphView& graph,
NodeDef* dataset_node) {
if (dataset_node->op() == kPrefetchDatasetOp) {
if (HasNodeAttr(*dataset_node, "slack_period")) {
(*dataset_node->mutable_attr())["slack_period"].set_i(slack_period_);
} else {
AddNodeAttr("slack_period", slack_period_, dataset_node);
}
return absl::OkStatus();
}
if (IsDatasetNodeOfType(*dataset_node, kPassThroughOps)) {
NodeDef* input_node = graph_utils::GetInputNode(*dataset_node, graph, 0);
return RecursivelyHandleOp(graph, input_node);
}
if (IsDatasetNodeOfType(*dataset_node, kMultipleInputsDatasetOps)) {
for (int i = 0; i < dataset_node->input_size(); ++i) {
NodeDef* input_node = graph_utils::GetInputNode(*dataset_node, graph, i);
TF_RETURN_IF_ERROR(RecursivelyHandleOp(graph, input_node));
}
return absl::OkStatus();
}
LOG(WARNING) << "Could not find a final `prefetch` in the input pipeline to "
"which to introduce slack.";
return absl::OkStatus();
}
Status Slack::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
if (slack_period_ < 1)
return errors::InvalidArgument("Invalid `slack_period` parameter: ",
slack_period_);
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
NodeDef* dataset_node = graph.GetNode(item.fetch.at(0));
return RecursivelyHandleOp(graph, dataset_node);
}
REGISTER_GRAPH_OPTIMIZER_AS(Slack, "slack");
}
} | #include "tensorflow/core/grappler/optimizers/data/slack.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void SetupGrapplerItem(GrapplerItem *item) {
MutableGraphView graph(&item->graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(std::vector<TensorShape>({{}}), &shapes_attr);
common_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue(std::vector<DataType>({DT_INT64}), &types_attr);
common_attrs[1] = std::make_pair("output_types", types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode(
"RangeDataset", "RangeDataset", range_inputs, common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(1, &graph);
NodeDef *prefetch_node = graph_utils::AddNode(
"PrefetchDataset", "PrefetchDataset",
{range_node->name(), buffer_size_node->name()}, common_attrs, &graph);
item->fetch.push_back(prefetch_node->name());
}
struct ParameterizedSlackTest
: ::testing::TestWithParam<std::tuple<string, int>> {};
TEST_P(ParameterizedSlackTest, BasicTest) {
GrapplerItem item;
SetupGrapplerItem(&item);
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s(
std::get<0>(GetParam()));
TF_ASSERT_OK(optimizer.Init(&config));
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
NodeDef optimized_prefetch_node =
output.node(graph_utils::FindGraphNodeWithOp("PrefetchDataset", output));
EXPECT_EQ(optimized_prefetch_node.attr().at("slack_period").i(),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(DifferentSlackEveryValues, ParameterizedSlackTest,
::testing::Values(std::make_tuple("1", 1),
std::make_tuple("8", 8)));
TEST(SlackTest, TestFailWithoutInit) {
GrapplerItem item;
Slack optimizer;
GraphDef output;
Status result = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(result.ok());
EXPECT_TRUE(absl::IsInvalidArgument(result));
}
TEST(SlackTest, TestFailWithInvalidSlackEveryParam) {
GrapplerItem item;
SetupGrapplerItem(&item);
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s("0");
TF_ASSERT_OK(optimizer.Init(&config));
GraphDef output;
Status result = optimizer.Optimize(nullptr, item, &output);
EXPECT_FALSE(result.ok());
EXPECT_TRUE(absl::IsInvalidArgument(result));
}
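// Builds a GrapplerFunctionItem whose body contains a PrefetchDataset and
// checks that the optimizer leaves function-level graphs alone: the
// "slack_period" attribute stays at its op-definition default of 0.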
TEST(SlackTest, TestFunctionNotOptimized) {
GrapplerFunctionItem item;
FunctionDefLibrary lib_def;
FunctionDef *fdef = lib_def.add_function();
fdef->mutable_signature()->set_name("nested_function");
auto *input_arg = fdef->mutable_signature()->add_input_arg();
input_arg->set_name("args_0");
input_arg->set_type(DT_INT64);
auto *output_arg = fdef->mutable_signature()->add_output_arg();
output_arg->set_name("identity");
output_arg->set_type(DT_VARIANT);
fdef->mutable_signature()->set_is_stateful(true);
AttrValue shapes_attr;
SetAttrValue(std::vector<TensorShape>({{}}), &shapes_attr);
AttrValue types_attr;
SetAttrValue(std::vector<DataType>({DT_INT64}), &types_attr);
NodeDef *tensor_dataset_node =
function_utils::AddNode("TensorDataset", "TensorDataset", {"args_0"},
{std::make_pair("output_shapes", shapes_attr),
std::make_pair("Toutput_types", types_attr)},
fdef);
NodeDef *prefetch_node = function_utils::AddNode(
"PrefetchDataset", "PrefetchDataset",
{strings::StrCat(tensor_dataset_node->name(), ":handle:0"), "args_0"},
{std::make_pair("output_shapes", shapes_attr),
std::make_pair("output_types", types_attr)},
fdef);
AttrValue variant_type_attr;
SetAttrValue(DT_VARIANT, &variant_type_attr);
NodeDef *identity_node = function_utils::AddNode(
"Identity", "Identity",
{strings::StrCat(prefetch_node->name(), ":handle:0"),
strings::StrCat("^", tensor_dataset_node->name())},
{std::make_pair("T", variant_type_attr)}, fdef);
(*fdef->mutable_ret())["identity"] =
strings::StrCat(identity_node->name(), ":output:0");
(*fdef->mutable_control_ret())[tensor_dataset_node->name()] =
tensor_dataset_node->name();
fdef->mutable_signature()->add_control_output(tensor_dataset_node->name());
FunctionLibraryDefinition flib(OpRegistry::Global(), lib_def);
  TF_ASSERT_OK(MakeGrapplerFunctionItem(*fdef, flib,
                                        /*graph_def_version=*/27, &item));
GraphDef output;
Slack optimizer;
tensorflow::RewriterConfig_CustomGraphOptimizer config;
(*config.mutable_parameter_map())["slack_period"].set_s("8");
TF_ASSERT_OK(optimizer.Init(&config));
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
NodeDef optimized_prefetch_node =
output.node(graph_utils::FindGraphNodeWithOp("PrefetchDataset", output));
EXPECT_EQ(optimized_prefetch_node.attr().at("slack_period").i(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/slack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/slack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6432a53e-abeb-4a52-84d7-ca55e4526dee | cpp | tensorflow/tensorflow | batch_parallelization | tensorflow/core/grappler/optimizers/data/batch_parallelization.cc | tensorflow/core/grappler/optimizers/data/batch_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchDataset[] = "BatchDatasetV2";
constexpr char kParallelBatchDataset[] = "ParallelBatchDataset";
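// Clones the BatchDatasetV2 node identified by `name`, renames it to
// ParallelBatchDataset, and rewires its inputs so that an AUTOTUNE
// `num_parallel_calls` constant is inserted before `drop_remainder`.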
NodeDef MakeParallelBatch(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_batch = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelBatchDataset, graph->graph(),
¶llel_batch);
parallel_batch.set_op(kParallelBatchDataset);
auto* num_parallel_calls =
graph_utils::AddScalarConstNode(data::model::kAutotune, graph);
string drop_remainder_name = parallel_batch.input(2);
parallel_batch.set_input(2, num_parallel_calls->name());
parallel_batch.add_input(drop_remainder_name);
return parallel_batch;
}
}  // namespace
Status BatchParallelization::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization batch_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
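  // Graphs derived from a FunctionDef are left unchanged; the rewrite only
  // targets the outermost dataset pipeline.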
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_batch_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kBatchDataset) return &node;
return nullptr;
};
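  // Rewrite every BatchDatasetV2 node into a ParallelBatchDataset and
  // redirect its fanouts to the new node; the originals are deleted below.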
for (const NodeDef& node : item.graph.node()) {
const NodeDef* batch_node = get_batch_node(node);
if (!batch_node) continue;
auto* parallel_batch =
graph.AddNode(MakeParallelBatch(batch_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(batch_node->name(), parallel_batch->name()));
nodes_to_delete.insert(batch_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchParallelization, "batch_parallelization");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithBatchParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
BatchParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeBatchV2Node;
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, BatchParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
       MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
                       /*parallel_copy=*/false),
NDef("Sink", "Identity", {"batch"}, {})},
{});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, BatchParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
       MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
                       /*parallel_copy=*/false),
NDef("Sink", op, {"batch"}, {})},
{});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
class ValueRewrites : public ::testing::TestWithParam<bool> {};
TEST_P(ValueRewrites, BatchParallelizationTest) {
const bool parallel_copy = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
parallel_copy),
NDef("Sink", "Identity", {"batch"}, {})},
{});
item.fetch.push_back("Sink");
NodeDef batch =
item.graph.node(graph_utils::FindGraphNodeWithName("batch", item.graph));
EXPECT_TRUE(batch.attr().find("parallel_copy") != batch.attr().end());
GraphDef output;
TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output));
NodeDef parallel_batch = output.node(
graph_utils::FindGraphNodeWithOp("ParallelBatchDataset", output));
EXPECT_EQ(parallel_batch.input_size(), 4);
EXPECT_EQ(parallel_batch.input(0), "range");
EXPECT_EQ(parallel_batch.input(1), "batch_size");
EXPECT_EQ(parallel_batch.input(3), "drop_remainder");
EXPECT_EQ(parallel_batch.attr().at("parallel_copy").b(), parallel_copy);
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallel_batch.input(2), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, ValueRewrites, ::testing::Values(false, true));
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/batch_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/batch_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
798ba46e-3eac-4fcd-aa56-8b804c33f805 | cpp | tensorflow/tensorflow | parallel_batch | tensorflow/core/grappler/optimizers/data/parallel_batch.cc | tensorflow/core/grappler/optimizers/data/parallel_batch_test.cc | #include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
namespace tensorflow {
namespace grappler {
Status ParallelBatch::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
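  // Force parallel copying of batch elements for both batch dataset variants.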
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == "BatchDatasetV2" || node.op() == "PaddedBatchDatasetV2") {
(*node.mutable_attr())["parallel_copy"].set_b(true);
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ParallelBatch, "parallel_batch");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/parallel_batch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(ParallelBatch, BatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "BatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
TEST(ParallelBatch, PaddedBatchDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
NDef("batch", "PaddedBatchDatasetV2",
{"range", "batch_size", "drop_remainder"}, {})});
ParallelBatch optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_TRUE(output.node(index).attr().at("parallel_copy").b());
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/parallel_batch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/parallel_batch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0a397fc-dd5d-4fcd-9a26-2967c72f8146 | cpp | tensorflow/tensorflow | map_and_batch_fusion | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc | tensorflow/core/grappler/optimizers/data/map_and_batch_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFusedOpName[] = "MapAndBatchDataset";
constexpr char kParallelMap[] = "ParallelMapDataset";
constexpr char kParallelMapV2[] = "ParallelMapDatasetV2";
bool IsParallelMap(const NodeDef& node) {
return node.op() == kParallelMap || node.op() == kParallelMapV2;
}
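// Builds a MapAndBatchDataset node whose inputs are, in order: input_dataset,
// other_arguments, batch_size, num_parallel_calls, and drop_remainder, all
// derived from the given map and batch nodes.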
NodeDef MakeMapAndBatchNode(const NodeDef& map_node, const NodeDef& batch_node,
MutableGraphView* graph) {
NodeDef new_node;
new_node.set_op(kFusedOpName);
graph_utils::SetUniqueGraphNodeName(kFusedOpName, graph->graph(), &new_node);
new_node.add_input(map_node.input(0));
int num_other_args;
if (IsParallelMap(map_node)) {
num_other_args = map_node.input_size() - 2;
} else {
num_other_args = map_node.input_size() - 1;
}
for (int i = 0; i < num_other_args; i++) {
new_node.add_input(map_node.input(i + 1));
}
new_node.add_input(batch_node.input(1));
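  // Determine `num_parallel_calls`: ParallelMapDataset carries an int32 const
  // that is re-created as an int64 const, ParallelMapDatasetV2 already uses
  // int64, and a plain MapDataset is treated as parallelism 1.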
if (map_node.op() == kParallelMap) {
NodeDef* v = graph->GetNode(map_node.input(map_node.input_size() - 1));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(
v->attr().at("value").tensor().int_val(0), graph);
new_node.add_input(tmp->name());
} else if (map_node.op() == kParallelMapV2) {
new_node.add_input(map_node.input(map_node.input_size() - 1));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(1, graph);
new_node.add_input(tmp->name());
}
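  // `drop_remainder` is taken from BatchDatasetV2, or defaults to false.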
if (batch_node.op() == "BatchDatasetV2") {
new_node.add_input(batch_node.input(2));
} else {
NodeDef* tmp = graph_utils::AddScalarConstNode<bool>(false, graph);
new_node.add_input(tmp->name());
}
for (auto key : {"f", "Targuments"}) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
graph_utils::CopyShapesAndTypesAttrs(batch_node, &new_node);
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(map_node.attr(), key)) {
graph_utils::CopyAttribute(key, map_node, &new_node);
}
}
graph_utils::MaybeSetFusedMetadata(map_node, batch_node, &new_node);
return new_node;
}
}  // namespace
Status MapAndBatchFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
for (const NodeDef& node : item.graph.node()) {
if (node.op() != "BatchDataset" && node.op() != "BatchDatasetV2") {
continue;
}
const NodeDef& batch_node = node;
NodeDef* node2 = graph_utils::GetInputNode(batch_node, graph);
if (node2->op() != "MapDataset" && !IsParallelMap(*node2)) {
continue;
}
if (node2->attr().find("use_unbounded_threadpool") != node2->attr().end() &&
node2->attr().at("use_unbounded_threadpool").b()) {
continue;
}
NodeDef* map_node = node2;
auto* new_node =
graph.AddNode(MakeMapAndBatchNode(*map_node, batch_node, &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(batch_node.name(), new_node->name()));
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(batch_node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapAndBatchFusion, "map_and_batch_fusion");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MapAndBatchFusionTest, FuseMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseMapAndBatchV2NodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(2);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node =
graph_utils::AddNode("", "MapDataset", map_inputs, map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *drop_remainder_node =
graph_utils::AddScalarConstNode<bool>(true, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(3);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
batch_inputs[2] = drop_remainder_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDatasetV2", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node.attr().at("value").tensor().int64_val(0),
1);
EXPECT_EQ(map_and_batch_node.input(4), batch_node->input(2));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapAndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDataset", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, FuseParallelMapV2AndBatchNodesIntoOne) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int64_t>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(2);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
map_node = graph_utils::AddNode("", "ParallelMapDatasetV2", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(map_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(batch_node->name(), output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
NodeDef map_and_batch_node = output.node(
graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output));
EXPECT_EQ(map_and_batch_node.input_size(), 5);
EXPECT_EQ(map_and_batch_node.input(0), map_node->input(0));
EXPECT_EQ(map_and_batch_node.input(1), map_node->input(1));
EXPECT_EQ(map_and_batch_node.input(2), batch_node->input(1));
NodeDef num_parallel_calls_node2 = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(3), output));
EXPECT_EQ(num_parallel_calls_node2.attr().at("value").tensor().int64_val(0),
2);
NodeDef drop_remainder_node = output.node(
graph_utils::FindGraphNodeWithName(map_and_batch_node.input(4), output));
EXPECT_EQ(drop_remainder_node.attr().at("value").tensor().bool_val(0), false);
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("f"),
map_node->attr().at("f")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("Targuments"),
map_node->attr().at("Targuments")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_shapes"),
batch_node->attr().at("output_shapes")));
EXPECT_TRUE(AreAttrValuesEqual(map_and_batch_node.attr().at("output_types"),
batch_node->attr().at("output_types")));
}
TEST(MapAndBatchFusionTest, NoChange) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
std::vector<string> batch_inputs(2);
batch_inputs[0] = range_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
graph_utils::AddNode("", "BatchDataset", batch_inputs, batch_attrs, &graph);
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
TEST(MapAndBatchFusionTest, NoChange_UnboundedThreadpoolParallelMap) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
range_attrs, &graph);
NodeDef *captured_input_node =
graph_utils::AddScalarConstNode<StringPiece>("hello", &graph);
NodeDef *num_parallel_calls_node =
graph_utils::AddScalarConstNode<int>(2, &graph);
NodeDef *map_node;
{
std::vector<string> map_inputs(3);
map_inputs[0] = range_node->name();
map_inputs[1] = captured_input_node->name();
map_inputs[2] = num_parallel_calls_node->name();
std::vector<std::pair<string, AttrValue>> map_attrs(3);
AttrValue f_attr;
SetAttrValue("f", &f_attr);
map_attrs[0] = std::make_pair("f", f_attr);
AttrValue args_attr;
SetAttrValue("Targuments", &args_attr);
map_attrs[1] = std::make_pair("Targuments", args_attr);
AttrValue use_unbounded_threadpool_attr;
SetAttrValue(true, &use_unbounded_threadpool_attr);
map_attrs[2] = std::make_pair("use_unbounded_threadpool",
use_unbounded_threadpool_attr);
map_node = graph_utils::AddNode("", "ParallelMapDataset", map_inputs,
map_attrs, &graph);
}
NodeDef *batch_size_node =
graph_utils::AddScalarConstNode<int64_t>(5, &graph);
NodeDef *batch_node;
{
std::vector<string> batch_inputs(2);
batch_inputs[0] = map_node->name();
batch_inputs[1] = batch_size_node->name();
std::vector<std::pair<string, AttrValue>> batch_attrs(2);
AttrValue shapes_attr;
SetAttrValue("output_shapes", &shapes_attr);
batch_attrs[0] = std::make_pair("output_shapes", shapes_attr);
AttrValue types_attr;
SetAttrValue("output_types", &types_attr);
batch_attrs[1] = std::make_pair("output_types", types_attr);
batch_node = graph_utils::AddNode("", "BatchDataset", batch_inputs,
batch_attrs, &graph);
}
MapAndBatchFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_batch_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d19e6e02-4edc-4ad0-84de-f8a2f4b058e1 | cpp | tensorflow/tensorflow | autotune_buffer_sizes | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.cc | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes_test.cc | #include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBufferSizeMin[] = "buffer_size_min";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
constexpr std::array<const char*, 8> kAsyncDatasetOps = {
"ExperimentalMapAndBatchDataset",
"MapAndBatchDataset",
"ParallelBatchDataset",
"ParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDataset",
"ParallelMapDatasetV2",
};
}  // namespace
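// This rewrite proceeds in two phases: existing PrefetchDataset nodes with a
// constant buffer size are switched to AUTOTUNE (recording the old value in
// `buffer_size_min`), and any asynchronous dataset that is not already
// followed by a prefetch gets an autotuned PrefetchDataset injected after it.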
Status AutotuneBufferSizes::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization autotune_buffer_sizes is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
absl::flat_hash_set<string> already_prefetched;
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
NodeDef* buffer_size_node = graph.GetNode(node.input(1));
if (buffer_size_node->op() == "Const") {
int64_t initial_buffer_size =
buffer_size_node->attr().at("value").tensor().int64_val(0);
if (initial_buffer_size != data::model::kAutotune) {
TF_RETURN_IF_ERROR(graph.UpdateFanin(node.name(),
{buffer_size_node->name(), 0},
{autotune_value->name(), 0}));
node.mutable_attr()->at(kBufferSizeMin).set_i(initial_buffer_size);
stats->num_changes++;
}
} else {
return absl::FailedPreconditionError(
"The autotune_buffer_sizes rewrite does not currently support "
"non-constant buffer_size input.");
}
NodeDef* prefetched_node = graph_utils::GetInputNode(node, graph);
if (prefetched_node) {
already_prefetched.insert(prefetched_node->name());
}
}
}
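  // Phase 2: collect asynchronous datasets that are not already prefetched.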
std::vector<const NodeDef*> async_datasets;
for (const NodeDef& node : item.graph.node()) {
if (already_prefetched.find(node.name()) != already_prefetched.end()) {
continue;
}
for (const auto& async_dataset_op : kAsyncDatasetOps) {
if (node.op() == async_dataset_op) {
async_datasets.push_back(&node);
stats->num_changes++;
break;
}
}
}
if (async_datasets.empty()) return absl::OkStatus();
for (const NodeDef* async_dataset_node : async_datasets) {
NodeDef prefetch_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("inject/prefetch_", async_dataset_node->name()),
graph.graph(), &prefetch_node);
prefetch_node.set_op(kPrefetchDataset);
*prefetch_node.mutable_input()->Add() = async_dataset_node->name();
*prefetch_node.mutable_input()->Add() = autotune_value->name();
graph_utils::CopyShapesAndTypesAttrs(*async_dataset_node, &prefetch_node);
auto* added_node = graph.AddNode(std::move(prefetch_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(async_dataset_node->name(), added_node->name()));
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(AutotuneBufferSizes, "autotune_buffer_sizes");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithAutotuneBufferSizes(const GrapplerItem &item,
GraphDef *output, bool autotune) {
AutotuneBufferSizes optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleInject : public ::testing::TestWithParam<string> {};
TEST_P(SimpleInject, AutotuneBufferSizesTest) {
const string async_dataset = GetParam();
using test::function::NDef;
GrapplerItem item;
if (async_dataset == "map") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode(
"map", "range", "num_parallel_calls", "XTimesTwo",
             /*sloppy=*/false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "interleave") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "map_and_batch") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 32}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{
test::function::XTimesTwo(),
});
}
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("PrefetchDataset", output);
const NodeDef prefetch_node = output.node(index);
EXPECT_TRUE(prefetch_node.attr().find("legacy_autotune") ==
prefetch_node.attr().end());
EXPECT_EQ(prefetch_node.input_size(), 2);
NodeDef async_node = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(0), output));
EXPECT_EQ(async_node.name(), async_dataset);
NodeDef buffer_size_val = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(1), output));
EXPECT_EQ(buffer_size_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, SimpleInject,
::testing::Values("map", "interleave",
"map_and_batch"));
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, AutotuneBufferSizesTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode("map", "range",
"num_parallel_calls", "XTimesTwo",
                                              /*sloppy=*/false)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("PrefetchDataset", output),
autotune);
}
class MultipleNodes
: public ::testing::TestWithParam<std::tuple<bool, int64_t>> {};
TEST_P(MultipleNodes, AutotuneBufferSizesTest) {
const bool legacy_autotune = std::get<0>(GetParam());
const int64_t initial_buffer_size = std::get<1>(GetParam());
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *parallelism_val =
graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> map_inputs1(2);
map_inputs1[0] = range_node->name();
map_inputs1[1] = parallelism_val->name();
std::vector<std::pair<string, AttrValue>> map_attrs(4);
AttrValue attr_val;
SetAttrValue("value", &attr_val);
map_attrs[0] = std::make_pair("f", attr_val);
map_attrs[1] = std::make_pair("Targuments", attr_val);
map_attrs[2] = std::make_pair("output_types", attr_val);
map_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *map_node1 = graph_utils::AddNode("map1", "ParallelMapDatasetV2",
map_inputs1, map_attrs, &graph);
NodeDef *buffer_size_val =
graph_utils::AddScalarConstNode<int64_t>(initial_buffer_size, &graph);
std::vector<string> prefetch_inputs(2);
prefetch_inputs[0] = map_node1->name();
prefetch_inputs[1] = buffer_size_val->name();
std::vector<std::pair<string, AttrValue>> prefetch_attrs(4);
AttrValue legacy_autotune_attr;
SetAttrValue(legacy_autotune, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
SetAttrValue(0, &buffer_size_min_attr);
prefetch_attrs[0] = std::make_pair("legacy_autotune", legacy_autotune_attr);
prefetch_attrs[1] = std::make_pair("buffer_size_min", buffer_size_min_attr);
prefetch_attrs[2] = std::make_pair("output_types", attr_val);
prefetch_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *prefetch_node = graph_utils::AddNode(
"prefetch", "PrefetchDataset", prefetch_inputs, prefetch_attrs, &graph);
std::vector<string> map_inputs2(2);
map_inputs2[0] = prefetch_node->name();
map_inputs2[1] = parallelism_val->name();
NodeDef *map_node2 = graph_utils::AddNode("map2", "ParallelMapDatasetV2",
map_inputs2, map_attrs, &graph);
std::vector<string> map_inputs3(1);
map_inputs3[0] = map_node2->name();
graph_utils::AddNode("map3", "MapDataset", map_inputs3, map_attrs, &graph);
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
std::vector<int> prefetch_indices =
graph_utils::FindAllGraphNodesWithOp("PrefetchDataset", output);
EXPECT_EQ(prefetch_indices.size(), 2);
NodeDef new_map_node3 =
output.node(graph_utils::FindGraphNodeWithName("map3", output));
NodeDef new_prefetch_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node3.input(0), output));
EXPECT_EQ(new_prefetch_node2.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node2.input_size(), 2);
EXPECT_TRUE(new_prefetch_node2.attr().find("legacy_autotune") ==
new_prefetch_node2.attr().end());
EXPECT_TRUE(new_prefetch_node2.attr().find("buffer_size_min") ==
new_prefetch_node2.attr().end());
NodeDef new_buffer_size_val2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(1), output));
EXPECT_EQ(new_buffer_size_val2.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(0), output));
EXPECT_EQ(new_map_node2.name(), "map2");
NodeDef new_prefetch_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node2.input(0), output));
EXPECT_EQ(new_prefetch_node1.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node1.input_size(), 2);
EXPECT_EQ(new_prefetch_node1.attr().at("legacy_autotune").b(),
legacy_autotune);
EXPECT_EQ(new_prefetch_node1.attr().at("buffer_size_min").i(),
(initial_buffer_size == -1 ? 0 : initial_buffer_size));
NodeDef new_buffer_size_val1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(1), output));
EXPECT_EQ(new_buffer_size_val1.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(0), output));
EXPECT_EQ(new_map_node1.name(), "map1");
}
INSTANTIATE_TEST_SUITE_P(Test, MultipleNodes,
::testing::Combine(::testing::Values(true, false),
::testing::Values(-1, 3)));
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5293b335-1054-4251-bbc9-edecf54c9bf3 | cpp | tensorflow/tensorflow | inject_prefetch | tensorflow/core/grappler/optimizers/data/inject_prefetch.cc | tensorflow/core/grappler/optimizers/data/inject_prefetch_test.cc | #include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrefetchDataset[] = "PrefetchDataset";
constexpr std::array<const char*, 5> kAsyncTransforms = {
"MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset",
"ParallelMapDataset", "PrefetchDataset"};
constexpr std::array<const char*, 8> kDatasetsToSkip = {
"AssertNextDataset",
"ExperimentalAssertNextDataset",
"IgnoreErrorsDataset",
"OptionsDataset",
"ModelDataset",
"OptimizeDataset",
"MaxIntraOpParallelismDataset",
"PrivateThreadPoolDataset",
};
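// Decides whether to inject a prefetch after `last_node`: pass-through
// datasets are skipped first, and no prefetch is added when the pipeline
// already ends in an asynchronous transformation.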
bool ShouldInjectPrefetch(const NodeDef* last_node,
const MutableGraphView& graph) {
while (last_node != nullptr &&
absl::c_any_of(kDatasetsToSkip, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
if (last_node == nullptr) {
VLOG(1) << "The optimization inject_prefetch is not applied because graph "
"rewrite failed to find a dataset node.";
return false;
}
if (absl::c_any_of(kAsyncTransforms, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
VLOG(1) << "The optimization inject_prefetch is not applied because the "
"last transformation of the input pipeline is an asynchronous "
"transformation: "
<< last_node->op();
return false;
}
return true;
}
}  // namespace
Status InjectPrefetch::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization inject_prefetch is not applied if autotune is "
"off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) {
return absl::OkStatus();
}
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (!ShouldInjectPrefetch(last_node, graph)) {
return absl::OkStatus();
}
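  // Inject `PrefetchDataset(AUTOTUNE)` between the last transformation and
  // the fetch node.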
NodeDef prefetch_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("inject/prefetch_", last_node->name()), graph.graph(),
&prefetch_node);
prefetch_node.set_op(kPrefetchDataset);
*prefetch_node.mutable_input()->Add() = last_node->name();
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
*prefetch_node.mutable_input()->Add() = autotune_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &prefetch_node))
return absl::OkStatus();
TF_RETURN_IF_ERROR(
graph_utils::SetMetadataName(prefetch_node.name(), &prefetch_node));
auto* added_node = graph.AddNode(std::move(prefetch_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(InjectPrefetch, "inject_prefetch");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
constexpr char kOptionsDataset[] = "OptionsDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
Status Optimize(InjectPrefetch &optimizer, const GrapplerItem &item,
GraphDef *output, bool autotune) {
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
Status OptimizeWithInjectPrefetch(const GrapplerItem &item, GraphDef *output,
bool autotune) {
InjectPrefetch optimizer;
return Optimize(optimizer, item, output, autotune);
}
class InjectPrefetchParameterizedTest : public ::testing::TestWithParam<bool> {
};
TEST_P(InjectPrefetchParameterizedTest, TestAutotuneSetting) {
const bool autotune = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"range"}, {})});
item.fetch.push_back("Sink");
GraphDef inject_prefetch_output;
TF_ASSERT_OK(
OptimizeWithInjectPrefetch(item, &inject_prefetch_output, autotune));
EXPECT_EQ(autotune, graph_utils::ContainsNodeWithOp(kPrefetchDataset,
inject_prefetch_output));
EXPECT_EQ(autotune, graph_utils::ContainsGraphNodeWithName(
"inject/prefetch_range", inject_prefetch_output));
}
INSTANTIATE_TEST_SUITE_P(AutotuneSetting, InjectPrefetchParameterizedTest,
::testing::Values(false, true));
TEST(InjectPrefetchTest, FromFunctionDef) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "_Retval", {"range"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
}
TEST(InjectPrefetchTest, AlreadyPrefetched) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch", kPrefetchDataset, {"range"}, {}),
NDef("Sink", "Identity", {"prefetch"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
EXPECT_EQ(6, output.node_size());
}
TEST(InjectPrefetchTest, AlreadyParallelMap) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("parallel_map", kParallelMapDataset, {"range"},
{{"f", "__inference_Dataset_map_normalize_8232"},
{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"parallel_map"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
EXPECT_EQ(6, output.node_size());
}
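// OptionsDataset nodes are transparent: the optimizer looks through them,
// sees the existing PrefetchDataset, and injects nothing.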
TEST(InjectPrefetchTest, OptionsFollowedByPrefetched) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch", kPrefetchDataset, {"range"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("options", kOptionsDataset, {"prefetch"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", "Identity", {"options"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("inject/prefetch_options",
output));
EXPECT_EQ(7, output.node_size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_prefetch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_prefetch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e3d9076-7c79-4ee2-b5df-2a986817bca7 | cpp | tensorflow/tensorflow | disable_prefetch_legacy_autotune | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.cc | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune_test.cc | #include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kLegacyAutotune[] = "legacy_autotune";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
}
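// Rewrites every PrefetchDataset to use `legacy_autotune = false` so that
// buffer sizes are tuned by the model-based autotuner instead of the legacy
// prefetch autotuner. The rewrite is a no-op when autotune is disabled.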
Status DisablePrefetchLegacyAutotune::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization disable_prefetch_legacy_autotune is not "
"applied if autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
if (node.attr().find(kLegacyAutotune) == node.attr().end() ||
node.attr().at(kLegacyAutotune).b()) {
(*node.mutable_attr())[kLegacyAutotune].set_b(false);
stats->num_changes++;
}
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(DisablePrefetchLegacyAutotune,
"disable_prefetch_legacy_autotune");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
Status OptimizeWithDisablePrefetchLegacyAutotune(const GrapplerItem &item,
GraphDef *output,
bool autotune) {
DisablePrefetchLegacyAutotune optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
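// With autotune on, every PrefetchDataset ends up with
// `legacy_autotune = false`, including nodes that did not carry the
// attribute; with autotune off, the nodes are left untouched.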
class RewriteTest : public ::testing::TestWithParam<bool> {};
TEST_P(RewriteTest, DisablePrefetchLegacyAutotune) {
const bool autotune = GetParam();
GrapplerItem item;
item.graph = test::function::GDef({
NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch1", "PrefetchDataset", {"range"},
{{"legacy_autotune", true}}),
NDef("prefetch2", "PrefetchDataset", {"prefetch1"},
{{"legacy_autotune", false}}),
NDef("prefetch3", "PrefetchDataset", {"prefetch2"}, {}),
});
GraphDef output;
TF_ASSERT_OK(
OptimizeWithDisablePrefetchLegacyAutotune(item, &output, autotune));
NodeDef prefetch_node1 =
output.node(graph_utils::FindGraphNodeWithName("prefetch1", output));
EXPECT_EQ(prefetch_node1.attr().at("legacy_autotune").b(), !autotune);
NodeDef prefetch_node2 =
output.node(graph_utils::FindGraphNodeWithName("prefetch2", output));
EXPECT_FALSE(prefetch_node2.attr().at("legacy_autotune").b());
NodeDef prefetch_node3 =
output.node(graph_utils::FindGraphNodeWithName("prefetch3", output));
if (autotune) {
EXPECT_FALSE(prefetch_node3.attr().at("legacy_autotune").b());
} else {
EXPECT_TRUE(prefetch_node3.attr().find("legacy_autotune") ==
prefetch_node3.attr().end());
}
}
INSTANTIATE_TEST_SUITE_P(Test, RewriteTest, ::testing::Values(false, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9e90afce-2432-421a-af39-184f82dff8c1 | cpp | tensorflow/tensorflow | disable_intra_op_parallelism | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism_test.cc | #include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMaxIntraOpParallelismDataset[] = "MaxIntraOpParallelismDataset";
constexpr char kModelDataset[] = "ModelDataset";
constexpr std::array<const char*, 2> kMaxIntraOpParallelismDatasetOps = {
"MaxIntraOpParallelismDataset",
"ExperimentalMaxIntraOpParallelismDataset",
};
}
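// Caps intra-op parallelism for the whole pipeline by appending a
// `MaxIntraOpParallelismDataset(..., 1)` just before the fetch node,
// stepping over a trailing ModelDataset. Graphs that already contain a
// max-intra-op-parallelism op, or that are derived from a FunctionDef,
// are left unchanged.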
Status DisableIntraOpParallelism::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
for (const NodeDef& node : item.graph.node()) {
for (const auto& target_dataset_op : kMaxIntraOpParallelismDatasetOps) {
if (node.op() == target_dataset_op) {
return absl::OkStatus();
}
}
}
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (last_node->op() == kModelDataset) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
NodeDef* max_parallelism_value =
graph_utils::AddScalarConstNode(int64_t{1}, &graph);
NodeDef insert_node;
graph_utils::SetUniqueGraphNodeName("intra_op_parallelism", graph.graph(),
&insert_node);
insert_node.set_op(kMaxIntraOpParallelismDataset);
*insert_node.mutable_input()->Add() = last_node->name();
*insert_node.mutable_input()->Add() = max_parallelism_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &insert_node))
return absl::OkStatus();
auto* added_node = graph.AddNode(std::move(insert_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(DisableIntraOpParallelism,
"disable_intra_op_parallelism");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
class IntraOpAlreadySetTest
: public ::testing::TestWithParam<std::tuple<string, int64_t>> {};
TEST_P(IntraOpAlreadySetTest, IntraOpParallelism) {
const string op = std::get<0>(GetParam());
const int64_t value = std::get<1>(GetParam());
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *parallelism_val =
graph_utils::AddScalarConstNode<int64_t>(value, &graph);
std::vector<string> parallelism_inputs(2);
parallelism_inputs[0] = range_node->name();
parallelism_inputs[1] = parallelism_val->name();
std::vector<std::pair<string, AttrValue>> parallelism_attrs;
NodeDef *parallelism_node = graph_utils::AddNode(
"max_parallelism", op, parallelism_inputs, parallelism_attrs, &graph);
std::vector<string> sink_inputs(1);
sink_inputs[0] = parallelism_node->name();
std::vector<std::pair<string, AttrValue>> sink_attrs;
NodeDef *sink_node =
graph_utils::AddNode("Sink", "Identity", sink_inputs, sink_attrs, &graph);
item.fetch.push_back(sink_node->name());
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, item.graph));
EXPECT_EQ(item.graph.node_size(), 7);
EXPECT_EQ(parallelism_val->attr().at("value").tensor().int64_val(0), value);
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, output));
NodeDef new_parallelism_node =
output.node(graph_utils::FindGraphNodeWithOp(op, output));
NodeDef new_parallelism_val = output.node(graph_utils::FindGraphNodeWithName(
new_parallelism_node.input(1), output));
EXPECT_EQ(new_parallelism_val.attr().at("value").tensor().int64_val(0),
value);
}
INSTANTIATE_TEST_SUITE_P(
Test, IntraOpAlreadySetTest,
::testing::Combine(
::testing::Values("MaxIntraOpParallelismDataset",
"ExperimentalMaxIntraOpParallelismDataset"),
::testing::Values(1, 5)));
class IntraOpNotSetTest : public ::testing::TestWithParam<string> {};
TEST_P(IntraOpNotSetTest, IntraOpParallelism) {
const string op = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", op, {"range"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 5);
item.fetch.push_back("Sink_fake");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
EXPECT_EQ(output.node_size(), 5);
item.fetch[0] = "Sink";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (op == "_Retval") {
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
output));
EXPECT_EQ(output.node_size(), 5);
return;
}
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
INSTANTIATE_TEST_SUITE_P(Test, IntraOpNotSetTest,
::testing::Values("Identity", "_Retval"));
TEST(AutotuneWithModelTest, IntraOpParallelism) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("model", "ModelDataset", {"range"}, {}),
NDef("Sink", "Identity", {"model"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 6);
item.fetch.push_back("Sink");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 8);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef model_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(model_node.op(), "ModelDataset");
EXPECT_EQ(model_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(model_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
47d9aac5-874a-4c9c-ab63-6ce9ce4b9f1b | cpp | tensorflow/tensorflow | make_sloppy | tensorflow/core/grappler/optimizers/data/make_sloppy.cc | tensorflow/core/grappler/optimizers/data/make_sloppy_test.cc | #include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
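// Trades determinism for performance: sets `sloppy = true` on ops that
// support it and flips a "default" `deterministic` attribute to "false".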
Status MakeSloppy::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
(*node.mutable_attr())["sloppy"].set_b(true);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op()) &&
node.attr().at("deterministic").s() == "default") {
(*node.mutable_attr())["deterministic"].set_s("false");
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MakeSloppy, "make_sloppy");
}
} | #include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MakeSloppy, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode("map", "range",
"num_parallel_calls", "XTimesTwo",
false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParseExampleDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParseExampleNode("parse_example", "range",
"num_parallel_calls",
false)},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("parse_example", output));
int index = graph_utils::FindGraphNodeWithName("parse_example", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(ChangeDefault, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", "default")},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "XTimesTwo",
"default", false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeParallelBatchNode(
"batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "default")},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_sloppy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_sloppy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
510f194a-8b26-442d-b84f-a1e12abb3685 | cpp | tensorflow/tensorflow | map_parallelization | tensorflow/core/grappler/optimizers/data/map_parallelization.cc | tensorflow/core/grappler/optimizers/data/map_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMapDataset[] = "MapDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
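// Clones the MapDataset with the given name into a ParallelMapDatasetV2
// with `num_parallel_calls = AUTOTUNE` and `deterministic = "true"`; the
// `force_synchronous` attribute is dropped from the clone.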
NodeDef MakeParallelMap(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_map = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelMapDataset, graph->graph(),
                                      &parallel_map);
parallel_map.set_op(kParallelMapDataset);
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_map.add_input(num_parallel_calls->name());
parallel_map.mutable_attr()->erase("force_synchronous");
AddNodeAttr("deterministic", "true", ¶llel_map);
return parallel_map;
}
}
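// Replaces stateless MapDataset nodes with autotuned ParallelMapDatasetV2
// nodes. Maps whose function is stateful or that set
// `force_synchronous = true` are skipped, as is the whole rewrite when
// autotune is off or the graph is derived from a FunctionDef.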
Status MapParallelization::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization map_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kMapDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* map_node = get_map_node(node);
if (!map_node) continue;
auto* function =
function_library.Find(map_node->attr().at("f").func().name());
if (function_utils::IsFunctionStateful(function_library, *function, true) ||
(map_node->attr().contains("force_synchronous") &&
map_node->attr().at("force_synchronous").b())) {
continue;
}
auto* parallel_map =
graph.AddNode(MakeParallelMap(map_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(map_node->name(), parallel_map->name()));
nodes_to_delete.insert(map_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapParallelization, "map_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithMapParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
MapParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeMapNode;
const char stateless_fun_name[] = "XTimesTwo";
const char stateful_fun_name[] = "RandomUniformFn";
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, MapParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", "Identity", {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, MapParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", op, {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, MapParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range", stateful_fun_name),
MakeMapNode("map2", "map1", stateless_fun_name),
NDef("cache", "CacheDataset", {"map2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::XTimesTwo(),
test::function::RandomUniform(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f9a2563e-0ca3-4912-9e82-14717a0a6efa | cpp | tensorflow/tensorflow | graph_utils | tensorflow/core/grappler/optimizers/data/graph_utils.cc | tensorflow/core/grappler/optimizers/data/graph_utils_test.cc | #include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include <cstddef>
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
constexpr char kConstOpName[] = "Const";
constexpr char kRetValOp[] = "_Retval";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
template <typename Predicate, typename Collection>
std::vector<int> GetElementIndicesWithPredicate(const Predicate& predicate,
const Collection& collection) {
std::vector<int> indices = {};
unsigned idx = 0;
for (auto&& element : collection) {
if (predicate(element)) {
indices.push_back(idx);
}
idx++;
}
return indices;
}
std::vector<int> CreateNameIndex(const GraphDef& graph) {
std::map<string, int> names;
for (int i = 0; i < graph.node_size(); ++i) {
names[graph.node(i).name()] = i;
}
std::vector<int> index(graph.node_size());
int i = 0;
for (const auto& pair : names) {
index[i++] = pair.second;
}
return index;
}
std::vector<int> CreateInputIndex(const NodeDef& node) {
std::map<string, int> inputs;
for (int i = 0; i < node.input_size(); ++i) {
inputs[node.input(i)] = i;
}
std::vector<int> index(node.input_size());
int i = 0;
for (const auto& pair : inputs) {
index[i++] = pair.second;
}
return index;
}
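// Creates a scalar Const node of the given dtype, delegating to `add_value`
// to populate the tensor proto.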
NodeDef* AddScalarConstNodeHelper(
DataType dtype, const std::function<void(TensorProto*)>& add_value,
MutableGraphView* graph) {
NodeDef node;
node.set_op(kConstOpName);
SetUniqueGraphNodeName(kConstOpName, graph->graph(), &node);
(*node.mutable_attr())["dtype"].set_type(dtype);
std::unique_ptr<tensorflow::TensorProto> tensor =
std::make_unique<tensorflow::TensorProto>();
std::unique_ptr<tensorflow::TensorShapeProto> tensor_shape =
std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())["value"].set_allocated_tensor(tensor.release());
return graph->AddNode(std::move(node));
}
}
NodeDef* AddScalarPlaceholder(DataType dtype, MutableGraphView* graph) {
NodeDef node;
node.set_op("Placeholder");
SetUniqueGraphNodeName(node.op(), graph->graph(), &node);
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorShapeProto* shape = (*node.mutable_attr())["shape"].mutable_shape();
shape->set_unknown_rank(false);
return graph->AddNode(std::move(node));
}
NodeDef* AddNode(StringPiece name, StringPiece op,
const std::vector<string>& inputs,
const std::vector<std::pair<string, AttrValue>>& attributes,
MutableGraphView* graph) {
NodeDef node;
if (!name.empty()) {
node.set_name(string(name));
} else {
SetUniqueGraphNodeName(op, graph->graph(), &node);
}
node.set_op(string(op));
for (const string& input : inputs) {
node.add_input(input);
}
for (const auto& attr : attributes) {
(*node.mutable_attr())[attr.first] = attr.second;
}
return graph->AddNode(std::move(node));
}
template <>
NodeDef* AddScalarConstNode(bool v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_BOOL, [v](TensorProto* proto) { proto->add_bool_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(double v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_DOUBLE, [v](TensorProto* proto) { proto->add_double_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(float v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_FLOAT, [v](TensorProto* proto) { proto->add_float_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_INT32, [v](TensorProto* proto) { proto->add_int_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int64_t v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_INT64, [v](TensorProto* proto) { proto->add_int64_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(StringPiece v, MutableGraphView* graph) {
return AddScalarConstNodeHelper(
DT_STRING,
[v](TensorProto* proto) { proto->add_string_val(v.data(), v.size()); },
graph);
}
Status GetScalarConstNodeValueHelper(
const NodeDef& node, DataType dtype,
const std::function<void(const Tensor&)>& get_value) {
if (node.op() != kConstOpName)
return errors::InvalidArgument("Node ", node.name(),
" is not a Const node. Op: ", node.op());
Tensor tensor;
TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor));
if (!TensorShapeUtils::IsScalar(tensor.shape())) {
return errors::InvalidArgument(
"Node ", node.name(),
" should be a scalar but has shape: ", tensor.shape());
}
if (tensor.dtype() != dtype) {
return errors::InvalidArgument(
"Node ", node.name(), " should have type ", DataTypeString(dtype),
" but has type: ", DataTypeString(tensor.dtype()));
}
get_value(tensor);
return absl::OkStatus();
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, int64_t* value) {
return GetScalarConstNodeValueHelper(
node, DT_INT64,
[value](const Tensor& tensor) { *value = tensor.scalar<int64_t>()(); });
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, bool* value) {
return GetScalarConstNodeValueHelper(
node, DT_BOOL,
[value](const Tensor& tensor) { *value = tensor.scalar<bool>()(); });
}
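// Structural graph equality: nodes are matched by sorted name and compared
// on op, name, and (order-insensitive) inputs. Note that attribute values
// are not compared.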
bool Compare(const GraphDef& g1, const GraphDef& g2) {
if (g1.node_size() != g2.node_size()) {
return false;
}
std::vector<int> name_index1 = CreateNameIndex(g1);
std::vector<int> name_index2 = CreateNameIndex(g2);
for (int i = 0; i < g1.node_size(); ++i) {
int idx1 = name_index1[i];
int idx2 = name_index2[i];
if (g1.node(idx1).op() != g2.node(idx2).op()) {
return false;
}
if (g1.node(idx1).name() != g2.node(idx2).name()) {
return false;
}
if (g1.node(idx1).input_size() != g2.node(idx2).input_size()) {
return false;
}
std::vector<int> input_index1 = CreateInputIndex(g1.node(idx1));
std::vector<int> input_index2 = CreateInputIndex(g2.node(idx2));
for (int j = 0; j < g1.node(idx1).input_size(); ++j) {
if (!IsSameInput(g1.node(idx1).input(input_index1[j]),
g2.node(idx2).input(input_index2[j]))) {
return false;
}
}
}
return true;
}
bool ContainsGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return FindGraphFunctionWithName(name, library) != -1;
}
bool ContainsGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return FindGraphNodeWithName(name, graph) != -1;
}
bool ContainsNodeWithOp(StringPiece op, const GraphDef& graph) {
return FindGraphNodeWithOp(op, graph) != -1;
}
int FindGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return GetFirstElementIndexWithPredicate(
[&name](const FunctionDef& function) {
return function.signature().name() == name;
},
library.function());
}
int FindGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&name](const NodeDef& node) { return node.name() == name; },
graph.node());
}
int FindGraphNodeWithOp(StringPiece op, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
std::vector<int> FindAllGraphNodesWithOp(const string& op,
const GraphDef& graph) {
return GetElementIndicesWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph) {
if (node.input_size() == 0) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), 0);
return graph.GetRegularFanin(input_port).node;
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph,
int64_t i) {
if (node.input_size() <= i) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), i);
return graph.GetRegularFanin(input_port).node;
}
Status GetDatasetOutputTypesAttr(const NodeDef& node,
DataTypeVector* output_types) {
for (const string& attr_name : {"output_types", "Toutput_types"}) {
if (node.attr().contains(attr_name)) {
return GetNodeAttr(node, attr_name, output_types);
}
}
return errors::InvalidArgument("Could not find output_types attr for node: ",
node.name(), " with op: ", node.op());
}
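// Makes `prefix` unique within the graph by appending "/_<id>"; for names
// ending in "_generated", the id is inserted before that suffix so the
// marker stays last.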
void SetUniqueGraphNodeName(StringPiece prefix, GraphDef* graph,
NodeDef* node) {
string name = string(prefix);
int id = graph->node_size();
while (ContainsGraphNodeWithName(name, *graph)) {
if (name.rfind("_generated") != string::npos &&
(name.rfind("_generated") == (name.size() - strlen("_generated")))) {
name.insert(name.rfind("_generated"), strings::StrCat("/_", id));
} else {
name = strings::StrCat(prefix, "/_", id);
}
++id;
}
node->set_name(std::move(name));
}
void SetUniqueGraphFunctionName(StringPiece prefix,
const FunctionDefLibrary* library,
FunctionDef* function) {
string name = string(prefix);
int id = library->function_size();
while (ContainsGraphFunctionWithName(name, *library)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
function->mutable_signature()->set_name(std::move(name));
}
void CopyAttribute(const string& attribute_name, const NodeDef& from,
NodeDef* to_node) {
(*to_node->mutable_attr())[attribute_name] = from.attr().at(attribute_name);
}
void ConcatAttributeList(const string& attribute_name, const NodeDef& first,
const NodeDef& second, NodeDef* to_node) {
CopyAttribute(attribute_name, first, to_node);
(*to_node->mutable_attr())
.at(attribute_name)
.mutable_list()
->MergeFrom(second.attr().at(attribute_name).list());
}
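// Renames colliding op nodes by appending "_<counter>" until every node
// name in the graph is unique.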
Status EnsureNodeNamesUnique(Graph* g) {
std::unordered_map<string, int> name_map;
for (auto node : g->op_nodes()) {
const string& prefix = node->name();
if (auto entry = gtl::FindOrNull(name_map, prefix)) {
string unique_name;
do {
unique_name = strings::StrCat(prefix, "_", ++(*entry));
} while (name_map.find(unique_name) != name_map.end());
name_map.insert({unique_name, 0});
node->set_name(std::move(unique_name));
} else {
name_map.insert({node->name(), 0});
}
}
return absl::OkStatus();
}
Status GetFetchNode(const MutableGraphView& graph, const GrapplerItem& item,
NodeDef** fetch_node) {
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
*fetch_node = graph.GetNode(item.fetch.at(0));
return absl::OkStatus();
}
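// A GrapplerItem stands for the body of a FunctionDef when every fetch node
// is a `_Retval` op (vacuously true when there are no fetches).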
bool IsItemDerivedFromFunctionDef(const GrapplerItem& item,
const MutableGraphView& graph_view) {
for (const auto& fetch_name : item.fetch) {
auto fetch = graph_view.GetNode(fetch_name);
if (fetch != nullptr && fetch->op() != kRetValOp) {
return false;
}
}
return true;
}
void MaybeSetFusedMetadata(const NodeDef& node1, const NodeDef& node2,
NodeDef* fused_node) {
data::Metadata metadata1;
if (node1.attr().contains("metadata")) {
metadata1.ParseFromString(node1.attr().at("metadata").s());
}
data::Metadata metadata2;
if (node2.attr().contains("metadata")) {
metadata2.ParseFromString(node2.attr().at("metadata").s());
}
data::Metadata fused_metadata;
auto normalize_name = [](const string& name) {
return name.empty() ? "?" : name;
};
*fused_metadata.mutable_name() =
strings::StrCat("fused(", normalize_name(metadata1.name()), ",",
normalize_name(metadata2.name()), ")");
fused_metadata.SerializeToString(
(*fused_node->mutable_attr())["metadata"].mutable_s());
}
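// Copies `output_types` (falling back to `Toutput_types`) and
// `output_shapes` from `from` to `to_node`, returning false if either
// attribute is missing.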
bool CopyShapesAndTypesAttrs(const NodeDef& from, NodeDef* to_node) {
auto* attr = gtl::FindOrNull(from.attr(), kOutputTypes);
attr = (attr == nullptr ? gtl::FindOrNull(from.attr(), kToutputTypes) : attr);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputTypes] = *attr;
attr = gtl::FindOrNull(from.attr(), kOutputShapes);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputShapes] = *attr;
return true;
}
namespace {
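// Op sets backing the HasSloppyAttr / HasReplicateOnSplitAttr /
// HasDeterministicAttr predicates below.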
const auto* kSloppyAttrOps = new absl::flat_hash_set<string>{
"ParallelInterleaveDatasetV2",
"ParallelMapDataset",
"ParseExampleDataset",
};
const auto* kReplicateOnSplitAttrOps = new absl::flat_hash_set<string>{
"TensorSliceDataset",
"RangeDataset",
};
const auto* kDeterministicAttrOps = new absl::flat_hash_set<string>{
"LegacyParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDatasetV2",
"ParallelBatchDataset",
};
}
bool HasSloppyAttr(const string& op) { return kSloppyAttrOps->contains(op); }
bool HasReplicateOnSplitAttr(const string& op) {
return kReplicateOnSplitAttrOps->contains(op);
}
bool HasDeterministicAttr(const string& op) {
return kDeterministicAttrOps->contains(op);
}
Status SetMetadataName(const std::string& name, NodeDef* node) {
data::Metadata metadata;
if (node->attr().contains("metadata")) {
metadata.ParseFromString(node->attr().at("metadata").s());
}
if (!metadata.name().empty()) {
return errors::InvalidArgument("Node ", node->name(),
" already has a metadata name \"",
metadata.name(), "\".");
}
*metadata.mutable_name() = name;
metadata.SerializeToString((*node->mutable_attr())["metadata"].mutable_s());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
using test::function::NDef;
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
TEST(GraphUtilsTest, GetFirstElementIndexWithPredicate) {
std::vector<int> vec({1, 2, 3, 4, 5, 6});
auto result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 3 == 0; }, vec);
EXPECT_EQ(result, 2);
result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 7 == 0; }, vec);
EXPECT_EQ(result, -1);
}
TEST(GraphUtilsTest, AddScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(bool_node->name(), *graph.graph()));
EXPECT_EQ(bool_node->attr().at("value").tensor().bool_val(0), true);
}
TEST(GraphUtilsTest, AddScalarConstNodeDouble) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* double_node = AddScalarConstNode<double>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(double_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(double_node->attr().at("value").tensor().double_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeFloat) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* float_node = AddScalarConstNode<float>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(float_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(float_node->attr().at("value").tensor().float_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int_node = AddScalarConstNode<int>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int_node->name(), *graph.graph()));
EXPECT_EQ(int_node->attr().at("value").tensor().int_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int64_node->name(), *graph.graph()));
EXPECT_EQ(int64_node->attr().at("value").tensor().int64_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeString) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* string_node = AddScalarConstNode<StringPiece>("hello", &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(string_node->name(), *graph.graph()));
EXPECT_EQ(string_node->attr().at("value").tensor().string_val(0), "hello");
}
TEST(GraphUtilsTest, GetScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
int64_t result;
EXPECT_TRUE(GetScalarConstNodeValue<int64_t>(*int64_node, &result).ok());
EXPECT_EQ(result, 128);
}
TEST(GraphUtilsTest, GetScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
bool result;
EXPECT_TRUE(GetScalarConstNodeValue<bool>(*bool_node, &result).ok());
EXPECT_EQ(result, true);
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithNonConst) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* non_const = AddScalarPlaceholder(DT_INT64, &graph);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(*non_const, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Placeholder is not a Const node. Op: Placeholder");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithType) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
bool result;
Status s = GetScalarConstNodeValue<bool>(*int64_node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Const should have type bool but has type: int64");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithVector) {
NodeDef node;
node.set_name("Const");
node.set_op("Const");
(*node.mutable_attr())["dtype"].set_type(DT_INT64);
auto tensor = (*node.mutable_attr())["value"].mutable_tensor();
tensor->set_dtype(DT_INT64);
tensor->mutable_tensor_shape()->mutable_dim()->Add()->set_size(1);
tensor->add_int64_val(128);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Node Const should be a scalar but has shape: [1]");
}
TEST(GraphUtilsTest, Compare) {
GraphDef graph_def_a;
MutableGraphView graph_a(&graph_def_a);
GraphDef graph_def_b;
MutableGraphView graph_b(&graph_def_b);
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
AddNode("A", "OpA", {}, {}, &graph_a);
AddNode("B", "OpB", {"A"}, {}, &graph_a);
EXPECT_FALSE(Compare(graph_def_a, graph_def_b));
graph_def_b.mutable_node()->CopyFrom(graph_def_a.node());
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
}
TEST(GraphUtilsTest, ContainsGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName("A", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
}
TEST(GraphUtilsTest, ContainsGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_FALSE(ContainsGraphFunctionWithName("new_function", library));
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_TRUE(
ContainsGraphFunctionWithName(new_function->signature().name(), library));
}
TEST(GraphUtilsTest, ContainsNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsNodeWithOp("OpA", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
}
TEST(GraphUtilsTest, FindGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_NE(FindGraphNodeWithName("A", *graph.graph()), -1);
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
}
TEST(GraphUtilsTest, FindGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_EQ(FindGraphFunctionWithName("new_function", library), -1);
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_NE(
FindGraphFunctionWithName(new_function->signature().name(), library), -1);
}
TEST(GraphUtilsTest, FindGraphNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"A"}, {}, &graph);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), 0);
EXPECT_TRUE(graph.DeleteNodes({"B"}).ok());
EXPECT_EQ(FindGraphNodeWithOp("OpB", *graph.graph()), -1);
EXPECT_EQ(FindGraphNodeWithName("A2", *graph.graph()), 1);
}
TEST(GraphUtilsTest, FindAllGraphNodesWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"B"}, {}, &graph);
std::vector<int> result_indices =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices.size(), 2);
EXPECT_EQ(result_indices.at(0), 0);
EXPECT_EQ(result_indices.at(1), 2);
EXPECT_TRUE(graph.DeleteNodes({"A2"}).ok());
std::vector<int> result_indices_new =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices_new.size(), 1);
EXPECT_EQ(result_indices_new.at(0), 0);
}
TEST(GraphUtilsTest, SetUniqueGraphNodeName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node1->name(), node2->name());
EXPECT_TRUE(graph.DeleteNodes({node1->name()}).ok());
NodeDef* node3 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node2->name(), node3->name());
}
TEST(GraphUtilsTest, SetUniqueGraphFunctionName) {
FunctionDefLibrary library;
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
FunctionDef* other_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, other_function);
EXPECT_NE(new_function->signature().name(),
other_function->signature().name());
}
TEST(GraphUtilsTest, GetInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {node1->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node2, graph), node1);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, GetIthInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
NodeDef* node3 = AddNode("", "A", {node1->name(), node2->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node3, graph), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 1), node2);
EXPECT_EQ(GetInputNode(*node3, graph, 0), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 2), nullptr);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, EnsureNodeNamesUnique) {
Graph g(OpRegistry::Global());
Node *const_0, *const_1, *const_2;
Tensor tensor(DT_INT32, {});
tensor.scalar<int32>()() = 5;
for (auto node : {&const_0, &const_1}) {
TF_EXPECT_OK(NodeBuilder("Const", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, node));
}
TF_EXPECT_OK(NodeBuilder("Const_1", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, &const_2));
TF_EXPECT_OK(EnsureNodeNamesUnique(&g));
EXPECT_NE(const_0->name(), const_1->name());
EXPECT_NE(const_1->name(), const_2->name());
EXPECT_NE(const_0->name(), const_2->name());
}
TEST(GraphUtilsTest, TestGetFetchNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node3->name());
NodeDef* sink_node;
TF_EXPECT_OK(GetFetchNode(graph, item, &sink_node));
EXPECT_EQ(sink_node->name(), node3->name());
}
TEST(GraphUtilsTest, TestFindSinkNodeMultipleFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node2->name());
item.fetch.push_back(node3->name());
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestFindSinkNodeNoFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
AddNode("node3", "Identity", {node2->name()}, {}, &graph);
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoShapes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputTypes, absl::Span<const DataType>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, absl::Span<const TensorShape>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsOutputTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsToutputTypes) {
NodeDef from = NDef("tensor", "TensorDataset", {},
{{kOutputShapes, 666}, {kToutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestSetMetadataName) {
NodeDef node = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
EXPECT_TRUE(SetMetadataName("metadata_name", &node).ok());
EXPECT_TRUE(node.attr().contains("metadata"));
data::Metadata metadata;
metadata.ParseFromString(node.attr().at("metadata").s());
EXPECT_EQ("metadata_name", metadata.name());
EXPECT_FALSE(SetMetadataName("new_metadata_name", &node).ok());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dda2eaf-2fed-45a0-a247-17e9f1f0761c | cpp | tensorflow/tensorflow | filter_parallelization | tensorflow/core/grappler/optimizers/data/filter_parallelization.cc | tensorflow/core/grappler/optimizers/data/filter_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFilterDataset[] = "FilterDataset";
constexpr char kParallelFilterDataset[] = "ParallelFilterDataset";
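// Clones the FilterDataset node with the given name, rewrites it into a
// ParallelFilterDataset, and appends an autotuned `num_parallel_calls` input.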
NodeDef MakeParallelFilter(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_filter = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelFilterDataset, graph->graph(),
¶llel_filter);
parallel_filter.set_op(kParallelFilterDataset);
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_filter.add_input(num_parallel_calls->name());
AddNodeAttr("deterministic", "true", ¶llel_filter);
return parallel_filter;
}
}
Status FilterParallelization::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization filter_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kFilterDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* filter_node = get_filter_node(node);
if (!filter_node) continue;
auto* function = function_library.Find(
filter_node->attr().at("predicate").func().name());
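    // Skip filters whose predicate is stateful; evaluating it in parallel
    // could change observable behavior.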
if (function_utils::IsFunctionStateful(function_library, *function, true)) {
continue;
}
auto* parallel_filter =
graph.AddNode(MakeParallelFilter(filter_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(filter_node->name(), parallel_filter->name()));
nodes_to_delete.insert(filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterParallelization, "filter_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithFilterParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
FilterParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeFilterNode;
const char stateless_fun_name[] = "NonZero";
const char stateful_fun_name[] = "RandomUniformLess";
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, FilterParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", "Identity", {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
!autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, FilterParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter", "range", stateless_fun_name),
NDef("Sink", op, {"filter"}, {})},
{
test::function::NonZero(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("filter", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, FilterParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range", stateful_fun_name),
MakeFilterNode("filter2", "filter1", stateless_fun_name),
NDef("cache", "CacheDataset", {"filter2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::NonZero(),
test::function::RandomUniformLess(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithFilterParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelFilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed7f002c-4012-480a-8b57-224f7c1db26b | cpp | tensorflow/tensorflow | enable_gradient_descent | tensorflow/core/grappler/optimizers/data/enable_gradient_descent.cc | tensorflow/core/grappler/optimizers/data/enable_gradient_descent_test.cc | #include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kAlgorithm[] = "algorithm";
constexpr char kModelDataset[] = "ModelDataset";
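// Indices of the autotuning algorithms as stored in the `algorithm`
// attribute of ModelDataset.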
constexpr int64_t HILL_CLIMB = 0;
constexpr int64_t GRADIENT_DESCENT = 1;
}
Status EnableGradientDescent::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization enable_gradient_descent is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
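  // Locate the ModelDataset node and switch its algorithm from hill climb to
  // gradient descent; any other algorithm value is left unchanged.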
int index = graph_utils::FindGraphNodeWithOp(kModelDataset, *output);
NodeDef& model_node = *(output->mutable_node(index));
if (model_node.attr().at(kAlgorithm).i() == HILL_CLIMB) {
(*model_node.mutable_attr())[kAlgorithm].set_i(GRADIENT_DESCENT);
stats->num_changes++;
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(EnableGradientDescent, "enable_gradient_descent");
}
} | #include "tensorflow/core/grappler/optimizers/data/enable_gradient_descent.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithEnableGradientDescent(const GrapplerItem &item,
GraphDef *output, bool autotune) {
EnableGradientDescent optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleRewrite
: public ::testing::TestWithParam<std::tuple<bool, int64_t, string>> {};
TEST_P(SimpleRewrite, EnableGradientDescentTest) {
const bool autotune = std::get<0>(GetParam());
const int64_t algorithm_index = std::get<1>(GetParam());
const string op = std::get<2>(GetParam());
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 5}, {"dtype", DT_INT32}}),
NDef("batch", "BatchDataset", {"range", "batch_size"}, {}),
NDef("model", "ModelDataset", {"batch"},
{{"algorithm", algorithm_index}}),
NDef("Sink", op, {"model"}, {})});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithEnableGradientDescent(item, &output, autotune));
EXPECT_EQ(item.graph.node().size(), output.node().size());
NodeDef model_node =
output.node(graph_utils::FindGraphNodeWithName("model", output));
EXPECT_EQ(model_node.attr().at("algorithm").i(),
(autotune && op != "_Retval") ? 1 : algorithm_index);
}
INSTANTIATE_TEST_SUITE_P(
Test, SimpleRewrite,
::testing::Combine(::testing::Values(false, true), ::testing::Values(0, 1),
::testing::Values("Identity", "_Retval")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/enable_gradient_descent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/enable_gradient_descent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb54a9bb-bafa-4c6f-b589-e6cee64df33b | cpp | tensorflow/tensorflow | use_private_thread_pool | tensorflow/core/grappler/optimizers/data/use_private_thread_pool.cc | tensorflow/core/grappler/optimizers/data/use_private_thread_pool_test.cc | #include "tensorflow/core/grappler/optimizers/data/use_private_thread_pool.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrivateThreadPoolDataset[] = "PrivateThreadPoolDataset";
constexpr char kModelDataset[] = "ModelDataset";
}
Status UsePrivateThreadPool::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
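  // If the user already installed a private thread pool, leave the pipeline
  // unchanged.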
for (const NodeDef& node : item.graph.node()) {
if (node.op() == kPrivateThreadPoolDataset) {
return absl::OkStatus();
}
}
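  // Find the insertion point: the last dataset node before the fetch,
  // skipping over a trailing ModelDataset if present.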
NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
if (last_node->op() == kModelDataset) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
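  // Add the num_threads input as a scalar constant 0 (0 is assumed to defer
  // the actual thread count to the runtime).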
NodeDef* num_threads_value =
graph_utils::AddScalarConstNode(int64_t{0}, &graph);
NodeDef insert_node;
graph_utils::SetUniqueGraphNodeName("private_thread_pool", graph.graph(),
&insert_node);
insert_node.set_op(kPrivateThreadPoolDataset);
*insert_node.mutable_input()->Add() = last_node->name();
*insert_node.mutable_input()->Add() = num_threads_value->name();
if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &insert_node))
return absl::OkStatus();
auto* added_node = graph.AddNode(std::move(insert_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(last_node->name(), added_node->name()));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(UsePrivateThreadPool, "use_private_thread_pool");
}
} | #include "tensorflow/core/grappler/optimizers/data/use_private_thread_pool.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
class ThreadPoolOpAlreadySetTest : public ::testing::TestWithParam<int64_t> {};
TEST_P(ThreadPoolOpAlreadySetTest, PrivateThreadPool) {
const int64_t num_of_threads = GetParam();
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *num_of_threads_val =
graph_utils::AddScalarConstNode<int64_t>(num_of_threads, &graph);
std::vector<string> private_threads_inputs(2);
private_threads_inputs[0] = range_node->name();
private_threads_inputs[1] = num_of_threads_val->name();
std::vector<std::pair<string, AttrValue>> private_threads_attrs;
NodeDef *private_threads_node = graph_utils::AddNode(
"private_thread_pool", "PrivateThreadPoolDataset", private_threads_inputs,
private_threads_attrs, &graph);
std::vector<string> sink_inputs(1);
sink_inputs[0] = private_threads_node->name();
std::vector<std::pair<string, AttrValue>> sink_attrs;
NodeDef *sink_node =
graph_utils::AddNode("Sink", "Identity", sink_inputs, sink_attrs, &graph);
item.fetch.push_back(sink_node->name());
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 7);
EXPECT_EQ(num_of_threads_val->attr().at("value").tensor().int64_val(0),
num_of_threads);
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef new_private_threads_node = output.node(
graph_utils::FindGraphNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef new_num_of_threads_val =
output.node(graph_utils::FindGraphNodeWithName(
new_private_threads_node.input(1), output));
EXPECT_EQ(new_num_of_threads_val.attr().at("value").tensor().int64_val(0),
num_of_threads);
}
INSTANTIATE_TEST_SUITE_P(Test, ThreadPoolOpAlreadySetTest,
::testing::Values(1, 2, 4));
class ThreadPoolOpNotSetTest : public ::testing::TestWithParam<string> {};
TEST_P(ThreadPoolOpNotSetTest, PrivateThreadPool) {
const string op = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", op, {"range"}, {})});
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 5);
item.fetch.push_back("Sink_fake");
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
EXPECT_EQ(output.node_size(), 5);
item.fetch[0] = "Sink";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (op == "_Retval") {
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
EXPECT_EQ(output.node_size(), 5);
return;
}
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef private_threads_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(private_threads_node.op(), "PrivateThreadPoolDataset");
EXPECT_EQ(private_threads_node.input_size(), 2);
NodeDef range_node = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef num_of_threads_val = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(1), output));
EXPECT_EQ(num_of_threads_val.attr().at("value").tensor().int64_val(0), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, ThreadPoolOpNotSetTest,
::testing::Values("Identity", "_Retval"));
TEST(AutotuneWithModelTest, PrivateThreadPool) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("model", "ModelDataset", {"range"}, {}),
NDef("Sink", "Identity", {"model"}, {})});
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", item.graph));
EXPECT_EQ(item.graph.node_size(), 6);
item.fetch.push_back("Sink");
UsePrivateThreadPool optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 8);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("PrivateThreadPoolDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef model_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(model_node.op(), "ModelDataset");
EXPECT_EQ(model_node.input_size(), 1);
NodeDef private_threads_node = output.node(
graph_utils::FindGraphNodeWithName(model_node.input(0), output));
EXPECT_EQ(private_threads_node.op(), "PrivateThreadPoolDataset");
EXPECT_EQ(private_threads_node.input_size(), 2);
NodeDef range_node = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef num_of_threads_val = output.node(graph_utils::FindGraphNodeWithName(
private_threads_node.input(1), output));
EXPECT_EQ(num_of_threads_val.attr().at("value").tensor().int64_val(0), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/use_private_thread_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/use_private_thread_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
120ecc99-78d8-4bb3-b992-5841a2470b9d | cpp | tensorflow/tensorflow | filter_fusion | tensorflow/core/grappler/optimizers/data/filter_fusion.cc | tensorflow/core/grappler/optimizers/data/filter_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
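// Builds a FilterDataset node that consumes the first filter's input and
// evaluates the fused predicate, inheriting output shapes/types from the
// second filter.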
NodeDef MakeFusedFilterNode(const NodeDef& first_filter_node,
const NodeDef& second_filter_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName("fused_filter", graph->graph(),
&fused_node);
fused_node.set_op("FilterDataset");
fused_node.add_input(first_filter_node.input(0));
auto attr = first_filter_node.attr().at("predicate");
*attr.mutable_func()->mutable_name() = fused_function.signature().name();
(*fused_node.mutable_attr())["predicate"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", first_filter_node, &fused_node);
for (auto key : {"output_shapes", "output_types"})
graph_utils::CopyAttribute(key, second_filter_node, &fused_node);
graph_utils::MaybeSetFusedMetadata(first_filter_node, second_filter_node,
&fused_node);
return fused_node;
}
}
Status FilterFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
output->library());
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == "FilterDataset" && node.input_size() == 1) return &node;
return nullptr;
};
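  // Fuses the two predicates into a single function that evaluates their
  // conjunction lazily: the second predicate runs only if the first accepts.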
auto make_fused_function =
[&](const NodeDef* first_filter_node,
const NodeDef* second_filter_node) -> FunctionDef* {
const auto& parent_fun = first_filter_node->attr().at("predicate");
const FunctionDef* first_func =
function_library.Find(parent_fun.func().name());
const auto& fun = second_filter_node->attr().at("predicate");
const FunctionDef* second_func = function_library.Find(fun.func().name());
if (!fusion_utils::HasSameSignature(first_func->signature(),
second_func->signature())) {
VLOG(1) << "Can't fuse Filters because they have different signature\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*first_func, *second_func, "fused_predicate",
fusion_utils::SameSignature, fusion_utils::SameInput,
fusion_utils::LazyConjunctionOutput, fusion_utils::LazyConjunctionNodes,
output->mutable_library());
};
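  // Scan the topologically sorted graph for FilterDataset -> FilterDataset
  // chains and collapse each pair into one fused filter.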
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* second_filter_node = get_filter_node(node);
if (!second_filter_node) continue;
const NodeDef* first_filter_node =
get_filter_node(*graph_utils::GetInputNode(*second_filter_node, graph));
if (!first_filter_node) continue;
const auto* fused_predicate =
make_fused_function(first_filter_node, second_filter_node);
if (!fused_predicate) continue;
const auto* fused_filter_node = graph.AddNode(MakeFusedFilterNode(
*first_filter_node, *second_filter_node, *fused_predicate, &graph));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(second_filter_node->name(),
fused_filter_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_predicate));
nodes_to_delete.insert(first_filter_node->name());
nodes_to_delete.insert(second_filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(FilterFusion, "filter_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/filter_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeFilterNode;
TEST(FilterFusionTest, FuseTwoFilterIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range"),
MakeFilterNode("filter2", "filter1")},
{
test::function::IsZero(),
});
FilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
}
TEST(FilterFusionTest, FuseThreeNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeFilterNode("filter1", "range"), MakeFilterNode("filter2", "filter1"),
MakeFilterNode("filter3", "filter2"),
NDef("cache", "CacheDataset", {"filter3", "filename"}, {})},
{
test::function::IsZero(),
});
FilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter3", output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/filter_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
46a3c061-5a2a-484e-80e6-f9a69d4d72b6 | cpp | tensorflow/tensorflow | split_utils | tensorflow/core/data/split_utils.cc | tensorflow/core/data/split_utils_test.cc | #include "tensorflow/core/data/split_utils.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNumToSkip[] = "num_to_skip";
constexpr char kSplitProvider[] = "split_provider";
constexpr char kSlash[] = "/";
constexpr char kIndex[] = "index";
}
IndexSplitProvider::IndexSplitProvider(int64_t n) : i_(0), n_(n) {
VLOG(3) << "Created index split provider with " << n << " splits.";
}
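// Emits the splits 0, 1, ..., n-1 as scalar int64 tensors.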
absl::Status IndexSplitProvider::GetNext(Tensor* split, bool* end_of_splits) {
tsl::mutex_lock l(mu_);
if (i_ >= n_) {
*end_of_splits = true;
return absl::OkStatus();
}
*end_of_splits = false;
*split = Tensor(DT_INT64, TensorShape{});
split->scalar<int64_t>()() = i_++;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
i_ = 0;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
return writer->WriteScalar(full_name(kIndex), i_);
}
absl::Status IndexSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
return reader->ReadScalar(full_name(kIndex), &i_);
}
int64_t IndexSplitProvider::Cardinality() const {
if (n_ == tsl::kint64max) {
return kInfiniteCardinality;
}
return n_;
}
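// Wraps another split provider and yields every num_shards-th split starting
// at shard_index; e.g. (see the tests below) sharding IndexSplitProvider(6)
// with num_shards=3 and shard_index=1 yields {1, 4}.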
ShardingSplitProvider::ShardingSplitProvider(
int64_t num_shards, int64_t shard_index,
std::shared_ptr<SplitProvider> split_provider)
: num_shards_(num_shards),
shard_index_(shard_index),
split_provider_(split_provider),
num_to_skip_(shard_index_) {}
absl::Status ShardingSplitProvider::GetNext(Tensor* split,
bool* end_of_splits) {
tsl::mutex_lock l(mu_);
while (num_to_skip_ > 0) {
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
if (*end_of_splits) {
return absl::OkStatus();
}
num_to_skip_--;
}
num_to_skip_ = num_shards_ - 1;
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Reset());
num_to_skip_ = shard_index_;
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Save(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
writer));
return writer->WriteScalar(full_name(kNumToSkip), num_to_skip_);
}
absl::Status ShardingSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Restore(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
reader));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kNumToSkip), &num_to_skip_));
return absl::OkStatus();
}
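// Returns the single split provider from the iterator context, failing if
// the dataset was handed zero or several providers.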
absl::StatusOr<std::shared_ptr<SplitProvider>> GetSingleSplitProvider(
IteratorContext* ctx, const DatasetBase* dataset) {
if (ctx->split_providers().size() != 1) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to get single split provider for dataset ",
dataset->DebugString(), ". Found ",
ctx->split_providers().size(), " split providers"));
}
return ctx->split_providers()[0];
}
absl::StatusOr<std::vector<std::unique_ptr<SplitProvider>>> GetSplitProviders(
const DatasetBase* dataset) {
std::vector<std::unique_ptr<SplitProvider>> result;
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
for (const auto& input : inputs) {
std::vector<std::unique_ptr<SplitProvider>> providers;
TF_RETURN_IF_ERROR(input->MakeSplitProviders(&providers));
for (auto& provider : providers) {
result.push_back(std::move(provider));
}
}
return result;
}
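// Builds one child IteratorContext per input dataset, partitioning the
// parent context's split providers so that each input receives exactly the
// providers corresponding to its own sources.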
absl::StatusOr<std::vector<IteratorContext>> CreateInputIteratorContexts(
IteratorContext* ctx, const DatasetBase* dataset) {
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
std::vector<IteratorContext> result;
if (ctx->split_providers().empty()) {
for (int i = 0; i < inputs.size(); ++i) {
result.emplace_back(ctx);
}
return result;
}
int64_t num_sources = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i]->num_sources() < 0) {
return absl::FailedPreconditionError(absl::StrCat(
"Failed to determine the number of sources for dataset of type ",
inputs[i]->type_string()));
}
num_sources += inputs[i]->num_sources();
}
if (num_sources != ctx->split_providers().size()) {
return absl::FailedPreconditionError(absl::StrCat(
"Attempted to feed ", ctx->split_providers().size(),
" split providers into a dataset with ", num_sources, " sources"));
}
int64_t split_provider_index = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
IteratorContext::Params params(ctx);
params.split_providers.clear();
for (int j = 0; j < inputs[i]->num_sources(); ++j) {
params.split_providers.push_back(
ctx->split_providers()[split_provider_index + j]);
}
split_provider_index += inputs[i]->num_sources();
result.emplace_back(std::move(params));
}
return result;
}
}
} | #include "tensorflow/core/data/split_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
std::string full_name(const std::string& name) {
return FullName("test", name);
}
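// Round-trips the provider's state through VariantTensorData to exercise
// Save and Restore.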
Status SaveAndRestore(SplitProvider* split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider->Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider->Restore(full_name, &reader));
return absl::OkStatus();
}
Status CheckOutput(SplitProvider* split_provider,
std::vector<Tensor> expected) {
int64_t next = 0;
bool end_of_splits = false;
while (!end_of_splits) {
Tensor split;
TF_RETURN_IF_ERROR(split_provider->GetNext(&split, &end_of_splits));
if (!end_of_splits) {
test::ExpectEqual(split, expected[next++]);
}
}
EXPECT_EQ(next, expected.size());
return absl::OkStatus();
}
TEST(IndexSplitProviderTest, Empty) {
IndexSplitProvider split_provider(0);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(IndexSplitProviderTest, One) {
IndexSplitProvider split_provider(1);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}})));
}
TEST(IndexSplitProviderTest, Three) {
IndexSplitProvider split_provider(3);
TF_EXPECT_OK(
CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})));
}
TEST(IndexSplitProviderTest, SaveAndRestore) {
IndexSplitProvider split_provider(4);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}});
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
TEST(ShardingSplitProviderTest, TwoWayShardZero) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 0, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{0}, {2}})));
}
TEST(ShardingSplitProviderTest, TwoWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {3}})));
}
TEST(ShardingSplitProviderTest, ThreeWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(6);
ShardingSplitProvider split_provider(3, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {4}})));
}
TEST(ShardingSplitProviderTest, Empty) {
auto base = std::make_shared<IndexSplitProvider>(1);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(ShardingSplitProviderTest, SaveAndRestore) {
auto base = std::make_shared<IndexSplitProvider>(6);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{1}, {4}});
ShardingSplitProvider split_provider(3, 1, base);
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/split_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/split_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9c265655-7741-4db4-8797-aa96e0bba534 | cpp | tensorflow/tensorflow | auto_shard | tensorflow/core/grappler/optimizers/data/auto_shard.cc | tensorflow/core/grappler/optimizers/data/auto_shard_test.cc | #include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include <array>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/kernels/data/shard_dataset_op.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
using tensorflow::data::AutoShardPolicy;
constexpr char kAssertCardinalityDatasetOpName[] = "AssertCardinalityDataset";
constexpr char kBatchDatasetOpName[] = "BatchDataset";
constexpr char kBatchDatasetV2OpName[] = "BatchDatasetV2";
constexpr char kMapAndBatchDatasetOpName[] = "MapAndBatchDataset";
constexpr char kMapDatasetOpName[] = "MapDataset";
constexpr char kShardDatasetOpName[] = "ShardDataset";
constexpr char kShuffleDatasetOpName[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2OpName[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3OpName[] = "ShuffleDatasetV3";
constexpr char kParallelBatchDatasetOpName[] = "ParallelBatchDataset";
constexpr char kPrefetchDatasetOpName[] = "PrefetchDataset";
constexpr char kFinalizeDatasetOpName[] = "FinalizeDataset";
constexpr char kOptionsDatasetOpName[] = "OptionsDataset";
constexpr char kRebatchDatasetOpName[] = "RebatchDataset";
constexpr char kRebatchDatasetV2OpName[] = "RebatchDatasetV2";
constexpr char kTensorDatasetOpName[] = "TensorDataset";
constexpr char kTensorSliceDatasetOpName[] = "TensorSliceDataset";
constexpr char kPlaceholderOpName[] = "Placeholder";
constexpr char kConstOpName[] = "Const";
constexpr char kNumWorkersAttrName[] = "num_workers";
constexpr char kNumReplicasAttrName[] = "num_replicas";
constexpr char kIndexAttrName[] = "index";
constexpr char kAutoShardPolicyAttrName[] = "auto_shard_policy";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
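// Dataset ops that read records from files; pipelines rooted at these can be
// sharded at the file level.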
constexpr std::array<const char*, 6> kReaderDatasetOps = {
"ArrayRecordDataset",
"FixedLengthRecordDataset",
"RecordIODataset",
"SSTableDataset",
"TextLineDataset",
"TFRecordDataset"
};
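// Dataset ops that take more than one input dataset; each input is recursed
// into separately.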
constexpr std::array<const char*, 2> kMultipleInputsDatasetOps = {
"ConcatenateDataset",
"ZipDataset"
};
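// Ops that simply forward their input dataset, so the search for a shard
// point can pass through them.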
constexpr std::array<const char*, 32> kPassThroughOps = {
"_Retval",
"AssertNextDataset",
"BatchDataset",
"CacheDataset",
"ExperimentalMapAndBatchDataset",
"ExperimentalParseExampleDataset",
"ExperimentalRebatchDataset",
"FilterDataset",
"FinalizeDataset",
"Identity",
"MapAndBatchDataset",
"MapDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"OptimizeDataset",
"OptionsDataset",
"PaddedBatchDataset",
"ParallelBatchDataset",
"ParallelMapDataset",
"ParseExampleDataset",
"PrefetchDataset",
"PrivateThreadPoolDataset",
"ReduceDataset",
"RebatchDataset",
"RepeatDataset",
"ShardDataset",
"ShuffleAndRepeatDataset",
"ShuffleDataset",
"SkipDataset",
"TakeDataset",
"UnbatchDataset",
"WindowDataset",
};
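// Ops whose function attribute may itself contain a reader dataset.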
constexpr std::array<const char*, 5> kFuncDatasetOps = {
"ExperimentalParallelInterleaveDataset",
"FlatMapDataset",
"InterleaveDataset",
"LegacyParallelInterleaveDataset",
"ParallelInterleaveDataset",
};
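// Source datasets that cannot be sharded at the file level.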
constexpr std::array<const char*, 5> kUnshardableSourceDatasetOps = {
"GeneratorDataset",
"RangeDataset",
"SparseTensorsSliceDataset",
"TensorDataset",
"TensorSliceDataset",
};
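// Ops that do not affect the batch size of the dataset flowing through them.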
constexpr std::array<const char*, 20> kBatchSizeOrthogonalDatasetOps = {
"AssertCardinalityDataset",
"AssertNextDataset",
"BytesProducedStatsDataset",
"CacheDataset",
"FinalizeDataset",
"Identity",
"LatencyStatsDataset",
"MaxIntraOpParallelismDataset",
"ModelDataset",
"NonSerializableDataset",
"OptimizeDataset",
"OptionsDataset",
"ParseExampleDataset",
"PrefetchDataset",
"PrivateThreadPoolDataset",
"RebatchDataset",
"RepeatDataset",
"SetStatsAggregatorDataset",
"SleepDataset",
"ThreadPoolDataset",
};
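// Ops that produce batches and therefore carry a `drop_remainder` input.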
constexpr std::array<const char*, 3> kBatchDatasetOps = {
kBatchDatasetOpName,
kMapAndBatchDatasetOpName,
kParallelBatchDatasetOpName,
};
Status OptimizeGraph(const GrapplerItem& item, int64_t num_workers,
int64_t index, AutoShardPolicy policy,
int64_t num_replicas, GraphDef* output,
AutoShardPolicy* policy_applied);
template <std::size_t SIZE>
bool IsDatasetNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& arr) {
for (const auto& dataset_op_name : arr) {
if (tensorflow::data::MatchesAnyVersion(dataset_op_name,
node.op())) {
return true;
}
}
return false;
}
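// Inserts ShardDataset(num_workers, index) between `add_before` and its
// input, copying or synthesizing output shape/type attributes from the
// producer node.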
Status AddShardNode(MutableGraphView* graph, const NodeDef& add_before,
int64_t num_workers, int64_t index) {
NodeDef new_node;
new_node.set_op(kShardDatasetOpName);
graph_utils::SetUniqueGraphNodeName(kShardDatasetOpName, graph->graph(),
&new_node);
NodeDef* num_shards_node =
graph_utils::AddScalarConstNode<int64_t>(num_workers, graph);
NodeDef* index_node = graph_utils::AddScalarConstNode<int64_t>(index, graph);
new_node.add_input(add_before.input(0));
new_node.add_input(num_shards_node->name());
new_node.add_input(index_node->name());
(*(new_node.mutable_attr()))[data::ShardDatasetOp::kRequireNonEmpty].set_b(
true);
NodeDef* add_after = graph->GetNode(add_before.input(0));
if (absl::StrContains(add_after->op(), "Dataset")) {
if (add_after->attr().count(kOutputShapes) > 0) {
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
} else {
tensorflow::TensorShapeProto* shape =
(*(new_node.mutable_attr()))[kOutputShapes]
.mutable_list()
->add_shape();
shape->set_unknown_rank(true);
}
if (add_after->attr().count(kOutputTypes) > 0) {
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
} else if (add_after->attr().count("Toutput_types") > 0) {
(*(new_node.mutable_attr()))[kOutputTypes] =
add_after->attr().at("Toutput_types");
} else {
(*(new_node.mutable_attr()))[kOutputTypes].mutable_list()->add_type(
tensorflow::DataType::DT_STRING);
}
} else {
return errors::NotFound(
"Unable to shard this input. You may need to wrap the inputs to your "
"reader dataset in a TensorSliceDataset. Input node is ",
add_after->DebugString());
}
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
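// The AddShuffleDataset* helpers re-insert a shuffle (V1/V2/V3) between
// `add_before` and its input, restoring the parameters captured when the
// original shuffle was removed.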
Status AddShuffleDataset(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_node, const string& seed2_node,
bool reshuffle_each_iteration) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetOpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetOpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_node);
new_node.add_input(seed2_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
AttrValue reshuffle_attr;
reshuffle_attr.set_b(reshuffle_each_iteration);
(*new_node.mutable_attr())[kReshuffleEachIteration] = reshuffle_attr;
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
Status AddShuffleDatasetV2(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_generator_node) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetV2OpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetV2OpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_generator_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
Status AddShuffleDatasetV3(MutableGraphView* graph, const NodeDef& add_before,
const string& buffer_size_node,
const string& seed_node, const string& seed2_node,
const string& seed_generator_node,
bool reshuffle_each_iteration) {
NodeDef* add_after = graph->GetNode(add_before.input(0));
NodeDef new_node;
new_node.set_op(kShuffleDatasetV3OpName);
graph_utils::SetUniqueGraphNodeName(kShuffleDatasetV3OpName, graph->graph(),
&new_node);
new_node.add_input(add_before.input(0));
new_node.add_input(buffer_size_node);
new_node.add_input(seed_node);
new_node.add_input(seed2_node);
new_node.add_input(seed_generator_node);
graph_utils::CopyAttribute(kOutputShapes, *add_after, &new_node);
graph_utils::CopyAttribute(kOutputTypes, *add_after, &new_node);
AttrValue reshuffle_attr;
reshuffle_attr.set_b(reshuffle_each_iteration);
(*new_node.mutable_attr())[kReshuffleEachIteration] = reshuffle_attr;
NodeDef* new_node_graph = graph->AddNode(std::move(new_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(add_after->name(), new_node_graph->name()));
return absl::OkStatus();
}
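// Returns true if the function referenced by `node`'s "f" attribute
// (transitively) contains a file-reader dataset op that takes inputs.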
bool ReaderOpInFunction(const NodeDef& node,
const FunctionLibraryDefinition& flib) {
auto f_attr_it = node.attr().find("f");
if (f_attr_it == node.attr().end()) return false;
const FunctionDef* func = flib.Find(f_attr_it->second.func().name());
for (int i = 0; i < func->node_def_size(); i++) {
NodeDef node_in_func = func->node_def(i);
if (IsDatasetNodeOfType(node_in_func, kReaderDatasetOps) &&
node_in_func.input_size() > 0) {
return true;
}
if (IsDatasetNodeOfType(func->node_def(i), kFuncDatasetOps) &&
ReaderOpInFunction(func->node_def(i), flib)) {
return true;
}
}
return false;
}
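// Walks the fanin of `node`, removing any ShuffleDataset it finds and
// recording its inputs so an equivalent shuffle can be re-added after the
// shard point.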
Status RemoveShuffleDataset(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_node, string* seed2_node,
bool* reshuffle_each_iteration) {
if (node.op() == kShuffleDatasetOpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_node = node.input(2);
*seed2_node = node.input(3);
*reshuffle_each_iteration = node.attr().at(kReshuffleEachIteration).b();
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(RemoveShuffleDataset(
graph, *fanin.node, nodes_to_delete, op_name, buffer_size_node,
seed_node, seed2_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
Status RemoveShuffleDatasetV2(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_generator_node) {
if (node.op() == kShuffleDatasetV2OpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_generator_node = node.input(2);
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(
RemoveShuffleDatasetV2(graph, *fanin.node, nodes_to_delete, op_name,
buffer_size_node, seed_generator_node));
}
return absl::OkStatus();
}
Status RemoveShuffleDatasetV3(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
string* op_name, string* buffer_size_node,
string* seed_node, string* seed2_node,
string* seed_generator_node,
bool* reshuffle_each_iteration) {
if (node.op() == kShuffleDatasetV3OpName) {
*op_name = node.op();
*buffer_size_node = node.input(1);
*seed_node = node.input(2);
*seed2_node = node.input(3);
*seed_generator_node = node.input(4);
*reshuffle_each_iteration = node.attr().at(kReshuffleEachIteration).b();
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
nodes_to_delete->insert(node.name());
}
for (const auto& fanin : graph->GetFanins(node, true)) {
TF_RETURN_IF_ERROR(RemoveShuffleDatasetV3(
graph, *fanin.node, nodes_to_delete, op_name, buffer_size_node,
seed_node, seed2_node, seed_generator_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
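// Shards the dataset feeding `node` and hoists any upstream shuffle so that
// it runs after the newly inserted ShardDataset.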
Status ProcessDatasetSourceNode(MutableGraphView* graph, const NodeDef& node,
absl::flat_hash_set<string>* nodes_to_delete,
int64_t num_workers, int64_t index) {
string shuffle_op_name = "";
string buffer_size_node = "";
string seed_node = "";
string seed2_node = "";
string seed_generator_node = "";
bool reshuffle_each_iteration;
TF_RETURN_IF_ERROR(AddShardNode(graph, node, num_workers, index));
TF_RETURN_IF_ERROR(RemoveShuffleDataset(
graph, node, nodes_to_delete, &shuffle_op_name, &buffer_size_node,
&seed_node, &seed2_node, &reshuffle_each_iteration));
if (shuffle_op_name.empty()) {
TF_RETURN_IF_ERROR(
RemoveShuffleDatasetV2(graph, node, nodes_to_delete, &shuffle_op_name,
&buffer_size_node, &seed_generator_node));
}
if (shuffle_op_name.empty()) {
TF_RETURN_IF_ERROR(RemoveShuffleDatasetV3(
graph, node, nodes_to_delete, &shuffle_op_name, &buffer_size_node,
&seed_node, &seed2_node, &seed_generator_node,
&reshuffle_each_iteration));
}
if (shuffle_op_name == kShuffleDatasetOpName) {
TF_RETURN_IF_ERROR(AddShuffleDataset(graph, node, buffer_size_node,
seed_node, seed2_node,
reshuffle_each_iteration));
} else if (shuffle_op_name == kShuffleDatasetV2OpName) {
TF_RETURN_IF_ERROR(AddShuffleDatasetV2(graph, node, buffer_size_node,
seed_generator_node));
} else if (shuffle_op_name == kShuffleDatasetV3OpName) {
TF_RETURN_IF_ERROR(AddShuffleDatasetV3(
graph, node, buffer_size_node, seed_node, seed2_node,
seed_generator_node, reshuffle_each_iteration));
}
return absl::OkStatus();
}
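// Searches down a chain of pass-through ops for a function dataset (e.g. a
// flat_map) whose input is a TensorSliceDataset/TensorDataset fed by a
// placeholder; such a pipeline can be sharded at the function dataset.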
const NodeDef* FindFuncAndTensorSliceDataset(
const NodeDef* node, int64_t num_workers, int64_t index,
FunctionLibraryDefinition* flib, MutableGraphView* graph,
absl::flat_hash_set<string>* nodes_to_delete) {
if (IsDatasetNodeOfType(*node, kFuncDatasetOps)) {
const NodeDef* input_node = graph_utils::GetInputNode(*node, *graph, 0);
if (input_node->op() == kTensorSliceDatasetOpName ||
input_node->op() == kTensorDatasetOpName) {
const NodeDef* next_input_node =
graph_utils::GetInputNode(*input_node, *graph, 0);
if (next_input_node->op() == kPlaceholderOpName) {
return node;
}
}
}
if (!IsDatasetNodeOfType(*node, kPassThroughOps)) {
return nullptr;
}
const NodeDef* input_node = graph_utils::GetInputNode(*node, *graph, 0);
return FindFuncAndTensorSliceDataset(input_node, num_workers, index, flib,
graph, nodes_to_delete);
}
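// Resolves the drop_remainder input of a batch node to a constant value if
// possible. The input position depends on the batch op flavor; for
// MapAndBatchDataset it sits after the captured "Targuments" inputs.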
enum class DropRemainderValue { kUnknown, kTrue, kFalse };
DropRemainderValue GetDropRemainder(const MutableGraphView& graph,
const NodeDef& batch_node) {
const NodeDef* drop_remainder = nullptr;
if (batch_node.op() == kBatchDatasetOpName ||
batch_node.op() == kBatchDatasetV2OpName) {
drop_remainder = graph.GetNode(batch_node.input(2));
} else if (batch_node.op() == kParallelBatchDatasetOpName) {
drop_remainder = graph.GetNode(batch_node.input(3));
} else if (batch_node.op() == kMapAndBatchDatasetOpName) {
int drop_remainder_index =
3 + batch_node.attr().at("Targuments").list().shape_size();
if (drop_remainder_index >= batch_node.input_size()) {
LOG(ERROR) << "Fail to find the drop_remainder of op: "
<< batch_node.DebugString();
return DropRemainderValue::kUnknown;
}
drop_remainder = graph.GetNode(batch_node.input(drop_remainder_index));
} else {
LOG(ERROR) << "Expect a batch node but get " << batch_node.DebugString();
return DropRemainderValue::kUnknown;
}
if (!IsConstant(*drop_remainder)) {
return DropRemainderValue::kUnknown;
}
bool drop_remainder_value;
if (!GetNodeAttr(*drop_remainder, "value", &drop_remainder_value).ok()) {
return DropRemainderValue::kUnknown;
}
return drop_remainder_value ? DropRemainderValue::kTrue
: DropRemainderValue::kFalse;
}
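// Core walk for FILE-based sharding: descends from the fetch node toward the
// source, dropping AssertCardinality nodes, recursing into every input of
// multi-input datasets, and sharding at reader datasets (or at function or
// pass-through datasets whose function contains a reader op). Returns
// NotFound if no shardable source is reachable.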
Status RecursivelyHandleOp(const NodeDef& node, int64_t num_workers,
int64_t index, FunctionLibraryDefinition* flib,
MutableGraphView* graph,
absl::flat_hash_set<string>* nodes_to_delete) {
if (node.op() == kAssertCardinalityDatasetOpName) {
LOG(WARNING) << "The `assert_cardinality` transformation is currently not "
"handled by the auto-shard rewrite and will be removed.";
nodes_to_delete->insert(node.name());
TF_RETURN_IF_ERROR(graph->UpdateFanouts(node.name(), node.input(0)));
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,
nodes_to_delete);
}
if (IsDatasetNodeOfType(node, kUnshardableSourceDatasetOps)) {
return errors::NotFound("Found an unshardable source dataset: ",
node.DebugString());
}
if (IsDatasetNodeOfType(node, kMultipleInputsDatasetOps)) {
for (int i = 0; i < node.input_size(); ++i) {
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, i);
TF_RETURN_IF_ERROR(RecursivelyHandleOp(*input_node, num_workers, index,
flib, graph, nodes_to_delete));
}
return absl::OkStatus();
}
if (IsDatasetNodeOfType(node, kFuncDatasetOps)) {
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
const NodeDef* flat_map_node = FindFuncAndTensorSliceDataset(
input_node, num_workers, index, flib, graph, nodes_to_delete);
if (flat_map_node != nullptr) {
auto fanouts = graph->GetFanouts(*flat_map_node, false);
if (fanouts.size() == 1) {
return ProcessDatasetSourceNode(graph, *fanouts.begin()->node,
nodes_to_delete, num_workers, index);
}
}
}
if ((IsDatasetNodeOfType(node, kFuncDatasetOps) ||
IsDatasetNodeOfType(node, kPassThroughOps)) &&
ReaderOpInFunction(node, *flib)) {
return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers,
index);
}
if (IsDatasetNodeOfType(node, kReaderDatasetOps)) {
return ProcessDatasetSourceNode(graph, node, nodes_to_delete, num_workers,
index);
}
if (!IsDatasetNodeOfType(node, kFuncDatasetOps) &&
!IsDatasetNodeOfType(node, kPassThroughOps)) {
return errors::NotFound(
"Did not find a shardable source, walked to ",
"a node which is not a dataset: ", node.DebugString(),
". Consider either turning off auto-sharding or switching the "
"auto_shard_policy to DATA to shard this dataset. You can do this by "
"creating a new `tf.data.Options()` object then setting "
"`options.experimental_distribute.auto_shard_policy = "
"AutoShardPolicy.DATA` before applying the options object to the "
"dataset via `dataset.with_options(options)`.");
}
const NodeDef* input_node = graph_utils::GetInputNode(node, *graph, 0);
return RecursivelyHandleOp(*input_node, num_workers, index, flib, graph,
nodes_to_delete);
}
Status ShardByFile(const NodeDef& sink_node, int64_t num_workers, int64_t index,
FunctionLibraryDefinition* flib, MutableGraphView* graph) {
absl::flat_hash_set<string> nodes_to_delete;
TF_RETURN_IF_ERROR(RecursivelyHandleOp(sink_node, num_workers, index, flib,
graph, &nodes_to_delete));
return graph->DeleteNodes(nodes_to_delete);
}
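// Rewrites a RebatchDatasetV2 feeding the sink into the legacy
// RebatchDataset so it composes with data sharding: the batch_sizes and
// drop_remainder inputs are replaced by a num_replicas constant, and any
// static leading batch dimension in output_shapes is reset to unknown (-1).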
Status RewriteRebatchV2ToV1(const NodeDef& sink_node, int64_t num_replicas,
MutableGraphView* graph) {
NodeDef* input_node = graph_utils::GetInputNode(sink_node, *graph);
if (input_node->op() != kRebatchDatasetV2OpName) {
return absl::OkStatus();
}
NodeDef* rebatch_node = input_node;
rebatch_node->set_op(kRebatchDatasetOpName);
rebatch_node->mutable_input()->DeleteSubrange(1, 2);
if (num_replicas < 1) {
return errors::InvalidArgument(
"Cannot rewrite RebatchDatasetV2 to legacy RebatchDataset with invalid "
"num_replicas argument. `num_replicas` is ",
num_replicas, ", but expected to be >= 1.");
}
auto num_replicas_node = graph_utils::AddScalarConstNode(num_replicas, graph);
rebatch_node->add_input(num_replicas_node->name());
(*rebatch_node->mutable_attr())["use_fallback"].set_b(true);
auto* shapes_attr =
gtl::FindOrNull(*rebatch_node->mutable_attr(), "output_shapes");
if (shapes_attr == nullptr) {
return errors::InvalidArgument(
"Cannot rewrite RebatchDatasetV2 with missing `output_shapes` attr.");
}
for (int i = 0; i < shapes_attr->list().shape_size(); ++i) {
auto* shape = shapes_attr->mutable_list()->mutable_shape(i);
if (shape->unknown_rank()) continue;
shape->mutable_dim(0)->set_size(-1);
}
return absl::OkStatus();
}
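// DATA sharding: places the ShardDataset as close to the end of the pipeline
// as possible, skipping over prefetch/options/finalize nodes so that they
// remain downstream of the shard.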
Status ShardByData(const NodeDef& sink_node, int64_t num_workers, int64_t index,
int64_t num_replicas, MutableGraphView* graph) {
const NodeDef* shard_before = &sink_node;
NodeDef* input_node = graph_utils::GetInputNode(sink_node, *graph);
while (input_node->op() == kPrefetchDatasetOpName ||
input_node->op() == kOptionsDatasetOpName ||
input_node->op() == kFinalizeDatasetOpName) {
shard_before = input_node;
input_node = graph_utils::GetInputNode(*input_node, *graph);
}
TF_RETURN_IF_ERROR(RewriteRebatchV2ToV1(*shard_before, num_replicas, graph));
return AddShardNode(graph, *shard_before, num_workers, index);
}
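// HINT sharding: finds ShardDataset nodes whose num_shards input is the
// SHARD_HINT sentinel constant and patches in the actual worker count and
// worker index.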
Status ShardByHint(const NodeDef& sink_node, int64_t num_workers, int64_t index,
int64_t num_replicas, MutableGraphView* graph) {
auto get_shard_node = [graph](const NodeDef& node) -> const NodeDef* {
if (node.op() != kShardDatasetOpName) return nullptr;
auto num_workers_node = graph->GetNode(node.input(1));
if (num_workers_node->op() != kConstOpName) return nullptr;
if (num_workers_node->attr().at("value").tensor().int64_val(0) !=
tensorflow::data::kShardHint)
return nullptr;
return &node;
};
auto* num_workers_node =
graph_utils::AddScalarConstNode(static_cast<int64_t>(num_workers), graph);
auto* worker_index_node =
graph_utils::AddScalarConstNode(static_cast<int64_t>(index), graph);
for (const NodeDef& node : graph->graph()->node()) {
const NodeDef* shard_node = get_shard_node(node);
if (!shard_node) continue;
auto mutable_node = graph->GetNode(shard_node->name());
*mutable_node->mutable_input(1) = num_workers_node->name();
*mutable_node->mutable_input(2) = worker_index_node->name();
(*(mutable_node->mutable_attr()))[data::ShardDatasetOp::kRequireNonEmpty]
.set_b(true);
}
return absl::OkStatus();
}
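// Dispatches on the configured policy. AUTO first attempts FILE sharding and
// falls back to DATA sharding when no shardable file-based source is found;
// `policy_applied` reports which policy actually took effect.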
Status ApplyAutoShard(const NodeDef& sink_node, int64_t num_workers,
int64_t index, AutoShardPolicy policy,
int64_t num_replicas, MutableGraphView* graph,
AutoShardPolicy* policy_applied) {
*policy_applied = policy;
FunctionLibraryDefinition flib(OpRegistry::Global(),
graph->graph()->library());
switch (policy) {
case AutoShardPolicy::OFF:
return absl::OkStatus();
case AutoShardPolicy::FILE:
return ShardByFile(sink_node, num_workers, index, &flib, graph);
case AutoShardPolicy::DATA:
return ShardByData(sink_node, num_workers, index, num_replicas, graph);
case AutoShardPolicy::HINT:
return ShardByHint(sink_node, num_workers, index, num_replicas, graph);
case AutoShardPolicy::AUTO:
default:
Status s = ShardByFile(sink_node, num_workers, index, &flib, graph);
if (absl::IsNotFound(s)) {
if (VLOG_IS_ON(2)) {
VLOG(2) << "AUTO sharding policy will apply DATA sharding policy "
"as it failed to apply FILE sharding policy because of "
"the following reason: "
<< s.message();
}
*policy_applied = AutoShardPolicy::DATA;
return ShardByData(sink_node, num_workers, index, num_replicas, graph);
}
*policy_applied = AutoShardPolicy::FILE;
return s;
}
}
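// Entry point for the rewrite: locates the fetch (sink) node, applies the
// configured sharding policy, and records rewrite metrics once per job
// (only on worker index 0).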
Status OptimizeGraph(const GrapplerItem& item, int64_t num_workers,
int64_t index, AutoShardPolicy policy,
int64_t num_replicas, GraphDef* output) {
*output = item.graph;
MutableGraphView graph(output);
NodeDef* sink_node;
TF_RETURN_IF_ERROR(graph_utils::GetFetchNode(graph, item, &sink_node));
string id = strings::StrCat(reinterpret_cast<uint64>(output));
if (index == 0) {
std::vector<std::string> ineligible_reason;
bool is_eligible = internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason);
metrics::RecordTFDataAutoShardRewriteBatchSize(is_eligible,
ineligible_reason);
}
AutoShardPolicy policy_applied = policy;
if (policy != AutoShardPolicy::OFF &&
!(policy == AutoShardPolicy::FILE && num_workers == 1 && index == 0)) {
TF_RETURN_IF_ERROR(ApplyAutoShard(*sink_node, num_workers, index, policy,
num_replicas, &graph, &policy_applied));
}
if (index == 0) {
metrics::RecordTFDataAutoShard(id, policy_applied, num_workers,
num_replicas);
}
return absl::OkStatus();
}
}
namespace internal {
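// Returns true if the batch size of this pipeline can be safely rewritten by
// a later rebatch pass: the final batch must either keep partial batches
// (drop_remainder == false) or have a statically known infinite cardinality,
// and only batch-size-orthogonal ops (or a rebatch preceded by a map) may
// sit between the batch node and the sink.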
bool IsEligibleRewriteBatchSize(const NodeDef& sink_node,
const MutableGraphView& graph,
std::vector<std::string>* ineligible_reason) {
ineligible_reason->clear();
NodeDef* input_node = graph_utils::GetInputNode(sink_node, graph);
while (input_node != nullptr) {
if (input_node->op() == kRebatchDatasetOpName ||
input_node->op() == kRebatchDatasetV2OpName) {
input_node = graph_utils::GetInputNode(*input_node, graph);
if (input_node == nullptr || input_node->op() != kMapDatasetOpName) {
ineligible_reason->push_back("BUG_NO_MAP_BEFORE_REBATCH");
return false;
}
input_node = graph_utils::GetInputNode(*input_node, graph);
continue;
}
if (IsDatasetNodeOfType(*input_node, kBatchSizeOrthogonalDatasetOps)) {
input_node = graph_utils::GetInputNode(*input_node, graph);
continue;
}
if (IsDatasetNodeOfType(*input_node, kBatchDatasetOps)) {
DropRemainderValue drop_remainder = GetDropRemainder(graph, *input_node);
int64_t cardinality = data::kUnknownCardinality;
bool cardinality_available = true;
AttrSlice attrs(*input_node);
if (!TryGetNodeAttr(attrs, data::kCardinalityAttrForRewrite,
&cardinality)) {
cardinality_available = false;
}
if (drop_remainder == DropRemainderValue::kFalse ||
(cardinality_available &&
cardinality == data::kInfiniteCardinality)) {
return ineligible_reason->empty();
} else {
if (drop_remainder == DropRemainderValue::kUnknown) {
ineligible_reason->push_back("BATCH_DROP_REMAINDER_UNKNOWN");
}
if (!cardinality_available) {
ineligible_reason->push_back("BATCH_CARDINALITY_NOT_AVAILABLE");
}
if (drop_remainder == DropRemainderValue::kTrue &&
cardinality_available &&
cardinality != data::kInfiniteCardinality) {
ineligible_reason->push_back("BATCH_DROP_REMAINDER_NOT_INFINITE");
}
return false;
}
}
ineligible_reason->push_back(
strings::StrCat("OP_NOT_SUPPORTED_", input_node->op()));
input_node = graph_utils::GetInputNode(*input_node, graph);
}
ineligible_reason->clear();
ineligible_reason->push_back("BATCH_NOT_FOUND");
return false;
}
}
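// Reads the num_workers/index/auto_shard_policy/num_replicas parameters from
// the rewriter config and validates their ranges.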
Status AutoShard::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) return errors::InvalidArgument("RewriterConfig not found.");
if ((config->parameter_map().find(kNumWorkersAttrName) ==
config->parameter_map().end())) {
return errors::InvalidArgument(kNumWorkersAttrName, " parameter missing.");
}
if ((config->parameter_map().find(kIndexAttrName) ==
config->parameter_map().end())) {
return errors::InvalidArgument(kIndexAttrName, " parameter missing.");
}
num_workers_ = config->parameter_map().at(kNumWorkersAttrName).i();
index_ = config->parameter_map().at(kIndexAttrName).i();
auto_shard_policy_ =
AutoShardPolicy(config->parameter_map().at(kAutoShardPolicyAttrName).i());
num_replicas_ = config->parameter_map().at(kNumReplicasAttrName).i();
if (auto_shard_policy_ != AutoShardPolicy::OFF &&
auto_shard_policy_ != AutoShardPolicy::AUTO &&
auto_shard_policy_ != AutoShardPolicy::DATA &&
auto_shard_policy_ != AutoShardPolicy::FILE &&
auto_shard_policy_ != AutoShardPolicy::HINT) {
return errors::InvalidArgument(kAutoShardPolicyAttrName, " is invalid.");
}
if (num_workers_ < 1) {
return errors::InvalidArgument(kNumWorkersAttrName,
" should be >= 1, currently ", num_workers_);
}
if (index_ < 0 || index_ >= num_workers_) {
return errors::InvalidArgument(kIndexAttrName, " should be >= 0 and < ",
num_workers_, ", currently ", index_);
}
if (num_replicas_ < 0) {
return errors::InvalidArgument(kNumReplicasAttrName, " should be >= 0");
}
return absl::OkStatus();
}
Status AutoShard::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
TF_RETURN_IF_ERROR(OptimizeGraph(item, num_workers_, index_,
auto_shard_policy_, num_replicas_, output));
stats->num_changes++;
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(AutoShard, "tf_auto_shard");
}
} | #include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::grappler::graph_tests_utils::MakeBatchV2Node;
using ::tensorflow::grappler::graph_tests_utils::MakeMapAndBatchNode;
using ::tensorflow::grappler::graph_tests_utils::MakeParallelBatchNode;
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using ::testing::UnorderedElementsAre;
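// Appends the common tail of the test pipelines (map -> rebatch -> prefetch
// -> Sink) after `input_node_name` and registers "Sink" as the fetch node.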
void FinishItem(GrapplerItem* item, const string& input_node_name) {
*item->graph.add_node() =
NDef("map_before_rebatch", "MapDataset", {input_node_name},
{{"f", "__inference_Dataset_map_normalize_8232"},
{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() =
NDef("num_replicas", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}});
*item->graph.add_node() =
NDef("rebatch", "RebatchDataset", {"map_before_rebatch", "num_replicas"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() =
NDef("prefetch_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}});
*item->graph.add_node() =
NDef("prefetch", "PrefetchDataset", {"rebatch", "prefetch_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}});
*item->graph.add_node() = NDef("Sink", "Identity", {"prefetch"}, {});
item->fetch.push_back("Sink");
}
NodeDef AddCardinalityAttr(NodeDef node, int64_t cardinality) {
(*node.mutable_attr())[data::kCardinalityAttrForRewrite].set_i(cardinality);
return node;
}
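// Each RewriteBatchTest builds a small tf.data graph whose batch node carries
// a known cardinality attribute, then checks whether
// internal::IsEligibleRewriteBatchSize accepts it and which ineligibility
// reasons it reports.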
TEST(RewriteBatchTest, InfiniteSource) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "repeat", "batch_size", "drop_remainder",
false),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, InfiniteSourceMapAndBatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeMapAndBatchNode("batch", "repeat", "batch_size",
"num_parallel_calls", "drop_remainder"),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, InfiniteSourceParallelBatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeParallelBatchNode("batch", "repeat", "batch_size",
"num_parallel_calls", "drop_remainder",
"true"),
data::kInfiniteCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, FiniteSourceNoDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
TEST(RewriteBatchTest, FiniteSourceDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
1337),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, UnknownCardinalitySourceDropRemainder) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, FiniteSourceDropRemainderUnknown) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "RandomBool", {}, {}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_DROP_REMAINDER_UNKNOWN"));
}
TEST(RewriteBatchTest, DropRemainderCardinalityNotAvailable) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {}, {{"value", true}}),
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
});
FinishItem(&item, "batch");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("BATCH_CARDINALITY_NOT_AVAILABLE"));
}
TEST(RewriteBatchTest, OpNotSupported) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "tf_record", "batch_size", "drop_remainder",
false),
data::kUnknownCardinality),
NDef("take_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeTakeNode("take", "batch", "take_count"),
});
FinishItem(&item, "take");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason,
UnorderedElementsAre("OP_NOT_SUPPORTED_TakeDataset",
"BATCH_DROP_REMAINDER_NOT_INFINITE"));
}
TEST(RewriteBatchTest, BatchNotFound) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
graph_tests_utils::MakeTakeNode("take", "tf_record", "take_count"),
NDef("take_count", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
});
FinishItem(&item, "take");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_FALSE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason));
EXPECT_THAT(ineligible_reason, UnorderedElementsAre("BATCH_NOT_FOUND"));
}
TEST(RewriteBatchTest, InfiniteSourceNoRebatch) {
GrapplerItem item;
item.graph = GDef({
NDef("files", "Const", {},
{{"values", std::vector<std::string>{"file1", "file2"}},
{"dtype", DT_STRING}}),
NDef("tf_record", "TFRecordDataset", {"file"}, {}),
NDef("repeat_count", "Const", {}, {{"value", -1}, {"dtype", DT_INT32}}),
NDef("repeat", "RepeatDataset", {"tf_record", "repeat_count"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", true}, {"dtype", DT_BOOL}}),
AddCardinalityAttr(
MakeBatchV2Node("batch", "repeat", "batch_size", "drop_remainder",
false),
data::kInfiniteCardinality),
NDef("Sink", "Identity", {"batch"}, {}),
});
item.fetch.push_back("Sink");
MutableGraphView graph(&item.graph);
NodeDef* sink_node = nullptr;
TF_ASSERT_OK(graph_utils::GetFetchNode(graph, item, &sink_node));
std::vector<std::string> ineligible_reason;
EXPECT_TRUE(internal::IsEligibleRewriteBatchSize(*sink_node, graph,
&ineligible_reason))
<< absl::StrJoin(ineligible_reason, ",");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/auto_shard.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/auto_shard_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
befb96a5-113e-49f5-b16e-46e40289e82c | cpp | tensorflow/tensorflow | map_fusion | tensorflow/core/grappler/optimizers/data/map_fusion.cc | tensorflow/core/grappler/optimizers/data/map_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMapDatasetOp[] = "MapDataset";
constexpr char kParallelMapDatasetOp[] = "ParallelMapDatasetV2";
constexpr char kDeterministicAttr[] = "deterministic";
constexpr char kConstOp[] = "Const";
constexpr char kValueAttr[] = "value";
constexpr int kAutotuneValue = -1;
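// Returns true if `node_name` names a Const holding the tf.data AUTOTUNE
// sentinel (-1); only parallel maps driven by AUTOTUNE are considered for
// fusion.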
bool IsAutotuneNode(const string& node_name, const MutableGraphView& graph) {
const NodeDef* node = graph.GetNode(node_name);
if (!node) return false;
if (node->op() != kConstOp) return false;
const auto* value = gtl::FindOrNull(node->attr(), kValueAttr);
if (!value) return false;
if (value->has_tensor()) {
if (value->tensor().int64_val_size()) {
return value->tensor().int64_val(0) == kAutotuneValue;
}
}
return false;
}
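// A missing `deterministic` attribute, "true", and "default" all count as
// deterministic; two parallel maps are only fused when they agree.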
bool SameDeterministicAttr(const NodeDef& parallel_map_node,
const NodeDef& parent_parallel_map_node) {
const auto* first_deterministic_attr =
gtl::FindOrNull(parallel_map_node.attr(), kDeterministicAttr);
const auto* second_deterministic_attr =
gtl::FindOrNull(parent_parallel_map_node.attr(), kDeterministicAttr);
const bool first_deterministic_val =
(first_deterministic_attr == nullptr) ||
(first_deterministic_attr->s() == "true" ||
first_deterministic_attr->s() == "default");
const bool second_deterministic_val =
(second_deterministic_attr == nullptr) ||
(second_deterministic_attr->s() == "true" ||
second_deterministic_attr->s() == "default");
return first_deterministic_val == second_deterministic_val;
}
string GetFusedName(const NodeDef& parent, const NodeDef& child) {
return absl::StrCat("map_fusion_nodes/", parent.name(), "/", child.name());
}
string GetFusedName(const FunctionDef& parent, const FunctionDef& child) {
return absl::StrCat("map_fusion_funcs/", parent.signature().name(), "/",
child.signature().name());
}
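// Builds the replacement node: it consumes the parent's inputs, invokes the
// fused function, keeps the child's output shapes/types, ORs the
// use_inter_op_parallelism attrs, and ANDs preserve_cardinality.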
NodeDef MakeFusedNode(const NodeDef& parent_map_node, const NodeDef& map_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName(GetFusedName(parent_map_node, map_node),
graph->graph(), &fused_node);
if (map_node.op() == kMapDatasetOp) {
fused_node.set_op(kMapDatasetOp);
fused_node.add_input(parent_map_node.input(0));
} else if (map_node.op() == kParallelMapDatasetOp) {
fused_node.set_op(kParallelMapDatasetOp);
fused_node.add_input(parent_map_node.input(0));
fused_node.add_input(parent_map_node.input(1));
}
auto attr = parent_map_node.attr().at("f");
*attr.mutable_func()->mutable_name() = fused_function.signature().name();
(*fused_node.mutable_attr())["f"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", parent_map_node, &fused_node);
graph_utils::CopyShapesAndTypesAttrs(map_node, &fused_node);
auto value_or_false = [](const AttrValue* attr) {
if (!attr) return false;
return attr->b();
};
const auto* first_parallelism =
gtl::FindOrNull(parent_map_node.attr(), "use_inter_op_parallelism");
const auto* second_parallelism =
gtl::FindOrNull(map_node.attr(), "use_inter_op_parallelism");
(*fused_node.mutable_attr())["use_inter_op_parallelism"].set_b(
value_or_false(first_parallelism) || value_or_false(second_parallelism));
const auto* first_cardinality =
gtl::FindOrNull(parent_map_node.attr(), "preserve_cardinality");
const auto* second_cardinality =
gtl::FindOrNull(map_node.attr(), "preserve_cardinality");
(*fused_node.mutable_attr())["preserve_cardinality"].set_b(
value_or_false(first_cardinality) && value_or_false(second_cardinality));
graph_utils::MaybeSetFusedMetadata(parent_map_node, map_node, &fused_node);
if (map_node.op() == kParallelMapDatasetOp) {
graph_utils::CopyAttribute(kDeterministicAttr, map_node, &fused_node);
}
return fused_node;
}
}
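// Fuses chains of MapDataset (or AUTOTUNE-driven ParallelMapDatasetV2) nodes
// into a single node whose function is the composition of the originals.
// Skipped entirely when autotune is off, and for maps that use an unbounded
// threadpool.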
Status MapFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
if (!autotune_) {
VLOG(1) << "The optimization map_fusion is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [&graph](const NodeDef& node) -> const NodeDef* {
if (node.op() == kMapDatasetOp && node.input_size() == 1) return &node;
if (node.op() == kParallelMapDatasetOp) {
if (node.input_size() != 2) return nullptr;
if (!IsAutotuneNode(node.input(1), graph)) return nullptr;
return &node;
}
return nullptr;
};
auto make_fused_function = [&function_library, &output](
const NodeDef* parent_map_node,
const NodeDef* map_node) -> FunctionDef* {
const auto& parent_fun = parent_map_node->attr().at("f");
const FunctionDef* parent_func =
function_library.Find(parent_fun.func().name());
const auto& fun = map_node->attr().at("f");
const FunctionDef* func = function_library.Find(fun.func().name());
if (!fusion_utils::CanCompose(parent_func->signature(),
func->signature())) {
VLOG(1) << "Can't fuse two maps because the output signature of the "
"first map function does not match the input signature of the "
"second function\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*parent_func, *func, GetFusedName(*parent_func, *func),
fusion_utils::ComposeSignature, fusion_utils::ComposeInput,
fusion_utils::ComposeOutput, fusion_utils::MergeNodes,
output->mutable_library());
};
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* map_node = get_map_node(node);
if (!map_node) continue;
if (map_node->attr().find("use_unbounded_threadpool") !=
map_node->attr().end() &&
map_node->attr().at("use_unbounded_threadpool").b()) {
continue;
}
const NodeDef* parent_map_node =
get_map_node(*graph_utils::GetInputNode(*map_node, graph));
if (!parent_map_node) continue;
if (parent_map_node->attr().find("use_unbounded_threadpool") !=
parent_map_node->attr().end() &&
parent_map_node->attr().at("use_unbounded_threadpool").b()) {
continue;
}
if (parent_map_node->op() != map_node->op()) continue;
if (map_node->op() == kParallelMapDatasetOp) {
if (!SameDeterministicAttr(*parent_map_node, *map_node)) continue;
}
const auto* fused_function = make_fused_function(parent_map_node, map_node);
if (fused_function == nullptr) continue;
const auto* fused_maps_node = graph.AddNode(
MakeFusedNode(*parent_map_node, *map_node, *fused_function, &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(map_node->name(), fused_maps_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_function));
nodes_to_delete.insert(parent_map_node->name());
nodes_to_delete.insert(map_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapFusion, "map_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_fusion.h"
#include <functional>
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeMapNode;
using graph_tests_utils::MakeParallelMapV2Node;
constexpr char kConstOpName[] = "Const";
NodeDef CreateScalarConstNodeHelper(
const std::string& node_name, DataType dtype,
const std::function<void(TensorProto*)>& add_value) {
NodeDef node;
node.set_op(kConstOpName);
node.set_name(node_name);
(*node.mutable_attr())["dtype"].set_type(dtype);
auto tensor = std::make_unique<tensorflow::TensorProto>();
auto tensor_shape = std::make_unique<tensorflow::TensorShapeProto>();
tensor->set_allocated_tensor_shape(tensor_shape.release());
tensor->set_dtype(dtype);
add_value(tensor.get());
(*node.mutable_attr())["value"].set_allocated_tensor(tensor.release());
return node;
}
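// Runs the optimizer with the "autotune" parameter set explicitly so both the
// enabled and disabled code paths can be exercised.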
Status OptimizeWithMapFusion(const GrapplerItem& item, GraphDef* output,
bool autotune) {
MapFusion optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, MapFusionTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node("map1", "range", num_parallel_calls_node.name(),
"XTimesTwo", "default",
false),
MakeParallelMapV2Node("map2", "map1", num_parallel_calls_node.name(),
"XTimesTwo", "default",
false)},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, autotune));
if (autotune) {
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
} else {
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
TEST(MapFusionTest, FuseTwoMapNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range"), MakeMapNode("map2", "map1")},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
TEST(MapFusionTest, FuseThreeNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range"), MakeMapNode("map2", "map1"),
MakeMapNode("map3", "map2"),
NDef("cache", "CacheDataset", {"map3", "filename"}, {})},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map3", output));
}
TEST(MapFusionTest, FuseTwoParallelMapNodesIntoOne) {
using test::function::NDef;
GrapplerItem item;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node("map1", "range", num_parallel_calls_node.name(),
"XTimesTwo", "default",
false),
MakeParallelMapV2Node("map2", "map1", num_parallel_calls_node.name(),
"XTimesTwo", "default",
false)},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
TEST(MapFusionTest, NoChange_UnboundedThreadpoolParallelMap) {
using test::function::NDef;
GrapplerItem item;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node("map1", "range", num_parallel_calls_node.name(),
"XTimesTwo", "default",
true),
MakeParallelMapV2Node("map2", "map1", num_parallel_calls_node.name(),
"XTimesTwo", "default",
false)},
{
test::function::XTimesTwo(),
});
MapFusion optimizer;
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapFusion(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
TEST(MapFusionTest, FusedNodesAndFunctionsAreNamedAfterOldNodesAndFunctions) {
using test::function::NDef;
NodeDef num_parallel_calls_node = CreateScalarConstNodeHelper(
"num_parallel_calls", DT_INT64,
[](TensorProto* proto) { proto->add_int64_val(-1); });
auto graph = [&num_parallel_calls_node](
const std::string& parent_map_node_name,
const std::string& map_node_name,
const std::string& parent_function_name,
const std::string& function_name) {
FunctionDef parent_fn = test::function::XTimesTwo();
FunctionDef fn = test::function::XTimesTwo();
parent_fn.mutable_signature()->set_name(parent_function_name);
fn.mutable_signature()->set_name(function_name);
return test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
num_parallel_calls_node,
MakeParallelMapV2Node(parent_map_node_name, "range",
num_parallel_calls_node.name(),
parent_function_name, "default",
false),
MakeParallelMapV2Node(map_node_name, parent_map_node_name,
num_parallel_calls_node.name(), function_name,
"default", false)},
{parent_fn, fn});
};
GrapplerItem item_1;
item_1.graph = graph("map1", "map2", "fnA", "fnB");
GraphDef output_1;
TF_ASSERT_OK(OptimizeWithMapFusion(item_1, &output_1, true));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
"map_fusion_nodes/map1/map2", output_1));
EXPECT_TRUE(graph_utils::ContainsGraphFunctionWithName(
"map_fusion_funcs/fnA/fnB", output_1.library()));
GrapplerItem item_2;
item_2.graph = graph("map3", "map4", "fnC", "fnD");
GraphDef output_2;
TF_ASSERT_OK(OptimizeWithMapFusion(item_2, &output_2, true));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(
"map_fusion_nodes/map3/map4", output_2));
EXPECT_TRUE(graph_utils::ContainsGraphFunctionWithName(
"map_fusion_funcs/fnC/fnD", output_2.library()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4af027aa-e249-4c4b-8bc5-c238083c09b4 | cpp | tensorflow/tensorflow | map_and_filter_fusion | tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.cc | tensorflow/core/grappler/optimizers/data/map_and_filter_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/kernels/function_ops.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
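// The rewrite turns map(f) followed by filter(p) into three nodes:
//   fused_map:    computes f(x) and appends p(f(x)) as an extra DT_BOOL
//   FilterByLast: keeps elements whose trailing bool is true
//   DropLast:     strips the trailing bool, restoring the original signature
// MakeFusedNode builds the first of these.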
NodeDef MakeFusedNode(const NodeDef& map_node, const NodeDef& filter_node,
const FunctionDef& fused_function,
MutableGraphView* graph) {
NodeDef fused_node;
graph_utils::SetUniqueGraphNodeName("fused_map", graph->graph(), &fused_node);
fused_node.set_op(map_node.op());
for (int i = 0; i < map_node.input_size(); ++i) {
fused_node.add_input(map_node.input(i));
}
auto attr = map_node.attr().at("f");
attr.mutable_func()->set_name(fused_function.signature().name());
(*fused_node.mutable_attr())["f"] = std::move(attr);
graph_utils::CopyAttribute("Targuments", map_node, &fused_node);
graph_utils::CopyShapesAndTypesAttrs(map_node, &fused_node);
for (auto key :
{"use_inter_op_parallelism", "sloppy", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node.attr(), key)) {
graph_utils::CopyAttribute(key, map_node, &fused_node);
}
}
graph_utils::MaybeSetFusedMetadata(map_node, filter_node, &fused_node);
(*fused_node.mutable_attr())["output_types"]
.mutable_list()
->mutable_type()
->Add(DT_BOOL);
(*fused_node.mutable_attr())["output_shapes"]
.mutable_list()
->mutable_shape()
->Add();
return fused_node;
}
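// Builds the FilterDataset plus a generated "GetLast" function whose single
// output is the trailing predicate result produced by the fused map.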
NodeDef MakeFilterNode(const NodeDef& fused_map,
const FunctionDef& fused_map_func,
MutableGraphView* graph, FunctionDefLibrary* library) {
NodeDef filter_node;
graph_utils::SetUniqueGraphNodeName("FilterByLast", graph->graph(),
&filter_node);
filter_node.set_op("FilterDataset");
filter_node.add_input(fused_map.name());
graph_utils::CopyShapesAndTypesAttrs(fused_map, &filter_node);
AddNodeAttr("Targuments", std::vector<DataType>({}), &filter_node);
OpDef fused_sig = fused_map_func.signature();
FunctionDef* func = library->add_function();
OpDef* sig = func->mutable_signature();
sig->set_name("GetLast");
for (const auto& arg : fused_sig.output_arg()) {
*(sig->add_input_arg()) = arg;
}
OpDef::ArgDef* arg = sig->add_output_arg();
arg->set_name("predicate_result");
arg->set_description("predicate result computed in the fused map");
arg->set_type(DT_BOOL);
sig->set_description("returns the last argument");
(*func->mutable_ret())["predicate_result"] = strings::StrCat(
fused_sig.output_arg(fused_sig.output_arg_size() - 1).name(), ":0");
(*filter_node.mutable_attr())["predicate"] =
FunctionDefHelper::FunctionRef(func->signature().name()).proto;
return filter_node;
}
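// Builds the trailing MapDataset plus a generated "DropLast" function that
// forwards every component except the final predicate result.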
NodeDef MakeMapNode(const NodeDef& updated_filter, const NodeDef& original_map,
const FunctionDef& fused_map_func, MutableGraphView* graph,
FunctionDefLibrary* library) {
NodeDef map_node;
graph_utils::SetUniqueGraphNodeName("DropLast", graph->graph(), &map_node);
map_node.set_op("MapDataset");
map_node.add_input(updated_filter.name());
graph_utils::CopyShapesAndTypesAttrs(original_map, &map_node);
AddNodeAttr("Targuments", std::vector<DataType>({}), &map_node);
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(original_map.attr(), key)) {
graph_utils::CopyAttribute(key, original_map, &map_node);
}
}
OpDef fused_sig = fused_map_func.signature();
FunctionDef* func = library->add_function();
OpDef* sig = func->mutable_signature();
sig->set_name("DropLast");
for (const auto& o : fused_sig.output_arg()) {
*(sig->add_input_arg()) = o;
}
for (int i = 0; i < fused_sig.output_arg_size() - 1; ++i) {
auto arg_i = fused_sig.output_arg(i);
*(sig->add_output_arg()) = arg_i;
(*func->mutable_ret())[arg_i.name()] = strings::StrCat(arg_i.name(), ":0");
}
sig->set_description("drops the last argument");
(*map_node.mutable_attr())["f"] =
FunctionDefHelper::FunctionRef(func->signature().name()).proto;
return map_node;
}
}
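// Scans for FilterDataset nodes fed by a (Parallel)MapDataset and replaces
// each pair with the fused_map -> FilterByLast -> DropLast chain built above.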
Status MapAndFilterFusion::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
GraphDef sorted_old_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(&sorted_old_graph));
*output = sorted_old_graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [](const NodeDef& node) -> const NodeDef* {
if ((node.op() == "MapDataset" && node.input_size() == 1) ||
(node.op() == "ParallelMapDataset" && node.input_size() == 2)) {
return &node;
}
return nullptr;
};
auto get_filter_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == "FilterDataset" && node.input_size() == 1) return &node;
return nullptr;
};
auto make_fused_function = [&function_library, &output](
const NodeDef* map_node,
const NodeDef* filter_node) -> FunctionDef* {
const auto& parent_fun = map_node->attr().at("f");
const FunctionDef* map_func =
function_library.Find(parent_fun.func().name());
const auto& fun = filter_node->attr().at("predicate");
const FunctionDef* filter_func = function_library.Find(fun.func().name());
if (!fusion_utils::CanCompose(map_func->signature(),
filter_func->signature())) {
VLOG(1) << "Can't fuse map and filter because the output signature of "
"the map function does not match the input signature of the "
"filter function\n";
return nullptr;
}
return fusion_utils::FuseFunctions(
*map_func, *filter_func, "fused_map_and_filter_function",
fusion_utils::CombineSignature, fusion_utils::ComposeInput,
fusion_utils::CombineOutput, fusion_utils::MergeNodes,
output->mutable_library());
};
for (const NodeDef& node : sorted_old_graph.node()) {
const NodeDef* filter_node = get_filter_node(node);
if (!filter_node) continue;
const NodeDef* map_node =
get_map_node(*graph_utils::GetInputNode(*filter_node, graph));
if (!map_node) continue;
const auto* fused_function = make_fused_function(map_node, filter_node);
if (fused_function == nullptr) continue;
const auto* fused_maps = graph.AddNode(
MakeFusedNode(*map_node, *filter_node, *fused_function, &graph));
const auto* new_filter_node = graph.AddNode(MakeFilterNode(
*fused_maps, *fused_function, &graph, output->mutable_library()));
const auto* new_map_node =
graph.AddNode(MakeMapNode(*new_filter_node, *map_node, *fused_function,
&graph, output->mutable_library()));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(filter_node->name(), new_map_node->name()));
TF_RETURN_IF_ERROR(function_library.AddFunctionDef(*fused_function));
nodes_to_delete.insert(map_node->name());
nodes_to_delete.insert(filter_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapAndFilterFusion, "map_and_filter_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using graph_tests_utils::MakeFilterNode;
using graph_tests_utils::MakeMapNode;
using graph_tests_utils::MakeParallelMapNode;
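// After the rewrite the graph should contain the fused (Parallel)MapDataset,
// a FilterDataset selecting on the appended predicate, and a second
// MapDataset that drops it -- hence the checks for two MapDataset nodes.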
TEST(MapAndFilterFusionTest, FuseMapAndFilter) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range"), MakeFilterNode("filter", "map")},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_EQ(graph_utils::FindAllGraphNodesWithOp("MapDataset", output).size(),
2);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
}
TEST(MapAndFilterFusionTest, FuseParallelMapAndFilter) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 3}, {"dtype", "DT_INT32"}}),
MakeParallelMapNode("map", "range", "num_parallel_calls", "XTimesTwo",
false),
MakeFilterNode("filter", "map")},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("MapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
auto& map_node = output.node(
graph_utils::FindGraphNodeWithOp("ParallelMapDataset", output));
EXPECT_FALSE(map_node.attr().at("sloppy").b()) << map_node.DebugString();
EXPECT_EQ(map_node.input_size(), 2);
}
TEST(MapAndFilterFusionTest, FuseMapAndFilterWithExtraChild) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range"), MakeFilterNode("filter", "map"),
NDef("cache", "CacheDataset", {"filter", "filename"}, {})},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_EQ(graph_utils::FindAllGraphNodesWithOp("MapDataset", output).size(),
2);
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("CacheDataset", output));
}
TEST(MapAndFilterFusionTest, FuseParallelMapAndFilterWithExtraChild) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 3}, {"dtype", "DT_INT32"}}),
MakeParallelMapNode("map", "range", "num_parallel_calls", "XTimesTwo",
true),
MakeFilterNode("filter", "map"),
NDef("cache", "CacheDataset", {"filter", "filename"}, {})},
{
test::function::XTimesTwo(),
test::function::IsZero(),
});
MapAndFilterFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("filter", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("FilterDataset", output));
ASSERT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("CacheDataset", output));
auto& map_node = output.node(
graph_utils::FindGraphNodeWithOp("ParallelMapDataset", output));
EXPECT_TRUE(map_node.attr().at("sloppy").b()) << map_node.DebugString();
EXPECT_EQ(map_node.input_size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_filter_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_and_filter_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09110132-03f3-4e02-8c26-76dc3137b184 | cpp | tensorflow/tensorflow | shuffle_and_repeat_fusion | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kShuffleDataset[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kRepeatDataset[] = "RepeatDataset";
constexpr char kShuffleAndRepeatDataset[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
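// ShuffleDataset (V1) carries explicit seeds: the fused
// ShuffleAndRepeatDataset takes input_dataset, buffer_size, seed and seed2
// from the shuffle and count from the repeat.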
Status FuseShuffleV1AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDataset);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
fused_node);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(shuffle_node.input(2));
fused_node->add_input(shuffle_node.input(3));
fused_node->add_input(repeat_node.input(1));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
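// ShuffleDatasetV2 carries a seed generator resource instead of explicit
// seeds, so zero placeholders are synthesized for seed/seed2 and
// reshuffle_each_iteration is forced to true on the fused
// ShuffleAndRepeatDatasetV2.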
Status FuseShuffleV2AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDatasetV2);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDatasetV2, output,
fused_node);
NodeDef zero_node = *graph_utils::AddScalarConstNode<int64_t>(0, graph);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(zero_node.name());
fused_node->add_input(zero_node.name());
fused_node->add_input(repeat_node.input(1));
fused_node->add_input(shuffle_node.input(2));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
(*fused_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
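// ShuffleDatasetV3 has both explicit seeds and a seed generator resource.
// Note the fused node keeps the V2 op type but is named after the V1 fused
// op.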
Status FuseShuffleV3AndRepeat(const NodeDef& shuffle_node,
const NodeDef& repeat_node,
MutableGraphView* graph, GraphDef* output,
NodeDef* fused_node) {
fused_node->set_op(kShuffleAndRepeatDatasetV2);
graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
fused_node);
fused_node->add_input(shuffle_node.input(0));
fused_node->add_input(shuffle_node.input(1));
fused_node->add_input(shuffle_node.input(2));
fused_node->add_input(shuffle_node.input(3));
fused_node->add_input(repeat_node.input(1));
fused_node->add_input(shuffle_node.input(4));
graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
return absl::OkStatus();
}
}
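// Scans for RepeatDataset nodes fed by a shuffle, fuses each matching pair,
// redirects the fanouts of both originals to the fused node, and deletes the
// originals unless they appear in NodesToPreserve().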
Status ShuffleAndRepeatFusion::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
for (const NodeDef& repeat_node : item.graph.node()) {
if (repeat_node.op() != kRepeatDataset) {
continue;
}
const NodeDef& shuffle_node =
*graph_utils::GetInputNode(repeat_node, graph);
NodeDef fused_node;
if (shuffle_node.op() == kShuffleDataset) {
TF_RETURN_IF_ERROR(FuseShuffleV1AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else if (shuffle_node.op() == kShuffleDatasetV2) {
TF_RETURN_IF_ERROR(FuseShuffleV2AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else if (shuffle_node.op() == kShuffleDatasetV3) {
TF_RETURN_IF_ERROR(FuseShuffleV3AndRepeat(shuffle_node, repeat_node,
&graph, output, &fused_node));
} else {
continue;
}
NodeDef& shuffle_and_repeat_node = *graph.AddNode(std::move(fused_node));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(repeat_node.name(),
shuffle_and_repeat_node.name()));
TF_RETURN_IF_ERROR(graph.UpdateFanouts(shuffle_node.name(),
shuffle_and_repeat_node.name()));
const auto nodes_to_preserve = item.NodesToPreserve();
if (nodes_to_preserve.find(shuffle_node.name()) ==
nodes_to_preserve.end() &&
nodes_to_preserve.find(repeat_node.name()) == nodes_to_preserve.end()) {
nodes_to_delete.insert(shuffle_node.name());
nodes_to_delete.insert(repeat_node.name());
}
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ShuffleAndRepeatFusion,
"shuffle_and_repeat_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
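// Each test hand-builds a Range -> Shuffle(V1/V2/V3) -> Repeat pipeline with
// MutableGraphView, then checks the inputs and attributes of the fused node
// emitted by the optimizer.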
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV1AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> shuffle_inputs(4);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_node->name();
shuffle_inputs[3] = seed2_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDataset", shuffle_inputs, common_attrs, &graph);
(*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDataset", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDataset", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 5);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
for (const auto &attr :
{kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
}
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV2AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_generator_node =
graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
std::vector<string> shuffle_inputs(3);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_generator_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDatasetV2", shuffle_inputs, common_attrs, &graph);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(2));
for (const auto &attr : {kOutputShapes, kOutputTypes}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
EXPECT_TRUE(shuffle_and_repeat_node.attr().at(kReshuffleEachIteration).b());
}
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV3AndRepeat) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *buffer_size_node =
graph_utils::AddScalarConstNode<int64_t>(128, &graph);
NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
NodeDef *seed_generator_node =
graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
std::vector<string> shuffle_inputs(5);
shuffle_inputs[0] = range_node->name();
shuffle_inputs[1] = buffer_size_node->name();
shuffle_inputs[2] = seed_node->name();
shuffle_inputs[3] = seed2_node->name();
shuffle_inputs[4] = seed_generator_node->name();
NodeDef *shuffle_node = graph_utils::AddNode(
"", "ShuffleDatasetV3", shuffle_inputs, common_attrs, &graph);
(*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = shuffle_node->name();
repeat_inputs[1] = count_node->name();
NodeDef *repeat_node = graph_utils::AddNode(
"", "RepeatDataset", repeat_inputs, common_attrs, &graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
NodeDef shuffle_and_repeat_node = output.node(
graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(4));
for (const auto &attr :
{kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
shuffle_node->attr().at(attr)));
}
}
TEST(ShuffleAndRepeatFusionTest, NoChange) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
std::vector<std::pair<string, AttrValue>> common_attrs(2);
AttrValue shapes_attr;
SetAttrValue(kOutputShapes, &shapes_attr);
common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
AttrValue types_attr;
SetAttrValue(kOutputTypes, &types_attr);
common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_node->name();
range_inputs[1] = stop_node->name();
range_inputs[2] = step_node->name();
NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
common_attrs, &graph);
NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
std::vector<string> repeat_inputs(2);
repeat_inputs[0] = range_node->name();
repeat_inputs[1] = count_node->name();
graph_utils::AddNode("", "RepeatDataset", repeat_inputs, common_attrs,
&graph);
ShuffleAndRepeatFusion optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b67db28c-e40b-4668-92e4-3270dc801d78 | cpp | tensorflow/tensorflow | graph_transform_wrapper | tensorflow/core/transforms/graph_transform_wrapper.cc | tensorflow/core/transforms/graph_transform_wrapper_test.cc | #include "tensorflow/core/transforms/graph_transform_wrapper.h"
#include <initializer_list>
#include "absl/memory/memory.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/platform/statusor.h"
namespace mlir {
namespace tfg {
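// Round-trips the Graph through TFG MLIR: imports the graph and its function
// library, runs the given passes, exports back to a GraphDef, and rebuilds
// the Graph in place.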
tensorflow::Status RunTransformOnGraph(
tensorflow::Graph* graph,
const std::initializer_list<
llvm::function_ref<std::unique_ptr<mlir::Pass>()>>& passes,
const tensorflow::GraphDebugInfo& debug_info) {
MLIRContext context(MLIRContext::Threading::DISABLED);
TF_ASSIGN_OR_RETURN(OwningOpRef<ModuleOp> module,
ImportGraphAndFunctionsToMlir(&context, debug_info,
*graph, graph->flib_def()));
PassManager pm((*module)->getName(), mlir::PassManager::Nesting::Explicit);
for (auto& pass : passes) pm.addPass(pass());
mlir::StatusScopedDiagnosticHandler error_handler(&context);
if (failed(pm.run(*module)))
return error_handler.Combine(
tensorflow::errors::InvalidArgument("MLIR Graph Optimizer failed: "));
tensorflow::GraphDef graphdef;
TF_RETURN_WITH_CONTEXT_IF_ERROR(ConvertToGraphDef(*module, &graphdef),
"when exporting MLIR module to GraphDef");
graph->Clear();
graph->mutable_flib_def()->Clear();
tensorflow::GraphConstructorOptions opts;
return ConvertGraphDefToGraph(opts, graphdef, graph);
}
}
} | #include "tensorflow/core/transforms/graph_transform_wrapper.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace {
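// A toy pass that erases the first user of result 0 of the tfg.TestInput op,
// so the consumer of output 0 ("n1") disappears while "n2" survives.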
struct TestPass : public PassWrapper<TestPass, OperationPass<ModuleOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPass)
TestPass() = default;
StringRef getArgument() const final { return "test"; }
void runOnOperation() override {
Operation* del;
getOperation()->walk([&](Operation* op) {
if (op->getName().getStringRef() != "tfg.TestInput") return;
del = *op->getResult(0).getUsers().begin();
});
del->erase();
}
};
}
}
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestRelu").Input("i: float").Output("o: float");
REGISTER_OP("NoOp");
TEST(GraphTransformWrapper, ReplacedGraph) {
tensorflow::Graph graph(tensorflow::OpRegistry::Global());
{
tensorflow::GraphDefBuilder b(
tensorflow::GraphDefBuilder::kFailImmediately);
tensorflow::Node* input =
tensorflow::ops::SourceOp("TestInput", b.opts().WithName("in"));
tensorflow::ops::UnaryOp("TestRelu", tensorflow::ops::NodeOut(input, 0),
b.opts().WithName("n1"));
tensorflow::ops::UnaryOp("TestRelu", tensorflow::ops::NodeOut(input, 1),
b.opts().WithName("n2"));
TF_EXPECT_OK(tensorflow::GraphDefBuilderToGraph(b, &graph));
}
mlir::MLIRContext context;
context.getOrLoadDialect<mlir::tfg::TFGraphDialect>();
auto create_pass = [&]() { return std::make_unique<mlir::TestPass>(); };
TF_QCHECK_OK(mlir::tfg::RunTransformOnGraph(&graph, {create_pass}));
EXPECT_EQ(4, graph.num_nodes());
EXPECT_TRUE(
absl::StrContains(graph.ToGraphDefDebug().ShortDebugString(), "\"n2\""));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/graph_transform_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/graph_transform_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99d6ed46-f40c-442e-b916-0ade406e716d | cpp | tensorflow/tensorflow | eval_utils | tensorflow/core/transforms/utils/eval_utils.cc | tensorflow/core/transforms/utils/eval_utils_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/transforms/utils/eval_utils.h"
#include <cassert>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Builders.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/public/version.h"
namespace mlir {
namespace tfg {
namespace util {
static constexpr int kThreads = 2;
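// A minimal CPU DeviceBase backed by a two-thread Eigen pool; just enough
// device to run individual kernels on the host.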
SimpleDevice::SimpleDevice() : DeviceBase(tensorflow::Env::Default()) {
eigen_worker_ = std::make_unique<tensorflow::thread::ThreadPool>(
tensorflow::Env::Default(), "eval_utils", kThreads);
eigen_worker_threads_.num_threads = kThreads;
eigen_worker_threads_.workers = eigen_worker_.get();
eigen_device_ = std::make_unique<Eigen::ThreadPoolDevice>(
eigen_worker_threads_.workers->AsEigenThreadPool(),
eigen_worker_threads_.num_threads);
set_tensorflow_cpu_worker_threads(&eigen_worker_threads_);
set_eigen_cpu_device(eigen_device_.get());
}
SimpleDevice::~SimpleDevice() {}
tensorflow::Allocator *SimpleDevice::GetAllocator(
tensorflow::AllocatorAttributes attr) {
return tensorflow::cpu_allocator();
}
tensorflow::Status SimpleDevice::MakeTensorFromProto(
const tensorflow::TensorProto &tensor_proto,
const tensorflow::AllocatorAttributes alloc_attrs,
tensorflow::Tensor *tensor) {
tensorflow::Tensor parsed(tensor_proto.dtype());
if (!parsed.FromProto(tensorflow::cpu_allocator(), tensor_proto)) {
return tensorflow::errors::InvalidArgument(
"Cannot parse tensor from tensor_proto.");
}
*tensor = std::move(parsed);
return absl::OkStatus();
}
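// Evaluates a single TFG op on the host: converts it to a NodeDef,
// materializes the constant operands as tensors, instantiates and runs the
// CPU kernel, then converts each output tensor back to an ElementsAttr.
// Outputs the kernel left unset become null attributes.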
LogicalResult EvaluateOperation(tensorflow::DeviceBase *cpu_device,
tensorflow::ResourceMgr *resource_mgr, TFOp op,
ArrayRef<ElementsAttr> operands,
SmallVectorImpl<TypedAttr> &results) {
assert(cpu_device && "cpu device can't be null");
assert(resource_mgr && "ResourceMgr can't be null");
if (llvm::any_of(operands, [](Attribute operand) { return !operand; })) {
VLOG(3) << "cannot be evaluated with null operands";
return failure();
}
tensorflow::NodeDef node_def;
if (!ConvertToNodeDef(&*op, &node_def, op.getDialect(), [&](Value value) {
return GetValueName(value, op.getDialect());
}).ok()) {
VLOG(3) << "failed to convert operation to NodeDef";
return failure();
}
absl::InlinedVector<tensorflow::Tensor, 4> input_tensors(operands.size());
absl::InlinedVector<tensorflow::TensorValue, 4> input_tensor_values(
operands.size());
for (auto it : llvm::zip(operands, input_tensors, input_tensor_values)) {
auto &[operand, input_tensor, input_tensor_value] = it;
if (!ConvertToTensor(operand, &input_tensor).ok()) return failure();
input_tensor_value.tensor = &input_tensor;
}
tensorflow::Status status;
std::unique_ptr<tensorflow::OpKernel> op_kernel = tensorflow::CreateOpKernel(
tensorflow::DEVICE_CPU, cpu_device, cpu_device->GetAllocator({}),
node_def, TF_GRAPH_DEF_VERSION, &status);
if (!status.ok()) {
VLOG(3) << status.message();
return failure();
}
tensorflow::OpKernelContext::Params params;
params.device = cpu_device;
params.frame_iter = tensorflow::FrameAndIter(0, 0);
params.inputs = input_tensor_values;
params.op_kernel = op_kernel.get();
params.resource_manager = resource_mgr;
absl::InlinedVector<tensorflow::AllocatorAttributes, 4> output_attrs(
op_kernel->num_outputs());
for (auto &attr : output_attrs) attr.set_on_host(true);
params.output_attr_array = output_attrs.data();
tensorflow::OpKernelContext op_context(¶ms);
op_kernel->Compute(&op_context);
if (!op_context.status().ok()) {
VLOG(3) << op_context.status().message();
return failure();
}
Builder builder(op->getContext());
for (int i = 0; i < op_kernel->num_outputs(); ++i) {
if (op_context.mutable_output(i) == nullptr) {
results.push_back(nullptr);
continue;
}
absl::StatusOr<ElementsAttr> attr_or =
ConvertTensor(*(op_context.mutable_output(i)), builder);
if (!attr_or.status().ok()) {
VLOG(3) << attr_or.status().message();
return failure();
}
results.push_back(attr_or.value());
}
return success();
}
}
}
} | #include "tensorflow/core/transforms/utils/eval_utils.h"
#include <memory>
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
TEST(EvalUtilsTest, InvalidInputs) {
const char *const code = R"mlir(
tfg.func @test() -> (tensor<2x2xi32>) {
%Const_0, %ctl_0 = Const name("c0") {dtype = i1, value = dense<1> : tensor<i1>} : () -> (tensor<i1>)
%Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
%Switch:2, %ctl_3 = Switch(%Const_1, %Const_0) name("switch") {T = i1} : (tensor<2x2xi32>, tensor<i1>) -> (tensor<*xi32>, tensor<*xi32>)
return (%Const_1) : tensor<2x2xi32>
}
)mlir";
MLIRContext context;
auto tfg_dialect = context.getOrLoadDialect<tfg::TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto iter = func.getBody().begin()->begin();
Operation *const_0 = &*iter++;
ASSERT_TRUE(tfg_dialect->IsConstant(const_0));
Operation *const_1 = &*iter++;
ASSERT_TRUE(tfg_dialect->IsConstant(const_1));
Operation *switch_op = &*iter++;
auto cpu_device = std::make_unique<util::SimpleDevice>();
auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
llvm::SmallVector<TypedAttr> result;
EXPECT_TRUE(failed(
util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), switch_op,
{const_0->getAttrOfType<ElementsAttr>("value"),
const_1->getAttrOfType<ElementsAttr>("value")},
result)));
}
TEST(EvalUtilsTest, EvaluateOperation) {
const char *const code = R"mlir(
tfg.func @test() -> (tensor<2x2xi32>) {
%Const_0, %ctl_0 = Const name("c0") {dtype = i32, value = dense<1> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
%Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
%Add, %ctl_7 = Add(%Const_0, %Const_1) name("add") {T = i32} : (tensor<2x2xi32>, tensor<2x2xi32>) -> (tensor<2x2xi32>)
return (%Const_1) : tensor<2x2xi32>
}
)mlir";
MLIRContext context;
context.getOrLoadDialect<tfg::TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto iter = func.getBody().begin()->begin();
Operation *const_0 = &*iter++;
Operation *const_1 = &*iter++;
Operation *add = &*iter++;
auto cpu_device = std::make_unique<util::SimpleDevice>();
auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
llvm::SmallVector<TypedAttr> result;
ASSERT_TRUE(succeeded(util::EvaluateOperation(
cpu_device.get(), resource_mgr.get(), const_0,
{const_0->getAttrOfType<ElementsAttr>("value")}, result)));
ASSERT_EQ(result.size(), 1);
ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 1);
result.clear();
ASSERT_TRUE(succeeded(util::EvaluateOperation(
cpu_device.get(), resource_mgr.get(), const_1,
{const_1->getAttrOfType<ElementsAttr>("value")}, result)));
ASSERT_EQ(result.size(), 1);
ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 2);
result.clear();
ASSERT_TRUE(succeeded(
util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), add,
{const_0->getAttrOfType<ElementsAttr>("value"),
const_1->getAttrOfType<ElementsAttr>("value")},
result)));
ASSERT_EQ(result.size(), 1);
ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 3);
}
TEST(EvalUtilsTest, OutputInvalidation) {
const char *const code = R"mlir(
tfg.func @test() -> (tensor<2x2xi32>) {
%Const_0, %ctl_0 = Const name("c0") {dtype = i1, value = dense<1> : tensor<i1>} : () -> (tensor<i1>)
%Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
%Switch:2, %ctl_3 = Switch(%Const_1, %Const_0) name("switch") {T = i1} : (tensor<2x2xi32>, tensor<i1>) -> (tensor<*xi32>, tensor<*xi32>)
%Identity_0, %ctl_4 = Identity(%Switch#0) name("id1") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
%Identity_1, %ctl_5 = Identity(%Switch#1) name("id2") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
return (%Const_1) : tensor<2x2xi32>
}
)mlir";
MLIRContext context;
auto tfg_dialect = context.getOrLoadDialect<tfg::TFGraphDialect>();
OwningOpRef<ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
ASSERT_TRUE(func);
auto iter = func.getBody().begin()->begin();
Operation *const_0 = &*iter++;
ASSERT_TRUE(tfg_dialect->IsConstant(const_0));
Operation *const_1 = &*iter++;
ASSERT_TRUE(tfg_dialect->IsConstant(const_1));
Operation *switch_op = &*iter++;
auto cpu_device = std::make_unique<util::SimpleDevice>();
auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
llvm::SmallVector<TypedAttr> result;
ASSERT_TRUE(succeeded(
util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), switch_op,
{const_1->getAttrOfType<ElementsAttr>("value"),
const_0->getAttrOfType<ElementsAttr>("value")},
result)));
ASSERT_EQ(result.size(), 2);
EXPECT_EQ(result[0], nullptr);
EXPECT_EQ(mlir::cast<ElementsAttr>(result[1]).getValues<int>()[0], 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/utils/eval_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/utils/eval_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d40086fa-d2c1-4a7d-9d48-7b3d7cb0db27 | cpp | tensorflow/tensorflow | ordered_code | tensorflow/core/lib/strings/ordered_code.cc | tensorflow/core/lib/strings/ordered_code_test.cc | #include "tensorflow/core/lib/strings/ordered_code.h"
#include <assert.h>
#include <stddef.h>
#include "xla/tsl/lib/core/bits.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace strings {
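// Strings are escaped so that encodings compare lexicographically in the
// same order as the originals. 0x00 and 0xff are the only special bytes:
// '\0' is written as 0x00 0xff, '\xff' as 0xff 0x00, and the pair 0x00 0x01
// terminates the string.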
static const char kEscape1 = '\000';
static const char kNullCharacter = '\xff';
static const char kSeparator = '\001';
static const char kEscape2 = '\xff';
static const char kFFCharacter = '\000';
static const char kEscape1_Separator[2] = {kEscape1, kSeparator};
inline static void AppendBytes(string* dest, const char* src, size_t len) {
dest->append(src, len);
}
inline bool IsSpecialByte(char c) {
return (static_cast<unsigned char>(c + 1)) < 2;
}
inline const char* SkipToNextSpecialByte(const char* start, const char* limit) {
DCHECK_EQ(kEscape1, 0);
DCHECK_EQ(kEscape2 & 0xffu, 255u);
const char* p = start;
while (p < limit && !IsSpecialByte(*p)) {
p++;
}
return p;
}
const char* OrderedCode::TEST_SkipToNextSpecialByte(const char* start,
const char* limit) {
return SkipToNextSpecialByte(start, limit);
}
inline static void EncodeStringFragment(string* dest, StringPiece s) {
const char* p = s.data();
const char* limit = p + s.size();
const char* copy_start = p;
while (true) {
p = SkipToNextSpecialByte(p, limit);
if (p >= limit) break;
char c = *(p++);
DCHECK(IsSpecialByte(c));
if (c == kEscape1) {
AppendBytes(dest, copy_start, p - copy_start - 1);
dest->push_back(kEscape1);
dest->push_back(kNullCharacter);
copy_start = p;
} else {
assert(c == kEscape2);
AppendBytes(dest, copy_start, p - copy_start - 1);
dest->push_back(kEscape2);
dest->push_back(kFFCharacter);
copy_start = p;
}
}
if (p > copy_start) {
AppendBytes(dest, copy_start, p - copy_start);
}
}
void OrderedCode::WriteString(string* dest, StringPiece s) {
EncodeStringFragment(dest, s);
AppendBytes(dest, kEscape1_Separator, 2);
}
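// Unsigned numbers are encoded as a length byte followed by the minimal
// big-endian representation, e.g. 0x100 -> "\x02\x01\x00", so encodings
// order lexicographically like the numbers themselves.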
void OrderedCode::WriteNumIncreasing(string* dest, uint64 val) {
unsigned char buf[9];
int len = 0;
while (val > 0) {
len++;
buf[9 - len] = (val & 0xff);
val >>= 8;
}
buf[9 - len - 1] = len;
len++;
AppendBytes(dest, reinterpret_cast<const char*>(buf + 9 - len), len);
}
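// Shared decoder for ReadString: un-escapes the 0x00 0xff and 0xff 0x00
// pairs, stops at the 0x00 0x01 terminator, and returns false on a
// malformed escape sequence.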
inline static bool ReadStringInternal(StringPiece* src, string* result) {
const char* start = src->data();
const char* string_limit = src->data() + src->size();
const char* limit = string_limit - 1;
const char* copy_start = start;
while (true) {
start = SkipToNextSpecialByte(start, limit);
if (start >= limit) break;
const char c = *(start++);
DCHECK(IsSpecialByte(c));
if (c == kEscape1) {
if (result) {
AppendBytes(result, copy_start, start - copy_start - 1);
}
const char next = *(start++);
if (next == kSeparator) {
src->remove_prefix(start - src->data());
return true;
} else if (next == kNullCharacter) {
if (result) {
*result += '\0';
}
} else {
return false;
}
copy_start = start;
} else {
assert(c == kEscape2);
if (result) {
AppendBytes(result, copy_start, start - copy_start - 1);
}
const char next = *(start++);
if (next == kFFCharacter) {
if (result) {
*result += '\xff';
}
} else {
return false;
}
copy_start = start;
}
}
return false;
}
bool OrderedCode::ReadString(StringPiece* src, string* result) {
return ReadStringInternal(src, result);
}
bool OrderedCode::ReadNumIncreasing(StringPiece* src, uint64* result) {
if (src->empty()) {
return false;
}
const size_t len = static_cast<unsigned char>((*src)[0]);
DCHECK(0 == len || src->size() == 1 || (*src)[1] != '\0')
<< "invalid encoding";
if (len + 1 > src->size() || len > 8) {
return false;
}
if (result) {
uint64 tmp = 0;
for (size_t i = 0; i < len; i++) {
tmp <<= 8;
tmp |= static_cast<unsigned char>((*src)[1 + i]);
}
*result = tmp;
}
src->remove_prefix(len + 1);
return true;
}
void OrderedCode::TEST_Corrupt(string* str, int k) {
int seen_seps = 0;
for (size_t i = 0; i + 1 < str->size(); i++) {
if ((*str)[i] == kEscape1 && (*str)[i + 1] == kSeparator) {
seen_seps++;
if (seen_seps == k) {
(*str)[i + 1] = kSeparator + 1;
return;
}
}
}
}
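// Signed numbers use a varint-like prefix: the run of leading bits equal to
// the sign bit selects the length (1..10 bytes), and the header bits below
// are XORed in so that encodings of increasing values sort
// lexicographically.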
static const int kMaxSigned64Length = 10;
static const char kLengthToHeaderBits[1 + kMaxSigned64Length][2] = {
{0, 0}, {'\x80', 0}, {'\xc0', 0}, {'\xe0', 0},
{'\xf0', 0}, {'\xf8', 0}, {'\xfc', 0}, {'\xfe', 0},
{'\xff', 0}, {'\xff', '\x80'}, {'\xff', '\xc0'}};
static const uint64 kLengthToMask[1 + kMaxSigned64Length] = {
0ULL,
0x80ULL,
0xc000ULL,
0xe00000ULL,
0xf0000000ULL,
0xf800000000ULL,
0xfc0000000000ULL,
0xfe000000000000ULL,
0xff00000000000000ULL,
0x8000000000000000ULL,
0ULL};
static const int8 kBitsToLength[1 + 63] = {
1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4,
4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 7,
7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10};
static inline int SignedEncodingLength(int64_t n) {
return kBitsToLength[tsl::Log2Floor64(n < 0 ? ~n : n) + 1];
}
static void StoreBigEndian64(char* dst, uint64 v) {
for (int i = 0; i < 8; i++) {
dst[i] = (v >> (56 - 8 * i)) & 0xff;
}
}
static uint64 LoadBigEndian64(const char* src) {
uint64 result = 0;
for (int i = 0; i < 8; i++) {
unsigned char c = static_cast<unsigned char>(src[i]);
result |= static_cast<uint64>(c) << (56 - 8 * i);
}
return result;
}
void OrderedCode::WriteSignedNumIncreasing(string* dest, int64_t val) {
const uint64 x = val < 0 ? ~val : val;
if (x < 64) {
*dest += kLengthToHeaderBits[1][0] ^ val;
return;
}
const char sign_byte = val < 0 ? '\xff' : '\0';
char buf[10] = {
sign_byte,
sign_byte,
};
StoreBigEndian64(buf + 2, val);
static_assert(sizeof(buf) == kMaxSigned64Length, "max length size mismatch");
const int len = SignedEncodingLength(x);
DCHECK_GE(len, 2);
char* const begin = buf + sizeof(buf) - len;
begin[0] ^= kLengthToHeaderBits[len][0];
begin[1] ^= kLengthToHeaderBits[len][1];
dest->append(begin, len);
}
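// Decodes the signed encoding: the sign comes from the top bit, the length
// from the leading run of the first byte (or bytes, for 9- and 10-byte
// encodings), and the header bits are stripped via kLengthToMask.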
bool OrderedCode::ReadSignedNumIncreasing(StringPiece* src, int64_t* result) {
if (src->empty()) return false;
const uint64 xor_mask = (!((*src)[0] & 0x80)) ? ~0ULL : 0ULL;
const unsigned char first_byte = (*src)[0] ^ (xor_mask & 0xff);
int len;
uint64 x;
if (first_byte != 0xff) {
len = 7 - tsl::Log2Floor64(first_byte ^ 0xff);
if (src->size() < static_cast<size_t>(len)) return false;
x = xor_mask;
for (int i = 0; i < len; ++i)
x = (x << 8) | static_cast<unsigned char>((*src)[i]);
} else {
len = 8;
if (src->size() < static_cast<size_t>(len)) return false;
const unsigned char second_byte = (*src)[1] ^ (xor_mask & 0xff);
if (second_byte >= 0x80) {
if (second_byte < 0xc0) {
len = 9;
} else {
const unsigned char third_byte = (*src)[2] ^ (xor_mask & 0xff);
if (second_byte == 0xc0 && third_byte < 0x80) {
len = 10;
} else {
return false;
}
}
if (src->size() < static_cast<size_t>(len)) return false;
}
x = LoadBigEndian64(src->data() + len - 8);
}
x ^= kLengthToMask[len];
DCHECK_EQ(len, SignedEncodingLength(x)) << "invalid encoding";
if (result) *result = x;
src->remove_prefix(len);
return true;
}
}
} | #include "tensorflow/core/lib/strings/ordered_code.h"
#include <float.h>
#include <stddef.h>
#include <limits>
#include <vector>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace strings {
namespace {
string RandomString(random::SimplePhilox* rnd, size_t len) {
string x;
for (size_t i = 0; i < len; i++) {
x += rnd->Uniform(256);
}
return x;
}
template <typename T>
void OCWriteIncreasing(string* dest, const T& val);
template <typename T>
bool OCReadIncreasing(StringPiece* src, T* result);
template <>
void OCWriteIncreasing<string>(string* dest, const string& val) {
OrderedCode::WriteString(dest, val);
}
template <>
bool OCReadIncreasing<string>(StringPiece* src, string* result) {
return OrderedCode::ReadString(src, result);
}
template <>
void OCWriteIncreasing<uint64>(string* dest, const uint64& val) {
OrderedCode::WriteNumIncreasing(dest, val);
}
template <>
bool OCReadIncreasing<uint64>(StringPiece* src, uint64* result) {
return OrderedCode::ReadNumIncreasing(src, result);
}
template <>
void OCWriteIncreasing<int64_t>(string* dest, const int64_t& val) {
OrderedCode::WriteSignedNumIncreasing(dest, val);
}
template <>
bool OCReadIncreasing<int64_t>(StringPiece* src, int64_t* result) {
return OrderedCode::ReadSignedNumIncreasing(src, result);
}
template <typename T>
string OCWrite(T val) {
string result;
OCWriteIncreasing<T>(&result, val);
return result;
}
template <typename T>
void OCWriteToString(string* result, T val) {
OCWriteIncreasing<T>(result, val);
}
template <typename T>
bool OCRead(StringPiece* s, T* val) {
return OCReadIncreasing<T>(s, val);
}
template <typename T>
T TestRead(const string& a) {
for (int i = 0; i < a.size() - 1; ++i) {
StringPiece s(a.data(), i);
CHECK(!OCRead<T>(&s, nullptr));
CHECK_EQ(s, a.substr(0, i));
}
StringPiece s(a);
T v;
CHECK(OCRead<T>(&s, &v));
CHECK(s.empty());
return v;
}
template <typename T>
void TestWriteRead(T expected) {
EXPECT_EQ(expected, TestRead<T>(OCWrite<T>(expected)));
}
template <typename T, typename U>
void TestWriteAppends(T first, U second) {
string encoded;
OCWriteToString<T>(&encoded, first);
string encoded_first_only = encoded;
OCWriteToString<U>(&encoded, second);
EXPECT_NE(encoded, encoded_first_only);
EXPECT_TRUE(absl::StartsWith(encoded, encoded_first_only));
}
template <typename T>
void TestNumbers(T multiplier) {
for (T x = std::numeric_limits<T>().max(); x != 0; x /= 2) {
TestWriteRead(multiplier * (x - 1));
TestWriteRead(multiplier * x);
if (x != std::numeric_limits<T>::max()) {
TestWriteRead(multiplier * (x + 1));
} else if (multiplier < 0 && multiplier == -1) {
TestWriteRead(-x - 1);
}
}
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int bits = 1; bits <= std::numeric_limits<T>().digits; ++bits) {
const uint64 mask = (~0ULL) >> (64 - bits);
for (int i = 0; i < 1000; i++) {
T x = rnd.Rand64() & mask;
TestWriteRead(multiplier * x);
T y = rnd.Rand64() & mask;
TestWriteAppends(multiplier * x, multiplier * y);
}
}
}
bool CompareStrings(const string& a, const string& b) { return (a < b); }
template <typename T>
void TestNumberOrdering() {
string laststr = OCWrite<T>(std::numeric_limits<T>().min());
for (T num = std::numeric_limits<T>().min() / 2; num != 0; num /= 2) {
string strminus1 = OCWrite<T>(num - 1);
string str = OCWrite<T>(num);
string strplus1 = OCWrite<T>(num + 1);
CHECK(CompareStrings(strminus1, str));
CHECK(CompareStrings(str, strplus1));
CHECK(CompareStrings(laststr, str));
laststr = str;
}
laststr = OCWrite<T>(0);
T num = 1;
while (num < std::numeric_limits<T>().max() / 2) {
num *= 2;
string strminus1 = OCWrite<T>(num - 1);
string str = OCWrite<T>(num);
string strplus1 = OCWrite<T>(num + 1);
CHECK(CompareStrings(strminus1, str));
CHECK(CompareStrings(str, strplus1));
CHECK(CompareStrings(laststr, str));
laststr = str;
}
}
size_t FindSpecial(const string& x) {
const char* p = x.data();
const char* limit = p + x.size();
const char* result = OrderedCode::TEST_SkipToNextSpecialByte(p, limit);
return result - p;
}
template <size_t N>
string ByteSequence(const char (&arr)[N]) {
return string(arr, N - 1);
}
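// Exercises the scanner on random strings containing no special bytes, then
// with a special byte (0x00 or 0xff) planted at every position.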
TEST(OrderedCode, SkipToNextSpecialByte) {
for (size_t len = 0; len < 256; len++) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
string x;
while (x.size() < len) {
char c = 1 + rnd.Uniform(254);
ASSERT_NE(c, 0);
ASSERT_NE(c, 255);
x += c;
}
EXPECT_EQ(FindSpecial(x), x.size());
for (size_t special_pos = 0; special_pos < len; special_pos++) {
for (size_t special_test = 0; special_test < 2; special_test++) {
const char special_byte = (special_test == 0) ? 0 : 255;
string y = x;
y[special_pos] = special_byte;
EXPECT_EQ(FindSpecial(y), special_pos);
if (special_pos < 16) {
for (size_t rest = special_pos + 1; rest < len; rest++) {
if (rnd.OneIn(3)) {
y[rest] = rnd.OneIn(2) ? 0 : 255;
EXPECT_EQ(FindSpecial(y), special_pos);
}
}
}
}
}
}
}
TEST(OrderedCode, ExhaustiveFindSpecial) {
char buf[16];
char* limit = buf + sizeof(buf);
int count = 0;
for (int start_offset = 0; start_offset <= 5; start_offset += 5) {
for (size_t i = 0; i < sizeof(buf); i++) {
buf[i] = 'a';
}
for (int b0 = 0; b0 < 256; b0++) {
for (int b1 = 0; b1 < 256; b1++) {
for (int b2 = 0; b2 < 256; b2++) {
buf[start_offset + 0] = b0;
buf[start_offset + 1] = b1;
buf[start_offset + 2] = b2;
char* expected;
if (b0 == 0 || b0 == 255) {
expected = &buf[start_offset];
} else if (b1 == 0 || b1 == 255) {
expected = &buf[start_offset + 1];
} else if (b2 == 0 || b2 == 255) {
expected = &buf[start_offset + 2];
} else {
expected = limit;
}
count++;
EXPECT_EQ(expected,
OrderedCode::TEST_SkipToNextSpecialByte(buf, limit));
}
}
}
}
EXPECT_EQ(count, 256 * 256 * 256 * 2);
}
TEST(Uint64, EncodeDecode) { TestNumbers<uint64>(1); }
TEST(Uint64, Ordering) { TestNumberOrdering<uint64>(); }
TEST(Int64, EncodeDecode) {
TestNumbers<int64_t>(1);
TestNumbers<int64_t>(-1);
}
TEST(Int64, Ordering) { TestNumberOrdering<int64_t>(); }
inline string StrNot(const string& s) {
string result;
for (string::const_iterator it = s.begin(); it != s.end(); ++it)
result.push_back(~*it);
return result;
}
template <typename T>
void TestInvalidEncoding(const string& s) {
StringPiece p(s);
EXPECT_FALSE(OCRead<T>(&p, nullptr));
EXPECT_EQ(s, p);
}
TEST(OrderedCodeInvalidEncodingsTest, Overflow) {
const string k2xx64U = "\x09\x01" + string(8, 0);
TestInvalidEncoding<uint64>(k2xx64U);
const string k2xx63 = "\xff\xc0\x80" + string(7, 0);
TestInvalidEncoding<int64_t>(k2xx63);
TestInvalidEncoding<int64_t>(StrNot(k2xx63));
}
TEST(OrderedCodeInvalidEncodingsDeathTest, NonCanonical) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int n = 2; n <= 9; ++n) {
string non_minimal =
string(1, n - 1) + string(1, 0) + RandomString(&rnd, n - 2);
EXPECT_EQ(n, non_minimal.length());
EXPECT_NE(OCWrite<uint64>(0), non_minimal);
#ifndef NDEBUG
StringPiece s(non_minimal);
EXPECT_DEATH(OrderedCode::ReadNumIncreasing(&s, nullptr),
"invalid encoding");
#else
TestRead<uint64>(non_minimal);
#endif
}
for (int n = 2; n <= 10; ++n) {
string header = string(n / 8, 0xff) + string(1, 0xff << (8 - (n % 8)));
string non_minimal = header +
string(1, rnd.Uniform(256) & ~*header.rbegin()) +
RandomString(&rnd, n - header.length() - 1);
EXPECT_EQ(n, non_minimal.length());
EXPECT_NE(OCWrite<int64_t>(0), non_minimal);
#ifndef NDEBUG
StringPiece s(non_minimal);
EXPECT_DEATH(OrderedCode::ReadSignedNumIncreasing(&s, nullptr),
"invalid encoding")
<< n;
#else
TestRead<int64_t>(non_minimal);
#endif
}
}
uint64 NextBits(random::SimplePhilox* rnd, int bits) {
return (bits != 0)
? (rnd->Rand64() % (1LL << (bits - 1))) + (1LL << (bits - 1))
: 0;
}
template <typename T>
void BM_WriteNum(::testing::benchmark::State& state, T multiplier) {
constexpr int kValues = 64;
T values[kValues];
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int i = 0; i < kValues; i++) {
values[i] = NextBits(&rnd, state.max_iterations % 64) * multiplier;
}
string result;
int index = 0;
for (auto i : state) {
result.clear();
OCWriteToString<T>(&result, values[index % kValues]);
index++;
}
}
template <typename T>
void BM_ReadNum(::testing::benchmark::State& state, T multiplier) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
constexpr int kValues = 64;
string values[kValues];
for (int i = 0; i < kValues; i++) {
T val = NextBits(&rnd, i % 64) * multiplier;
values[i] = OCWrite<T>(val);
}
uint32 index = 0;
for (auto i : state) {
T val;
StringPiece s = values[index++ % kValues];
OCRead<T>(&s, &val);
}
}
#define BENCHMARK_NUM(name, T, multiplier) \
void BM_Write##name(::testing::benchmark::State& state) { \
BM_WriteNum<T>(state, multiplier); \
} \
BENCHMARK(BM_Write##name); \
void BM_Read##name(::testing::benchmark::State& state) { \
BM_ReadNum<T>(state, multiplier); \
} \
BENCHMARK(BM_Read##name)
BENCHMARK_NUM(NumIncreasing, uint64, 1);
BENCHMARK_NUM(SignedNum, int64_t, 1);
BENCHMARK_NUM(SignedNumNegative, int64_t, -1);
#undef BENCHMARK_NUM
TEST(String, EncodeDecode) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int len = 0; len < 256; len++) {
const string a = RandomString(&rnd, len);
TestWriteRead(a);
for (int len2 = 0; len2 < 64; len2++) {
const string b = RandomString(&rnd, len2);
TestWriteAppends(a, b);
string out;
OCWriteToString<string>(&out, a);
OCWriteToString<string>(&out, b);
string a2, b2, dummy;
StringPiece s = out;
StringPiece s2 = out;
CHECK(OCRead<string>(&s, &a2));
CHECK(OCRead<string>(&s2, nullptr));
CHECK_EQ(s, s2);
CHECK(OCRead<string>(&s, &b2));
CHECK(OCRead<string>(&s2, nullptr));
CHECK_EQ(s, s2);
CHECK(!OCRead<string>(&s, &dummy));
CHECK(!OCRead<string>(&s2, nullptr));
CHECK_EQ(a, a2);
CHECK_EQ(b, b2);
CHECK(s.empty());
CHECK(s2.empty());
}
}
}
#define STATIC_STR(str) StringPiece((str), sizeof(str) - 1)
string EncodeStringIncreasing(StringPiece value) {
string encoded;
OrderedCode::WriteString(&encoded, value);
return encoded;
}
TEST(String, Increasing) {
ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("")),
EncodeStringIncreasing(STATIC_STR("")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("")),
EncodeStringIncreasing(STATIC_STR("\x00")));
ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("\x00")),
EncodeStringIncreasing(STATIC_STR("\x00")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x00")),
EncodeStringIncreasing(STATIC_STR("\x01")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\x01")),
EncodeStringIncreasing(STATIC_STR("a")));
ASSERT_EQ(EncodeStringIncreasing(STATIC_STR("a")),
EncodeStringIncreasing(STATIC_STR("a")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("a")),
EncodeStringIncreasing(STATIC_STR("aa")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("aa")),
EncodeStringIncreasing(STATIC_STR("\xff")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff")),
EncodeStringIncreasing(STATIC_STR("\xff\x00")));
ASSERT_LT(EncodeStringIncreasing(STATIC_STR("\xff\x00")),
EncodeStringIncreasing(STATIC_STR("\xff\x01")));
}
TEST(EncodingIsExpected, String) {
std::vector<std::pair<string, string>> data = {
{"", string("\x00\x01", 2)},
{"foo", string("foo\x00\x01", 5)},
{"hello", string("hello\x00\x01", 7)},
{string("\x00\x01\xff", 3), string("\x00\xff\x01\xff\x00\x00\x01", 7)},
};
for (const auto& t : data) {
string result;
OrderedCode::WriteString(&result, t.first);
EXPECT_EQ(t.second, result);
StringPiece in = result;
string decoded;
EXPECT_TRUE(OrderedCode::ReadString(&in, &decoded));
EXPECT_EQ(t.first, decoded);
EXPECT_EQ("", in);
}
}
TEST(EncodingIsExpected, Unsigned) {
std::vector<std::pair<uint64, string>> data = {
{0x0ull, ByteSequence("\000")},
{0x1ull, ByteSequence("\001\001")},
{0x2ull, ByteSequence("\001\002")},
{0x1ull, ByteSequence("\001\001")},
{0x2ull, ByteSequence("\001\002")},
{0x3ull, ByteSequence("\001\003")},
{0x3ull, ByteSequence("\001\003")},
{0x4ull, ByteSequence("\001\004")},
{0x5ull, ByteSequence("\001\005")},
{0x7ull, ByteSequence("\001\007")},
{0x8ull, ByteSequence("\001\010")},
{0x9ull, ByteSequence("\001\t")},
{0xfull, ByteSequence("\001\017")},
{0x10ull, ByteSequence("\001\020")},
{0x11ull, ByteSequence("\001\021")},
{0x1full, ByteSequence("\001\037")},
{0x20ull, ByteSequence("\001 ")},
{0x21ull, ByteSequence("\001!")},
{0x3full, ByteSequence("\001?")},
{0x40ull, ByteSequence("\001@")},
{0x41ull, ByteSequence("\001A")},
{0x7full, ByteSequence("\001\177")},
{0x80ull, ByteSequence("\001\200")},
{0x81ull, ByteSequence("\001\201")},
{0xffull, ByteSequence("\001\377")},
{0x100ull, ByteSequence("\002\001\000")},
{0x101ull, ByteSequence("\002\001\001")},
{0x1ffull, ByteSequence("\002\001\377")},
{0x200ull, ByteSequence("\002\002\000")},
{0x201ull, ByteSequence("\002\002\001")},
{0x3ffull, ByteSequence("\002\003\377")},
{0x400ull, ByteSequence("\002\004\000")},
{0x401ull, ByteSequence("\002\004\001")},
{0x7ffull, ByteSequence("\002\007\377")},
{0x800ull, ByteSequence("\002\010\000")},
{0x801ull, ByteSequence("\002\010\001")},
{0xfffull, ByteSequence("\002\017\377")},
{0x1000ull, ByteSequence("\002\020\000")},
{0x1001ull, ByteSequence("\002\020\001")},
{0x1fffull, ByteSequence("\002\037\377")},
{0x2000ull, ByteSequence("\002 \000")},
{0x2001ull, ByteSequence("\002 \001")},
{0x3fffull, ByteSequence("\002?\377")},
{0x4000ull, ByteSequence("\002@\000")},
{0x4001ull, ByteSequence("\002@\001")},
{0x7fffull, ByteSequence("\002\177\377")},
{0x8000ull, ByteSequence("\002\200\000")},
{0x8001ull, ByteSequence("\002\200\001")},
{0xffffull, ByteSequence("\002\377\377")},
{0x10000ull, ByteSequence("\003\001\000\000")},
{0x10001ull, ByteSequence("\003\001\000\001")},
{0x1ffffull, ByteSequence("\003\001\377\377")},
{0x20000ull, ByteSequence("\003\002\000\000")},
{0x20001ull, ByteSequence("\003\002\000\001")},
{0x3ffffull, ByteSequence("\003\003\377\377")},
{0x40000ull, ByteSequence("\003\004\000\000")},
{0x40001ull, ByteSequence("\003\004\000\001")},
{0x7ffffull, ByteSequence("\003\007\377\377")},
{0x80000ull, ByteSequence("\003\010\000\000")},
{0x80001ull, ByteSequence("\003\010\000\001")},
{0xfffffull, ByteSequence("\003\017\377\377")},
{0x100000ull, ByteSequence("\003\020\000\000")},
{0x100001ull, ByteSequence("\003\020\000\001")},
{0x1fffffull, ByteSequence("\003\037\377\377")},
{0x200000ull, ByteSequence("\003 \000\000")},
{0x200001ull, ByteSequence("\003 \000\001")},
{0x3fffffull, ByteSequence("\003?\377\377")},
{0x400000ull, ByteSequence("\003@\000\000")},
{0x400001ull, ByteSequence("\003@\000\001")},
{0x7fffffull, ByteSequence("\003\177\377\377")},
{0x800000ull, ByteSequence("\003\200\000\000")},
{0x800001ull, ByteSequence("\003\200\000\001")},
{0xffffffull, ByteSequence("\003\377\377\377")},
{0x1000000ull, ByteSequence("\004\001\000\000\000")},
{0x1000001ull, ByteSequence("\004\001\000\000\001")},
{0x1ffffffull, ByteSequence("\004\001\377\377\377")},
{0x2000000ull, ByteSequence("\004\002\000\000\000")},
{0x2000001ull, ByteSequence("\004\002\000\000\001")},
{0x3ffffffull, ByteSequence("\004\003\377\377\377")},
{0x4000000ull, ByteSequence("\004\004\000\000\000")},
{0x4000001ull, ByteSequence("\004\004\000\000\001")},
{0x7ffffffull, ByteSequence("\004\007\377\377\377")},
{0x8000000ull, ByteSequence("\004\010\000\000\000")},
{0x8000001ull, ByteSequence("\004\010\000\000\001")},
{0xfffffffull, ByteSequence("\004\017\377\377\377")},
{0x10000000ull, ByteSequence("\004\020\000\000\000")},
{0x10000001ull, ByteSequence("\004\020\000\000\001")},
{0x1fffffffull, ByteSequence("\004\037\377\377\377")},
{0x20000000ull, ByteSequence("\004 \000\000\000")},
{0x20000001ull, ByteSequence("\004 \000\000\001")},
{0x3fffffffull, ByteSequence("\004?\377\377\377")},
{0x40000000ull, ByteSequence("\004@\000\000\000")},
{0x40000001ull, ByteSequence("\004@\000\000\001")},
{0x7fffffffull, ByteSequence("\004\177\377\377\377")},
{0x80000000ull, ByteSequence("\004\200\000\000\000")},
{0x80000001ull, ByteSequence("\004\200\000\000\001")},
{0xffffffffull, ByteSequence("\004\377\377\377\377")},
{0x100000000ull, ByteSequence("\005\001\000\000\000\000")},
{0x100000001ull, ByteSequence("\005\001\000\000\000\001")},
{0x1ffffffffull, ByteSequence("\005\001\377\377\377\377")},
{0x200000000ull, ByteSequence("\005\002\000\000\000\000")},
{0x200000001ull, ByteSequence("\005\002\000\000\000\001")},
{0x3ffffffffull, ByteSequence("\005\003\377\377\377\377")},
{0x400000000ull, ByteSequence("\005\004\000\000\000\000")},
{0x400000001ull, ByteSequence("\005\004\000\000\000\001")},
{0x7ffffffffull, ByteSequence("\005\007\377\377\377\377")},
{0x800000000ull, ByteSequence("\005\010\000\000\000\000")},
{0x800000001ull, ByteSequence("\005\010\000\000\000\001")},
{0xfffffffffull, ByteSequence("\005\017\377\377\377\377")},
{0x1000000000ull, ByteSequence("\005\020\000\000\000\000")},
{0x1000000001ull, ByteSequence("\005\020\000\000\000\001")},
{0x1fffffffffull, ByteSequence("\005\037\377\377\377\377")},
{0x2000000000ull, ByteSequence("\005 \000\000\000\000")},
{0x2000000001ull, ByteSequence("\005 \000\000\000\001")},
{0x3fffffffffull, ByteSequence("\005?\377\377\377\377")},
{0x4000000000ull, ByteSequence("\005@\000\000\000\000")},
{0x4000000001ull, ByteSequence("\005@\000\000\000\001")},
{0x7fffffffffull, ByteSequence("\005\177\377\377\377\377")},
{0x8000000000ull, ByteSequence("\005\200\000\000\000\000")},
{0x8000000001ull, ByteSequence("\005\200\000\000\000\001")},
{0xffffffffffull, ByteSequence("\005\377\377\377\377\377")},
{0x10000000000ull, ByteSequence("\006\001\000\000\000\000\000")},
{0x10000000001ull, ByteSequence("\006\001\000\000\000\000\001")},
{0x1ffffffffffull, ByteSequence("\006\001\377\377\377\377\377")},
{0x20000000000ull, ByteSequence("\006\002\000\000\000\000\000")},
{0x20000000001ull, ByteSequence("\006\002\000\000\000\000\001")},
{0x3ffffffffffull, ByteSequence("\006\003\377\377\377\377\377")},
{0x40000000000ull, ByteSequence("\006\004\000\000\000\000\000")},
{0x40000000001ull, ByteSequence("\006\004\000\000\000\000\001")},
{0x7ffffffffffull, ByteSequence("\006\007\377\377\377\377\377")},
{0x80000000000ull, ByteSequence("\006\010\000\000\000\000\000")},
{0x80000000001ull, ByteSequence("\006\010\000\000\000\000\001")},
{0xfffffffffffull, ByteSequence("\006\017\377\377\377\377\377")},
{0x100000000000ull, ByteSequence("\006\020\000\000\000\000\000")},
{0x100000000001ull, ByteSequence("\006\020\000\000\000\000\001")},
{0x1fffffffffffull, ByteSequence("\006\037\377\377\377\377\377")},
{0x200000000000ull, ByteSequence("\006 \000\000\000\000\000")},
{0x200000000001ull, ByteSequence("\006 \000\000\000\000\001")},
{0x3fffffffffffull, ByteSequence("\006?\377\377\377\377\377")},
{0x400000000000ull, ByteSequence("\006@\000\000\000\000\000")},
{0x400000000001ull, ByteSequence("\006@\000\000\000\000\001")},
{0x7fffffffffffull, ByteSequence("\006\177\377\377\377\377\377")},
{0x800000000000ull, ByteSequence("\006\200\000\000\000\000\000")},
{0x800000000001ull, ByteSequence("\006\200\000\000\000\000\001")},
{0xffffffffffffull, ByteSequence("\006\377\377\377\377\377\377")},
{0x1000000000000ull, ByteSequence("\007\001\000\000\000\000\000\000")},
{0x1000000000001ull, ByteSequence("\007\001\000\000\000\000\000\001")},
{0x1ffffffffffffull, ByteSequence("\007\001\377\377\377\377\377\377")},
{0x2000000000000ull, ByteSequence("\007\002\000\000\000\000\000\000")},
{0x2000000000001ull, ByteSequence("\007\002\000\000\000\000\000\001")},
{0x3ffffffffffffull, ByteSequence("\007\003\377\377\377\377\377\377")},
{0x4000000000000ull, ByteSequence("\007\004\000\000\000\000\000\000")},
{0x4000000000001ull, ByteSequence("\007\004\000\000\000\000\000\001")},
{0x7ffffffffffffull, ByteSequence("\007\007\377\377\377\377\377\377")},
{0x8000000000000ull, ByteSequence("\007\010\000\000\000\000\000\000")},
{0x8000000000001ull, ByteSequence("\007\010\000\000\000\000\000\001")},
{0xfffffffffffffull, ByteSequence("\007\017\377\377\377\377\377\377")},
{0x10000000000000ull, ByteSequence("\007\020\000\000\000\000\000\000")},
{0x10000000000001ull, ByteSequence("\007\020\000\000\000\000\000\001")},
{0x1fffffffffffffull, ByteSequence("\007\037\377\377\377\377\377\377")},
{0x20000000000000ull, ByteSequence("\007 \000\000\000\000\000\000")},
{0x20000000000001ull, ByteSequence("\007 \000\000\000\000\000\001")},
{0x3fffffffffffffull, ByteSequence("\007?\377\377\377\377\377\377")},
{0x40000000000000ull, ByteSequence("\007@\000\000\000\000\000\000")},
{0x40000000000001ull, ByteSequence("\007@\000\000\000\000\000\001")},
{0x7fffffffffffffull, ByteSequence("\007\177\377\377\377\377\377\377")},
{0x80000000000000ull, ByteSequence("\007\200\000\000\000\000\000\000")},
{0x80000000000001ull, ByteSequence("\007\200\000\000\000\000\000\001")},
{0xffffffffffffffull, ByteSequence("\007\377\377\377\377\377\377\377")},
{0x100000000000000ull,
ByteSequence("\010\001\000\000\000\000\000\000\000")},
{0x100000000000001ull,
ByteSequence("\010\001\000\000\000\000\000\000\001")},
{0x1ffffffffffffffull,
ByteSequence("\010\001\377\377\377\377\377\377\377")},
{0x200000000000000ull,
ByteSequence("\010\002\000\000\000\000\000\000\000")},
{0x200000000000001ull,
ByteSequence("\010\002\000\000\000\000\000\000\001")},
{0x3ffffffffffffffull,
ByteSequence("\010\003\377\377\377\377\377\377\377")},
{0x400000000000000ull,
ByteSequence("\010\004\000\000\000\000\000\000\000")},
{0x400000000000001ull,
ByteSequence("\010\004\000\000\000\000\000\000\001")},
{0x7ffffffffffffffull,
ByteSequence("\010\007\377\377\377\377\377\377\377")},
{0x800000000000000ull,
ByteSequence("\010\010\000\000\000\000\000\000\000")},
{0x800000000000001ull,
ByteSequence("\010\010\000\000\000\000\000\000\001")},
{0xfffffffffffffffull,
ByteSequence("\010\017\377\377\377\377\377\377\377")},
{0x1000000000000000ull,
ByteSequence("\010\020\000\000\000\000\000\000\000")},
{0x1000000000000001ull,
ByteSequence("\010\020\000\000\000\000\000\000\001")},
{0x1fffffffffffffffull,
ByteSequence("\010\037\377\377\377\377\377\377\377")},
{0x2000000000000000ull,
ByteSequence("\010 \000\000\000\000\000\000\000")},
{0x2000000000000001ull,
ByteSequence("\010 \000\000\000\000\000\000\001")},
{0x3fffffffffffffffull,
ByteSequence("\010?\377\377\377\377\377\377\377")},
{0x4000000000000000ull,
ByteSequence("\010@\000\000\000\000\000\000\000")},
{0x4000000000000001ull,
ByteSequence("\010@\000\000\000\000\000\000\001")},
{0x7fffffffffffffffull,
ByteSequence("\010\177\377\377\377\377\377\377\377")},
{0x8000000000000000ull,
ByteSequence("\010\200\000\000\000\000\000\000\000")},
{0x8000000000000001ull,
ByteSequence("\010\200\000\000\000\000\000\000\001")},
};
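  // Round-trip check: each value must encode to exactly the golden byte
  // sequence, and decoding those bytes must consume the whole input and
  // reproduce the value.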
for (const auto& t : data) {
uint64 num = t.first;
string result;
OrderedCode::WriteNumIncreasing(&result, num);
EXPECT_EQ(t.second, result) << std::hex << num;
StringPiece in = result;
uint64 decoded;
EXPECT_TRUE(OrderedCode::ReadNumIncreasing(&in, &decoded));
EXPECT_EQ(num, decoded);
EXPECT_EQ("", in);
}
}
TEST(EncodingIsExpected, Signed) {
std::vector<std::pair<int64_t, string>> data = {
{0ll, ByteSequence("\200")},
{1ll, ByteSequence("\201")},
{2ll, ByteSequence("\202")},
{1ll, ByteSequence("\201")},
{2ll, ByteSequence("\202")},
{3ll, ByteSequence("\203")},
{3ll, ByteSequence("\203")},
{4ll, ByteSequence("\204")},
{5ll, ByteSequence("\205")},
{7ll, ByteSequence("\207")},
{8ll, ByteSequence("\210")},
{9ll, ByteSequence("\211")},
{15ll, ByteSequence("\217")},
{16ll, ByteSequence("\220")},
{17ll, ByteSequence("\221")},
{31ll, ByteSequence("\237")},
{32ll, ByteSequence("\240")},
{33ll, ByteSequence("\241")},
{63ll, ByteSequence("\277")},
{64ll, ByteSequence("\300@")},
{65ll, ByteSequence("\300A")},
{127ll, ByteSequence("\300\177")},
{128ll, ByteSequence("\300\200")},
{129ll, ByteSequence("\300\201")},
{255ll, ByteSequence("\300\377")},
{256ll, ByteSequence("\301\000")},
{257ll, ByteSequence("\301\001")},
{511ll, ByteSequence("\301\377")},
{512ll, ByteSequence("\302\000")},
{513ll, ByteSequence("\302\001")},
{1023ll, ByteSequence("\303\377")},
{1024ll, ByteSequence("\304\000")},
{1025ll, ByteSequence("\304\001")},
{2047ll, ByteSequence("\307\377")},
{2048ll, ByteSequence("\310\000")},
{2049ll, ByteSequence("\310\001")},
{4095ll, ByteSequence("\317\377")},
{4096ll, ByteSequence("\320\000")},
{4097ll, ByteSequence("\320\001")},
{8191ll, ByteSequence("\337\377")},
{8192ll, ByteSequence("\340 \000")},
{8193ll, ByteSequence("\340 \001")},
{16383ll, ByteSequence("\340?\377")},
{16384ll, ByteSequence("\340@\000")},
{16385ll, ByteSequence("\340@\001")},
{32767ll, ByteSequence("\340\177\377")},
{32768ll, ByteSequence("\340\200\000")},
{32769ll, ByteSequence("\340\200\001")},
{65535ll, ByteSequence("\340\377\377")},
{65536ll, ByteSequence("\341\000\000")},
{65537ll, ByteSequence("\341\000\001")},
{131071ll, ByteSequence("\341\377\377")},
{131072ll, ByteSequence("\342\000\000")},
{131073ll, ByteSequence("\342\000\001")},
{262143ll, ByteSequence("\343\377\377")},
{262144ll, ByteSequence("\344\000\000")},
{262145ll, ByteSequence("\344\000\001")},
{524287ll, ByteSequence("\347\377\377")},
{524288ll, ByteSequence("\350\000\000")},
{524289ll, ByteSequence("\350\000\001")},
{1048575ll, ByteSequence("\357\377\377")},
{1048576ll, ByteSequence("\360\020\000\000")},
{1048577ll, ByteSequence("\360\020\000\001")},
{2097151ll, ByteSequence("\360\037\377\377")},
{2097152ll, ByteSequence("\360 \000\000")},
{2097153ll, ByteSequence("\360 \000\001")},
{4194303ll, ByteSequence("\360?\377\377")},
{4194304ll, ByteSequence("\360@\000\000")},
{4194305ll, ByteSequence("\360@\000\001")},
{8388607ll, ByteSequence("\360\177\377\377")},
{8388608ll, ByteSequence("\360\200\000\000")},
{8388609ll, ByteSequence("\360\200\000\001")},
{16777215ll, ByteSequence("\360\377\377\377")},
{16777216ll, ByteSequence("\361\000\000\000")},
{16777217ll, ByteSequence("\361\000\000\001")},
{33554431ll, ByteSequence("\361\377\377\377")},
{33554432ll, ByteSequence("\362\000\000\000")},
{33554433ll, ByteSequence("\362\000\000\001")},
{67108863ll, ByteSequence("\363\377\377\377")},
{67108864ll, ByteSequence("\364\000\000\000")},
{67108865ll, ByteSequence("\364\000\000\001")},
{134217727ll, ByteSequence("\367\377\377\377")},
{134217728ll, ByteSequence("\370\010\000\000\000")},
{134217729ll, ByteSequence("\370\010\000\000\001")},
{268435455ll, ByteSequence("\370\017\377\377\377")},
{268435456ll, ByteSequence("\370\020\000\000\000")},
{268435457ll, ByteSequence("\370\020\000\000\001")},
{536870911ll, ByteSequence("\370\037\377\377\377")},
{536870912ll, ByteSequence("\370 \000\000\000")},
{536870913ll, ByteSequence("\370 \000\000\001")},
{1073741823ll, ByteSequence("\370?\377\377\377")},
{1073741824ll, ByteSequence("\370@\000\000\000")},
{1073741825ll, ByteSequence("\370@\000\000\001")},
{2147483647ll, ByteSequence("\370\177\377\377\377")},
{2147483648ll, ByteSequence("\370\200\000\000\000")},
{2147483649ll, ByteSequence("\370\200\000\000\001")},
{4294967295ll, ByteSequence("\370\377\377\377\377")},
{4294967296ll, ByteSequence("\371\000\000\000\000")},
{4294967297ll, ByteSequence("\371\000\000\000\001")},
{8589934591ll, ByteSequence("\371\377\377\377\377")},
{8589934592ll, ByteSequence("\372\000\000\000\000")},
{8589934593ll, ByteSequence("\372\000\000\000\001")},
{17179869183ll, ByteSequence("\373\377\377\377\377")},
{17179869184ll, ByteSequence("\374\004\000\000\000\000")},
{17179869185ll, ByteSequence("\374\004\000\000\000\001")},
{34359738367ll, ByteSequence("\374\007\377\377\377\377")},
{34359738368ll, ByteSequence("\374\010\000\000\000\000")},
{34359738369ll, ByteSequence("\374\010\000\000\000\001")},
{68719476735ll, ByteSequence("\374\017\377\377\377\377")},
{68719476736ll, ByteSequence("\374\020\000\000\000\000")},
{68719476737ll, ByteSequence("\374\020\000\000\000\001")},
{137438953471ll, ByteSequence("\374\037\377\377\377\377")},
{137438953472ll, ByteSequence("\374 \000\000\000\000")},
{137438953473ll, ByteSequence("\374 \000\000\000\001")},
{274877906943ll, ByteSequence("\374?\377\377\377\377")},
{274877906944ll, ByteSequence("\374@\000\000\000\000")},
{274877906945ll, ByteSequence("\374@\000\000\000\001")},
{549755813887ll, ByteSequence("\374\177\377\377\377\377")},
{549755813888ll, ByteSequence("\374\200\000\000\000\000")},
{549755813889ll, ByteSequence("\374\200\000\000\000\001")},
{1099511627775ll, ByteSequence("\374\377\377\377\377\377")},
{1099511627776ll, ByteSequence("\375\000\000\000\000\000")},
{1099511627777ll, ByteSequence("\375\000\000\000\000\001")},
{2199023255551ll, ByteSequence("\375\377\377\377\377\377")},
{2199023255552ll, ByteSequence("\376\002\000\000\000\000\000")},
{2199023255553ll, ByteSequence("\376\002\000\000\000\000\001")},
{4398046511103ll, ByteSequence("\376\003\377\377\377\377\377")},
{4398046511104ll, ByteSequence("\376\004\000\000\000\000\000")},
{4398046511105ll, ByteSequence("\376\004\000\000\000\000\001")},
{8796093022207ll, ByteSequence("\376\007\377\377\377\377\377")},
{8796093022208ll, ByteSequence("\376\010\000\000\000\000\000")},
{8796093022209ll, ByteSequence("\376\010\000\000\000\000\001")},
{17592186044415ll, ByteSequence("\376\017\377\377\377\377\377")},
{17592186044416ll, ByteSequence("\376\020\000\000\000\000\000")},
{17592186044417ll, ByteSequence("\376\020\000\000\000\000\001")},
{35184372088831ll, ByteSequence("\376\037\377\377\377\377\377")},
{35184372088832ll, ByteSequence("\376 \000\000\000\000\000")},
{35184372088833ll, ByteSequence("\376 \000\000\000\000\001")},
{70368744177663ll, ByteSequence("\376?\377\377\377\377\377")},
{70368744177664ll, ByteSequence("\376@\000\000\000\000\000")},
{70368744177665ll, ByteSequence("\376@\000\000\000\000\001")},
{140737488355327ll, ByteSequence("\376\177\377\377\377\377\377")},
{140737488355328ll, ByteSequence("\376\200\000\000\000\000\000")},
{140737488355329ll, ByteSequence("\376\200\000\000\000\000\001")},
{281474976710655ll, ByteSequence("\376\377\377\377\377\377\377")},
{281474976710656ll, ByteSequence("\377\001\000\000\000\000\000\000")},
{281474976710657ll, ByteSequence("\377\001\000\000\000\000\000\001")},
{562949953421311ll, ByteSequence("\377\001\377\377\377\377\377\377")},
{562949953421312ll, ByteSequence("\377\002\000\000\000\000\000\000")},
{562949953421313ll, ByteSequence("\377\002\000\000\000\000\000\001")},
{1125899906842623ll, ByteSequence("\377\003\377\377\377\377\377\377")},
{1125899906842624ll, ByteSequence("\377\004\000\000\000\000\000\000")},
{1125899906842625ll, ByteSequence("\377\004\000\000\000\000\000\001")},
{2251799813685247ll, ByteSequence("\377\007\377\377\377\377\377\377")},
{2251799813685248ll, ByteSequence("\377\010\000\000\000\000\000\000")},
{2251799813685249ll, ByteSequence("\377\010\000\000\000\000\000\001")},
{4503599627370495ll, ByteSequence("\377\017\377\377\377\377\377\377")},
{4503599627370496ll, ByteSequence("\377\020\000\000\000\000\000\000")},
{4503599627370497ll, ByteSequence("\377\020\000\000\000\000\000\001")},
{9007199254740991ll, ByteSequence("\377\037\377\377\377\377\377\377")},
{9007199254740992ll, ByteSequence("\377 \000\000\000\000\000\000")},
{9007199254740993ll, ByteSequence("\377 \000\000\000\000\000\001")},
{18014398509481983ll, ByteSequence("\377?\377\377\377\377\377\377")},
{18014398509481984ll, ByteSequence("\377@\000\000\000\000\000\000")},
{18014398509481985ll, ByteSequence("\377@\000\000\000\000\000\001")},
{36028797018963967ll, ByteSequence("\377\177\377\377\377\377\377\377")},
{36028797018963968ll,
ByteSequence("\377\200\200\000\000\000\000\000\000")},
{36028797018963969ll,
ByteSequence("\377\200\200\000\000\000\000\000\001")},
{72057594037927935ll,
ByteSequence("\377\200\377\377\377\377\377\377\377")},
{72057594037927936ll,
ByteSequence("\377\201\000\000\000\000\000\000\000")},
{72057594037927937ll,
ByteSequence("\377\201\000\000\000\000\000\000\001")},
{144115188075855871ll,
ByteSequence("\377\201\377\377\377\377\377\377\377")},
{144115188075855872ll,
ByteSequence("\377\202\000\000\000\000\000\000\000")},
{144115188075855873ll,
ByteSequence("\377\202\000\000\000\000\000\000\001")},
{288230376151711743ll,
ByteSequence("\377\203\377\377\377\377\377\377\377")},
{288230376151711744ll,
ByteSequence("\377\204\000\000\000\000\000\000\000")},
{288230376151711745ll,
ByteSequence("\377\204\000\000\000\000\000\000\001")},
{576460752303423487ll,
ByteSequence("\377\207\377\377\377\377\377\377\377")},
{576460752303423488ll,
ByteSequence("\377\210\000\000\000\000\000\000\000")},
{576460752303423489ll,
ByteSequence("\377\210\000\000\000\000\000\000\001")},
{1152921504606846975ll,
ByteSequence("\377\217\377\377\377\377\377\377\377")},
{1152921504606846976ll,
ByteSequence("\377\220\000\000\000\000\000\000\000")},
{1152921504606846977ll,
ByteSequence("\377\220\000\000\000\000\000\000\001")},
{2305843009213693951ll,
ByteSequence("\377\237\377\377\377\377\377\377\377")},
{2305843009213693952ll,
ByteSequence("\377\240\000\000\000\000\000\000\000")},
{2305843009213693953ll,
ByteSequence("\377\240\000\000\000\000\000\000\001")},
{4611686018427387903ll,
ByteSequence("\377\277\377\377\377\377\377\377\377")},
{4611686018427387904ll,
ByteSequence("\377\300@\000\000\000\000\000\000\000")},
{4611686018427387905ll,
ByteSequence("\377\300@\000\000\000\000\000\000\001")},
{9223372036854775807ll,
ByteSequence("\377\300\177\377\377\377\377\377\377\377")},
{-9223372036854775807ll,
ByteSequence("\000?\200\000\000\000\000\000\000\001")},
{0ll, ByteSequence("\200")},
{-1ll, ByteSequence("\177")},
{-2ll, ByteSequence("~")},
{-1ll, ByteSequence("\177")},
{-2ll, ByteSequence("~")},
{-3ll, ByteSequence("}")},
{-3ll, ByteSequence("}")},
{-4ll, ByteSequence("|")},
{-5ll, ByteSequence("{")},
{-7ll, ByteSequence("y")},
{-8ll, ByteSequence("x")},
{-9ll, ByteSequence("w")},
{-15ll, ByteSequence("q")},
{-16ll, ByteSequence("p")},
{-17ll, ByteSequence("o")},
{-31ll, ByteSequence("a")},
{-32ll, ByteSequence("`")},
{-33ll, ByteSequence("_")},
{-63ll, ByteSequence("A")},
{-64ll, ByteSequence("@")},
{-65ll, ByteSequence("?\277")},
{-127ll, ByteSequence("?\201")},
{-128ll, ByteSequence("?\200")},
{-129ll, ByteSequence("?\177")},
{-255ll, ByteSequence("?\001")},
{-256ll, ByteSequence("?\000")},
{-257ll, ByteSequence(">\377")},
{-511ll, ByteSequence(">\001")},
{-512ll, ByteSequence(">\000")},
{-513ll, ByteSequence("=\377")},
{-1023ll, ByteSequence("<\001")},
{-1024ll, ByteSequence("<\000")},
{-1025ll, ByteSequence(";\377")},
{-2047ll, ByteSequence("8\001")},
{-2048ll, ByteSequence("8\000")},
{-2049ll, ByteSequence("7\377")},
{-4095ll, ByteSequence("0\001")},
{-4096ll, ByteSequence("0\000")},
{-4097ll, ByteSequence("/\377")},
{-8191ll, ByteSequence(" \001")},
{-8192ll, ByteSequence(" \000")},
{-8193ll, ByteSequence("\037\337\377")},
{-16383ll, ByteSequence("\037\300\001")},
{-16384ll, ByteSequence("\037\300\000")},
{-16385ll, ByteSequence("\037\277\377")},
{-32767ll, ByteSequence("\037\200\001")},
{-32768ll, ByteSequence("\037\200\000")},
{-32769ll, ByteSequence("\037\177\377")},
{-65535ll, ByteSequence("\037\000\001")},
{-65536ll, ByteSequence("\037\000\000")},
{-65537ll, ByteSequence("\036\377\377")},
{-131071ll, ByteSequence("\036\000\001")},
{-131072ll, ByteSequence("\036\000\000")},
{-131073ll, ByteSequence("\035\377\377")},
{-262143ll, ByteSequence("\034\000\001")},
{-262144ll, ByteSequence("\034\000\000")},
{-262145ll, ByteSequence("\033\377\377")},
{-524287ll, ByteSequence("\030\000\001")},
{-524288ll, ByteSequence("\030\000\000")},
{-524289ll, ByteSequence("\027\377\377")},
{-1048575ll, ByteSequence("\020\000\001")},
{-1048576ll, ByteSequence("\020\000\000")},
{-1048577ll, ByteSequence("\017\357\377\377")},
{-2097151ll, ByteSequence("\017\340\000\001")},
{-2097152ll, ByteSequence("\017\340\000\000")},
{-2097153ll, ByteSequence("\017\337\377\377")},
{-4194303ll, ByteSequence("\017\300\000\001")},
{-4194304ll, ByteSequence("\017\300\000\000")},
{-4194305ll, ByteSequence("\017\277\377\377")},
{-8388607ll, ByteSequence("\017\200\000\001")},
{-8388608ll, ByteSequence("\017\200\000\000")},
{-8388609ll, ByteSequence("\017\177\377\377")},
{-16777215ll, ByteSequence("\017\000\000\001")},
{-16777216ll, ByteSequence("\017\000\000\000")},
{-16777217ll, ByteSequence("\016\377\377\377")},
{-33554431ll, ByteSequence("\016\000\000\001")},
{-33554432ll, ByteSequence("\016\000\000\000")},
{-33554433ll, ByteSequence("\r\377\377\377")},
{-67108863ll, ByteSequence("\014\000\000\001")},
{-67108864ll, ByteSequence("\014\000\000\000")},
{-67108865ll, ByteSequence("\013\377\377\377")},
{-134217727ll, ByteSequence("\010\000\000\001")},
{-134217728ll, ByteSequence("\010\000\000\000")},
{-134217729ll, ByteSequence("\007\367\377\377\377")},
{-268435455ll, ByteSequence("\007\360\000\000\001")},
{-268435456ll, ByteSequence("\007\360\000\000\000")},
{-268435457ll, ByteSequence("\007\357\377\377\377")},
{-536870911ll, ByteSequence("\007\340\000\000\001")},
{-536870912ll, ByteSequence("\007\340\000\000\000")},
{-536870913ll, ByteSequence("\007\337\377\377\377")},
{-1073741823ll, ByteSequence("\007\300\000\000\001")},
{-1073741824ll, ByteSequence("\007\300\000\000\000")},
{-1073741825ll, ByteSequence("\007\277\377\377\377")},
{-2147483647ll, ByteSequence("\007\200\000\000\001")},
{-2147483648ll, ByteSequence("\007\200\000\000\000")},
{-2147483649ll, ByteSequence("\007\177\377\377\377")},
{-4294967295ll, ByteSequence("\007\000\000\000\001")},
{-4294967296ll, ByteSequence("\007\000\000\000\000")},
{-4294967297ll, ByteSequence("\006\377\377\377\377")},
{-8589934591ll, ByteSequence("\006\000\000\000\001")},
{-8589934592ll, ByteSequence("\006\000\000\000\000")},
{-8589934593ll, ByteSequence("\005\377\377\377\377")},
{-17179869183ll, ByteSequence("\004\000\000\000\001")},
{-17179869184ll, ByteSequence("\004\000\000\000\000")},
{-17179869185ll, ByteSequence("\003\373\377\377\377\377")},
{-34359738367ll, ByteSequence("\003\370\000\000\000\001")},
{-34359738368ll, ByteSequence("\003\370\000\000\000\000")},
{-34359738369ll, ByteSequence("\003\367\377\377\377\377")},
{-68719476735ll, ByteSequence("\003\360\000\000\000\001")},
{-68719476736ll, ByteSequence("\003\360\000\000\000\000")},
{-68719476737ll, ByteSequence("\003\357\377\377\377\377")},
{-137438953471ll, ByteSequence("\003\340\000\000\000\001")},
{-137438953472ll, ByteSequence("\003\340\000\000\000\000")},
{-137438953473ll, ByteSequence("\003\337\377\377\377\377")},
{-274877906943ll, ByteSequence("\003\300\000\000\000\001")},
{-274877906944ll, ByteSequence("\003\300\000\000\000\000")},
{-274877906945ll, ByteSequence("\003\277\377\377\377\377")},
{-549755813887ll, ByteSequence("\003\200\000\000\000\001")},
{-549755813888ll, ByteSequence("\003\200\000\000\000\000")},
{-549755813889ll, ByteSequence("\003\177\377\377\377\377")},
{-1099511627775ll, ByteSequence("\003\000\000\000\000\001")},
{-1099511627776ll, ByteSequence("\003\000\000\000\000\000")},
{-1099511627777ll, ByteSequence("\002\377\377\377\377\377")},
{-2199023255551ll, ByteSequence("\002\000\000\000\000\001")},
{-2199023255552ll, ByteSequence("\002\000\000\000\000\000")},
{-2199023255553ll, ByteSequence("\001\375\377\377\377\377\377")},
{-4398046511103ll, ByteSequence("\001\374\000\000\000\000\001")},
{-4398046511104ll, ByteSequence("\001\374\000\000\000\000\000")},
{-4398046511105ll, ByteSequence("\001\373\377\377\377\377\377")},
{-8796093022207ll, ByteSequence("\001\370\000\000\000\000\001")},
{-8796093022208ll, ByteSequence("\001\370\000\000\000\000\000")},
{-8796093022209ll, ByteSequence("\001\367\377\377\377\377\377")},
{-17592186044415ll, ByteSequence("\001\360\000\000\000\000\001")},
{-17592186044416ll, ByteSequence("\001\360\000\000\000\000\000")},
{-17592186044417ll, ByteSequence("\001\357\377\377\377\377\377")},
{-35184372088831ll, ByteSequence("\001\340\000\000\000\000\001")},
{-35184372088832ll, ByteSequence("\001\340\000\000\000\000\000")},
{-35184372088833ll, ByteSequence("\001\337\377\377\377\377\377")},
{-70368744177663ll, ByteSequence("\001\300\000\000\000\000\001")},
{-70368744177664ll, ByteSequence("\001\300\000\000\000\000\000")},
{-70368744177665ll, ByteSequence("\001\277\377\377\377\377\377")},
{-140737488355327ll, ByteSequence("\001\200\000\000\000\000\001")},
{-140737488355328ll, ByteSequence("\001\200\000\000\000\000\000")},
{-140737488355329ll, ByteSequence("\001\177\377\377\377\377\377")},
{-281474976710655ll, ByteSequence("\001\000\000\000\000\000\001")},
{-281474976710656ll, ByteSequence("\001\000\000\000\000\000\000")},
{-281474976710657ll, ByteSequence("\000\376\377\377\377\377\377\377")},
{-562949953421311ll, ByteSequence("\000\376\000\000\000\000\000\001")},
{-562949953421312ll, ByteSequence("\000\376\000\000\000\000\000\000")},
{-562949953421313ll, ByteSequence("\000\375\377\377\377\377\377\377")},
{-1125899906842623ll, ByteSequence("\000\374\000\000\000\000\000\001")},
{-1125899906842624ll, ByteSequence("\000\374\000\000\000\000\000\000")},
{-1125899906842625ll, ByteSequence("\000\373\377\377\377\377\377\377")},
{-2251799813685247ll, ByteSequence("\000\370\000\000\000\000\000\001")},
{-2251799813685248ll, ByteSequence("\000\370\000\000\000\000\000\000")},
{-2251799813685249ll, ByteSequence("\000\367\377\377\377\377\377\377")},
{-4503599627370495ll, ByteSequence("\000\360\000\000\000\000\000\001")},
{-4503599627370496ll, ByteSequence("\000\360\000\000\000\000\000\000")},
{-4503599627370497ll, ByteSequence("\000\357\377\377\377\377\377\377")},
{-9007199254740991ll, ByteSequence("\000\340\000\000\000\000\000\001")},
{-9007199254740992ll, ByteSequence("\000\340\000\000\000\000\000\000")},
{-9007199254740993ll, ByteSequence("\000\337\377\377\377\377\377\377")},
{-18014398509481983ll, ByteSequence("\000\300\000\000\000\000\000\001")},
{-18014398509481984ll, ByteSequence("\000\300\000\000\000\000\000\000")},
{-18014398509481985ll, ByteSequence("\000\277\377\377\377\377\377\377")},
{-36028797018963967ll, ByteSequence("\000\200\000\000\000\000\000\001")},
{-36028797018963968ll, ByteSequence("\000\200\000\000\000\000\000\000")},
{-36028797018963969ll,
ByteSequence("\000\177\177\377\377\377\377\377\377")},
{-72057594037927935ll,
ByteSequence("\000\177\000\000\000\000\000\000\001")},
{-72057594037927936ll,
ByteSequence("\000\177\000\000\000\000\000\000\000")},
{-72057594037927937ll, ByteSequence("\000~\377\377\377\377\377\377\377")},
{-144115188075855871ll,
ByteSequence("\000~\000\000\000\000\000\000\001")},
{-144115188075855872ll,
ByteSequence("\000~\000\000\000\000\000\000\000")},
{-144115188075855873ll,
ByteSequence("\000}\377\377\377\377\377\377\377")},
{-288230376151711743ll,
ByteSequence("\000|\000\000\000\000\000\000\001")},
{-288230376151711744ll,
ByteSequence("\000|\000\000\000\000\000\000\000")},
{-288230376151711745ll,
ByteSequence("\000{\377\377\377\377\377\377\377")},
{-576460752303423487ll,
ByteSequence("\000x\000\000\000\000\000\000\001")},
{-576460752303423488ll,
ByteSequence("\000x\000\000\000\000\000\000\000")},
{-576460752303423489ll,
ByteSequence("\000w\377\377\377\377\377\377\377")},
{-1152921504606846975ll,
ByteSequence("\000p\000\000\000\000\000\000\001")},
{-1152921504606846976ll,
ByteSequence("\000p\000\000\000\000\000\000\000")},
{-1152921504606846977ll,
ByteSequence("\000o\377\377\377\377\377\377\377")},
{-2305843009213693951ll,
ByteSequence("\000`\000\000\000\000\000\000\001")},
{-2305843009213693952ll,
ByteSequence("\000`\000\000\000\000\000\000\000")},
{-2305843009213693953ll,
ByteSequence("\000_\377\377\377\377\377\377\377")},
{-4611686018427387903ll,
ByteSequence("\000@\000\000\000\000\000\000\001")},
{-4611686018427387904ll,
ByteSequence("\000@\000\000\000\000\000\000\000")},
{-4611686018427387905ll,
ByteSequence("\000?\277\377\377\377\377\377\377\377")},
{-9223372036854775807ll,
ByteSequence("\000?\200\000\000\000\000\000\000\001")},
{9223372036854775807ll,
ByteSequence("\377\300\177\377\377\377\377\377\377\377")},
};
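  // Same round-trip check as the unsigned case, over the signed golden table.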
for (const auto& t : data) {
int64_t num = t.first;
string result;
OrderedCode::WriteSignedNumIncreasing(&result, num);
EXPECT_EQ(t.second, result) << std::hex << num;
StringPiece in = result;
int64_t decoded;
EXPECT_TRUE(OrderedCode::ReadSignedNumIncreasing(&in, &decoded));
EXPECT_EQ(num, decoded);
EXPECT_EQ("", in);
}
}
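// Microbenchmarks for the string codec: each iteration encodes (or decodes)
// one random byte string of length `len`, reported as bytes/sec.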
void BM_WriteString(::testing::benchmark::State& state, int len) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
string x;
for (int i = 0; i < len; i++) {
x += rnd.Uniform(256);
}
string y;
for (auto s : state) {
y.clear();
OCWriteToString<string>(&y, x);
}
state.SetBytesProcessed(state.iterations() * len);
}
void BM_ReadString(::testing::benchmark::State& state, int len) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
string x;
for (int i = 0; i < len; i++) {
x += rnd.Uniform(256);
}
string data;
OCWriteToString<string>(&data, x);
string result;
for (auto i : state) {
result.clear();
StringPiece s = data;
OCRead<string>(&s, &result);
}
state.SetBytesProcessed(state.iterations() * len);
}
void BM_WriteStringIncreasing(::testing::benchmark::State& state) {
BM_WriteString(state, state.range(0));
}
void BM_ReadStringIncreasing(::testing::benchmark::State& state) {
BM_ReadString(state, state.range(0));
}
BENCHMARK(BM_WriteStringIncreasing)->Range(0, 1024);
BENCHMARK(BM_ReadStringIncreasing)->Range(0, 1024);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/ordered_code.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/ordered_code_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28c148ef-d980-4e5f-b429-0b86449631d6 | cpp | tensorflow/tensorflow | wav_io | tensorflow/core/lib/wav/wav_io.cc | tensorflow/core/lib/wav/wav_io_test.cc | #include "tensorflow/core/lib/wav/wav_io.h"
#include <math.h>
#include <string.h>
#include <algorithm>
#include "absl/base/casts.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
namespace wav {
namespace {
struct TF_PACKED RiffChunk {
char chunk_id[4];
char chunk_data_size[4];
char riff_type[4];
};
static_assert(sizeof(RiffChunk) == 12, "TF_PACKED does not work.");
struct TF_PACKED FormatChunk {
char chunk_id[4];
char chunk_data_size[4];
char compression_code[2];
char channel_numbers[2];
char sample_rate[4];
char bytes_per_second[4];
char bytes_per_frame[2];
char bits_per_sample[2];
};
static_assert(sizeof(FormatChunk) == 24, "TF_PACKED does not work.");
struct TF_PACKED DataChunk {
char chunk_id[4];
char chunk_data_size[4];
};
static_assert(sizeof(DataChunk) == 8, "TF_PACKED does not work.");
struct TF_PACKED WavHeader {
RiffChunk riff_chunk;
FormatChunk format_chunk;
DataChunk data_chunk;
};
static_assert(sizeof(WavHeader) ==
sizeof(RiffChunk) + sizeof(FormatChunk) + sizeof(DataChunk),
"TF_PACKED does not work.");
constexpr char kRiffChunkId[] = "RIFF";
constexpr char kRiffType[] = "WAVE";
constexpr char kFormatChunkId[] = "fmt ";
constexpr char kDataChunkId[] = "data";
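// Scales a float sample in [-1.0f, 1.0f] by 2^15, rounds, and clamps to the
// int16 range; Int16SampleToFloat below applies the inverse scaling.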
inline int16 FloatToInt16Sample(float data) {
constexpr float kMultiplier = 1.0f * (1 << 15);
return std::min<float>(std::max<float>(roundf(data * kMultiplier), kint16min),
kint16max);
}
inline float Int16SampleToFloat(int16_t data) {
constexpr float kMultiplier = 1.0f / (1 << 15);
return data * kMultiplier;
}
}
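// Bounds-checked offset arithmetic for parsing untrusted WAV data: negative
// offsets or increments, and any sum past `max_size`, are rejected, so a
// malformed header cannot push reads out of range.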
Status IncrementOffset(int old_offset, int64_t increment, size_t max_size,
int* new_offset) {
if (old_offset < 0) {
return errors::InvalidArgument("Negative offsets are not allowed: ",
old_offset);
}
if (increment < 0) {
return errors::InvalidArgument("Negative increment is not allowed: ",
increment);
}
if (old_offset > max_size) {
return errors::InvalidArgument("Initial offset is outside data range: ",
old_offset);
}
int64_t sum = old_offset + increment;
if (sum > max_size) {
return errors::InvalidArgument("Data too short when trying to read string");
}
if (sum < 0) {
return errors::InvalidArgument("Offset too large, overflowed: ", sum);
}
*new_offset = sum;
return absl::OkStatus();
}
Status ExpectText(const std::string& data, const std::string& expected_text,
int* offset) {
int new_offset;
TF_RETURN_IF_ERROR(
IncrementOffset(*offset, expected_text.size(), data.size(), &new_offset));
const std::string found_text(data.begin() + *offset,
data.begin() + new_offset);
if (found_text != expected_text) {
return errors::InvalidArgument("Header mismatch: Expected ", expected_text,
" but found ", found_text);
}
*offset = new_offset;
return absl::OkStatus();
}
Status ReadString(const std::string& data, int expected_length,
std::string* value, int* offset) {
int new_offset;
TF_RETURN_IF_ERROR(
IncrementOffset(*offset, expected_length, data.size(), &new_offset));
*value = std::string(data.begin() + *offset, data.begin() + new_offset);
*offset = new_offset;
return absl::OkStatus();
}
template <typename T>
Status EncodeAudioAsS16LEWav(const float* audio, size_t sample_rate,
size_t num_channels, size_t num_frames,
T* wav_string) {
constexpr size_t kFormatChunkSize = 16;
constexpr size_t kCompressionCodePcm = 1;
constexpr size_t kBitsPerSample = 16;
constexpr size_t kBytesPerSample = kBitsPerSample / 8;
constexpr size_t kHeaderSize = sizeof(WavHeader);
if (audio == nullptr && num_frames > 0) {
return errors::InvalidArgument("audio is null");
}
if (wav_string == nullptr) {
return errors::InvalidArgument("wav_string is null");
}
if (sample_rate == 0 || sample_rate > kuint32max) {
return errors::InvalidArgument("sample_rate must be in (0, 2^32), got: ",
sample_rate);
}
if (num_channels == 0 || num_channels > kuint16max) {
return errors::InvalidArgument("num_channels must be in (0, 2^16), got: ",
num_channels);
}
const size_t bytes_per_second = sample_rate * kBytesPerSample * num_channels;
const size_t num_samples = num_frames * num_channels;
const size_t data_size = num_samples * kBytesPerSample;
const size_t file_size = kHeaderSize + num_samples * kBytesPerSample;
const size_t bytes_per_frame = kBytesPerSample * num_channels;
if (file_size > kuint32max) {
return errors::InvalidArgument(
"Provided channels and frames cannot be encoded as a WAV.");
}
wav_string->resize(file_size);
char* data = &(*wav_string)[0];
WavHeader* header = absl::bit_cast<WavHeader*>(data);
auto* riff_chunk = &header->riff_chunk;
memcpy(riff_chunk->chunk_id, kRiffChunkId, 4);
core::EncodeFixed32(riff_chunk->chunk_data_size, file_size - 8);
memcpy(riff_chunk->riff_type, kRiffType, 4);
auto* format_chunk = &header->format_chunk;
memcpy(format_chunk->chunk_id, kFormatChunkId, 4);
core::EncodeFixed32(format_chunk->chunk_data_size, kFormatChunkSize);
core::EncodeFixed16(format_chunk->compression_code, kCompressionCodePcm);
core::EncodeFixed16(format_chunk->channel_numbers, num_channels);
core::EncodeFixed32(format_chunk->sample_rate, sample_rate);
core::EncodeFixed32(format_chunk->bytes_per_second, bytes_per_second);
core::EncodeFixed16(format_chunk->bytes_per_frame, bytes_per_frame);
core::EncodeFixed16(format_chunk->bits_per_sample, kBitsPerSample);
auto* data_chunk = &header->data_chunk;
memcpy(data_chunk->chunk_id, kDataChunkId, 4);
core::EncodeFixed32(data_chunk->chunk_data_size, data_size);
data += kHeaderSize;
for (size_t i = 0; i < num_samples; ++i) {
int16_t sample = FloatToInt16Sample(audio[i]);
core::EncodeFixed16(&data[i * kBytesPerSample],
static_cast<uint16>(sample));
}
return absl::OkStatus();
}
template Status EncodeAudioAsS16LEWav<std::string>(const float* audio,
size_t sample_rate,
size_t num_channels,
size_t num_frames,
std::string* wav_string);
template Status EncodeAudioAsS16LEWav<tstring>(const float* audio,
size_t sample_rate,
size_t num_channels,
size_t num_frames,
tstring* wav_string);
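// Decoding walks the RIFF stream chunk by chunk: recognized metadata chunks
// ahead of "fmt " are skipped, the format chunk is validated as 16-bit PCM,
// and the remainder is scanned for exactly one "data" chunk whose samples
// are converted back to floats.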
Status DecodeLin16WaveAsFloatVector(const std::string& wav_string,
std::vector<float>* float_values,
uint32* sample_count, uint16* channel_count,
uint32* sample_rate) {
int offset = 0;
TF_RETURN_IF_ERROR(ExpectText(wav_string, kRiffChunkId, &offset));
uint32 total_file_size;
TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &total_file_size, &offset));
TF_RETURN_IF_ERROR(ExpectText(wav_string, kRiffType, &offset));
std::string found_text;
TF_RETURN_IF_ERROR(ReadString(wav_string, 4, &found_text, &offset));
while (found_text != kFormatChunkId) {
if (found_text != "JUNK" && found_text != "bext" && found_text != "iXML" &&
found_text != "qlty" && found_text != "mext" && found_text != "levl" &&
found_text != "link" && found_text != "axml") {
return errors::InvalidArgument("Unexpected field ", found_text);
}
uint32 size_of_chunk;
TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &size_of_chunk, &offset));
TF_RETURN_IF_ERROR(
IncrementOffset(offset, size_of_chunk, wav_string.size(), &offset));
TF_RETURN_IF_ERROR(ReadString(wav_string, 4, &found_text, &offset));
}
uint32 format_chunk_size;
TF_RETURN_IF_ERROR(
ReadValue<uint32>(wav_string, &format_chunk_size, &offset));
if ((format_chunk_size != 16) && (format_chunk_size != 18)) {
return errors::InvalidArgument(
"Bad format chunk size for WAV: Expected 16 or 18, but got",
format_chunk_size);
}
uint16 audio_format;
TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &audio_format, &offset));
if (audio_format != 1) {
return errors::InvalidArgument(
"Bad audio format for WAV: Expected 1 (PCM), but got", audio_format);
}
TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, channel_count, &offset));
if (*channel_count < 1) {
return errors::InvalidArgument(
"Bad number of channels for WAV: Expected at least 1, but got ",
*channel_count);
}
TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, sample_rate, &offset));
uint32 bytes_per_second;
TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &bytes_per_second, &offset));
uint16 bytes_per_sample;
TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &bytes_per_sample, &offset));
uint16 bits_per_sample;
TF_RETURN_IF_ERROR(ReadValue<uint16>(wav_string, &bits_per_sample, &offset));
if (bits_per_sample != 16) {
return errors::InvalidArgument(
"Can only read 16-bit WAV files, but received ", bits_per_sample);
}
const uint32 expected_bytes_per_sample =
((bits_per_sample * *channel_count) + 7) / 8;
if (bytes_per_sample != expected_bytes_per_sample) {
return errors::InvalidArgument(
"Bad bytes per sample in WAV header: Expected ",
expected_bytes_per_sample, " but got ", bytes_per_sample);
}
const uint64 expected_bytes_per_second =
static_cast<uint64>(bytes_per_sample) * *sample_rate;
if (static_cast<uint64>(bytes_per_second) != expected_bytes_per_second) {
return errors::InvalidArgument(
"Bad bytes per second in WAV header: Expected ",
expected_bytes_per_second, " but got ", bytes_per_second,
" (sample_rate=", *sample_rate, ", bytes_per_sample=", bytes_per_sample,
")");
}
if (format_chunk_size == 18) {
offset += 2;
}
bool was_data_found = false;
while (offset < wav_string.size()) {
std::string chunk_id;
TF_RETURN_IF_ERROR(ReadString(wav_string, 4, &chunk_id, &offset));
uint32 chunk_size;
TF_RETURN_IF_ERROR(ReadValue<uint32>(wav_string, &chunk_size, &offset));
if (chunk_size > std::numeric_limits<int32>::max()) {
return errors::InvalidArgument(
"WAV data chunk '", chunk_id, "' is too large: ", chunk_size,
" bytes, but the limit is ", std::numeric_limits<int32>::max());
}
if (chunk_id == kDataChunkId) {
if (was_data_found) {
return errors::InvalidArgument("More than one data chunk found in WAV");
}
was_data_found = true;
*sample_count = chunk_size / bytes_per_sample;
const uint32 data_count = *sample_count * *channel_count;
int unused_new_offset = 0;
TF_RETURN_IF_ERROR(IncrementOffset(offset, sizeof(int16) * data_count,
wav_string.size(),
&unused_new_offset));
float_values->resize(data_count);
      for (uint32 i = 0; i < data_count; ++i) {
int16_t single_channel_value = 0;
TF_RETURN_IF_ERROR(
ReadValue<int16>(wav_string, &single_channel_value, &offset));
(*float_values)[i] = Int16SampleToFloat(single_channel_value);
}
} else {
offset += chunk_size;
}
}
if (!was_data_found) {
return errors::InvalidArgument("No data chunk found in WAV");
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/lib/wav/wav_io.h"
#include <string>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace wav {
Status ExpectText(const string& data, const string& expected_text, int* offset);
Status ReadString(const string& data, int expected_length, string* value,
int* offset);
TEST(WavIO, BadArguments) {
float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f};
tstring result;
EXPECT_EQ(error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(nullptr, 44100, 2, 3, &result).code());
TF_EXPECT_OK(EncodeAudioAsS16LEWav(nullptr, 44100, 2, 0, &result));
EXPECT_EQ(
error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, 44100, 2, 3, (tstring*)nullptr).code());
const size_t kuint32max_plus_one = static_cast<size_t>(kuint32max) + 1;
const size_t kuint16max_plus_one = static_cast<size_t>(kuint16max) + 1;
EXPECT_EQ(error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, 0, 2, 3, &result).code());
EXPECT_EQ(error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, 44100, 0, 3, &result).code());
EXPECT_EQ(
error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, kuint32max_plus_one, 2, 3, &result).code());
EXPECT_EQ(error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, 44100, kuint16max_plus_one, 3, &result)
.code());
EXPECT_EQ(error::INVALID_ARGUMENT,
EncodeAudioAsS16LEWav(audio, 44100, 2, 1073741813, &result).code());
}
TEST(WavIO, BasicEven) {
float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f};
string result;
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 3, &result));
EXPECT_EQ(56, result.size());
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 22050, 1, 6, &result));
EXPECT_EQ(56, result.size());
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 8000, 1, 6, &result));
EXPECT_EQ(56, result.size());
}
TEST(WavIO, BasicOdd) {
float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f};
string result;
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 22050, 1, 5, &result));
EXPECT_EQ(54, result.size());
}
TEST(WavIO, EncodeThenDecode) {
float audio[] = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f};
string wav_data;
TF_ASSERT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 3, &wav_data));
std::vector<float> decoded_audio;
uint32 decoded_sample_count;
uint16 decoded_channel_count;
uint32 decoded_sample_rate;
TF_ASSERT_OK(DecodeLin16WaveAsFloatVector(
wav_data, &decoded_audio, &decoded_sample_count, &decoded_channel_count,
&decoded_sample_rate));
EXPECT_EQ(2, decoded_channel_count);
EXPECT_EQ(3, decoded_sample_count);
EXPECT_EQ(44100, decoded_sample_rate);
for (int i = 0; i < 6; ++i) {
EXPECT_NEAR(audio[i], decoded_audio[i], 1e-4f) << "i=" << i;
}
}
TEST(WavIO, BasicMono) {
std::vector<uint8> wav_data = {
'R', 'I', 'F', 'F',
44, 0, 0, 0,
'W', 'A', 'V', 'E',
'f', 'm', 't', ' ',
16, 0, 0, 0,
1, 0,
1, 0,
0x44, 0xac, 0, 0,
0x88, 0x58, 0x1, 0,
2, 0,
16, 0,
'd', 'a', 't', 'a',
8, 0, 0, 0,
0, 0,
0xff, 0x7f,
0, 0,
0x00, 0x80,
};
string expected(wav_data.begin(), wav_data.end());
float audio[] = {0.0f, 1.0f, 0.0f, -1.0f};
string result;
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 1, 4, &result));
EXPECT_EQ(expected, result);
}
TEST(WavIO, BasicStereo) {
std::vector<uint8> wav_data = {
'R', 'I', 'F', 'F',
44, 0, 0, 0,
'W', 'A', 'V', 'E',
'f', 'm', 't', ' ',
16, 0, 0, 0,
1, 0,
2, 0,
0x44, 0xac, 0, 0,
0x10, 0xb1, 0x2, 0,
4, 0,
16, 0,
'd', 'a', 't', 'a',
8, 0, 0, 0,
0, 0,
0xff, 0x7f,
0, 0,
0x00, 0x80,
};
string expected(wav_data.begin(), wav_data.end());
float audio[] = {0.0f, 1.0f, 0.0f, -1.0f};
string result;
TF_EXPECT_OK(EncodeAudioAsS16LEWav(audio, 44100, 2, 2, &result));
EXPECT_EQ(expected, result);
}
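// The trailing chunk declares a size (0xf8ffffff) above int32 max; the
// decoder must reject it instead of overflowing an offset.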
TEST(WavIO, ChunkSizeOverflow) {
std::vector<uint8> wav_data = {
'R', 'I', 'F', 'F',
60, 0, 0, 0,
'W', 'A', 'V', 'E',
'f', 'm', 't', ' ',
16, 0, 0, 0,
1, 0,
1, 0,
0x44, 0xac, 0, 0,
0x88, 0x58, 0x1, 0,
2, 0,
16, 0,
'd', 'a', 't', 'a',
8, 0, 0, 0,
0, 0,
0xff, 0x7f,
0, 0,
0x00, 0x80,
'f', 'o', 'o', 'o',
0xff, 0xff, 0xff, 0xf8,
0, 0,
0xff, 0x7f,
0, 0,
0x00, 0x80,
};
string wav_data_string(wav_data.begin(), wav_data.end());
std::vector<float> decoded_audio;
uint32 decoded_sample_count;
uint16 decoded_channel_count;
uint32 decoded_sample_rate;
Status decode_status = DecodeLin16WaveAsFloatVector(
wav_data_string, &decoded_audio, &decoded_sample_count,
&decoded_channel_count, &decoded_sample_rate);
EXPECT_FALSE(decode_status.ok());
EXPECT_TRUE(absl::StrContains(decode_status.message(), "too large"))
<< decode_status.message();
}
TEST(WavIO, IncrementOffset) {
int new_offset = -1;
TF_EXPECT_OK(IncrementOffset(0, 10, 20, &new_offset));
EXPECT_EQ(10, new_offset);
new_offset = -1;
TF_EXPECT_OK(IncrementOffset(10, 4, 20, &new_offset));
EXPECT_EQ(14, new_offset);
new_offset = -1;
TF_EXPECT_OK(IncrementOffset(99, 1, 100, &new_offset));
EXPECT_EQ(100, new_offset);
new_offset = -1;
EXPECT_FALSE(IncrementOffset(-1, 1, 100, &new_offset).ok());
new_offset = -1;
EXPECT_FALSE(IncrementOffset(0, -1, 100, &new_offset).ok());
new_offset = -1;
EXPECT_FALSE(IncrementOffset(std::numeric_limits<int>::max(), 1,
std::numeric_limits<int>::max(), &new_offset)
.ok());
new_offset = -1;
EXPECT_FALSE(IncrementOffset(101, 1, 100, &new_offset).ok());
}
TEST(WavIO, ExpectText) {
std::vector<uint8> test_data = {
'E', 'x', 'p', 'e', 'c', 't', 'e', 'd',
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
TF_EXPECT_OK(ExpectText(test_string, "Expected", &offset));
EXPECT_EQ(8, offset);
offset = 0;
Status expect_status = ExpectText(test_string, "Unexpected", &offset);
EXPECT_FALSE(expect_status.ok());
offset = 0;
TF_EXPECT_OK(ExpectText(test_string, "Exp", &offset));
EXPECT_EQ(3, offset);
TF_EXPECT_OK(ExpectText(test_string, "ected", &offset));
EXPECT_EQ(8, offset);
expect_status = ExpectText(test_string, "foo", &offset);
EXPECT_FALSE(expect_status.ok());
}
TEST(WavIO, ReadString) {
std::vector<uint8> test_data = {
'E', 'x', 'p', 'e', 'c', 't', 'e', 'd',
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
string read_value;
TF_EXPECT_OK(ReadString(test_string, 2, &read_value, &offset));
EXPECT_EQ("Ex", read_value);
EXPECT_EQ(2, offset);
TF_EXPECT_OK(ReadString(test_string, 6, &read_value, &offset));
EXPECT_EQ("pected", read_value);
EXPECT_EQ(8, offset);
Status read_status = ReadString(test_string, 3, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueInt8) {
std::vector<uint8> test_data = {0x00, 0x05, 0xff, 0x80};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
int8_t read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(1, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(5, read_value);
EXPECT_EQ(2, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(-1, read_value);
EXPECT_EQ(3, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(-128, read_value);
EXPECT_EQ(4, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueUInt8) {
std::vector<uint8> test_data = {0x00, 0x05, 0xff, 0x80};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
uint8 read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(1, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(5, read_value);
EXPECT_EQ(2, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(255, read_value);
EXPECT_EQ(3, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(128, read_value);
EXPECT_EQ(4, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueInt16) {
std::vector<uint8> test_data = {
0x00, 0x00,
0xff, 0x00,
0x00, 0x01,
0xff, 0xff,
0x00, 0x80,
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
int16_t read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(2, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(255, read_value);
EXPECT_EQ(4, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(256, read_value);
EXPECT_EQ(6, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(-1, read_value);
EXPECT_EQ(8, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(-32768, read_value);
EXPECT_EQ(10, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueUInt16) {
std::vector<uint8> test_data = {
0x00, 0x00,
0xff, 0x00,
0x00, 0x01,
0xff, 0xff,
0x00, 0x80,
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
uint16 read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(2, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(255, read_value);
EXPECT_EQ(4, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(256, read_value);
EXPECT_EQ(6, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(65535, read_value);
EXPECT_EQ(8, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(32768, read_value);
EXPECT_EQ(10, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueInt32) {
std::vector<uint8> test_data = {
0x00, 0x00, 0x00, 0x00,
0xff, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0xff, 0x00,
0xff, 0xff, 0xff, 0xff,
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
int32_t read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(4, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(255, read_value);
EXPECT_EQ(8, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(65280, read_value);
EXPECT_EQ(12, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(16711680, read_value);
EXPECT_EQ(16, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(-1, read_value);
EXPECT_EQ(20, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
TEST(WavIO, ReadValueUInt32) {
std::vector<uint8> test_data = {
0x00, 0x00, 0x00, 0x00,
0xff, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0xff, 0x00,
0xff, 0xff, 0xff, 0xff,
};
string test_string(test_data.begin(), test_data.end());
int offset = 0;
uint32 read_value;
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(0, read_value);
EXPECT_EQ(4, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(255, read_value);
EXPECT_EQ(8, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(65280, read_value);
EXPECT_EQ(12, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(16711680, read_value);
EXPECT_EQ(16, offset);
TF_EXPECT_OK(ReadValue(test_string, &read_value, &offset));
EXPECT_EQ(4294967295, read_value);
EXPECT_EQ(20, offset);
Status read_status = ReadValue(test_string, &read_value, &offset);
EXPECT_FALSE(read_status.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/wav/wav_io.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/wav/wav_io_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
19886cb3-2cfb-4460-800b-3e30336fb6f2 | cpp | tensorflow/tensorflow | arena | tensorflow/core/lib/core/arena.cc | tensorflow/core/lib/core/arena_test.cc | #include "tensorflow/core/lib/core/arena.h"
#include <assert.h>
#include <algorithm>
#include <vector>
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace core {
Arena::Arena(const size_t block_size)
: remaining_(0),
block_size_(block_size),
freestart_(nullptr),
blocks_alloced_(1),
overflow_blocks_(nullptr) {
assert(block_size > kDefaultAlignment);
first_blocks_[0].mem =
reinterpret_cast<char*>(port::AlignedMalloc(block_size_, sizeof(void*)));
first_blocks_[0].size = block_size_;
Reset();
}
Arena::~Arena() {
FreeBlocks();
assert(overflow_blocks_ == nullptr);
for (size_t i = 0; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
}
}
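// Advances freestart_ to the next multiple of `alignment` (a power of two),
// charging the skipped bytes to remaining_; fails if the padding alone would
// exhaust the current block.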
bool Arena::SatisfyAlignment(size_t alignment) {
const size_t overage = reinterpret_cast<size_t>(freestart_) & (alignment - 1);
if (overage > 0) {
const size_t waste = alignment - overage;
if (waste >= remaining_) {
return false;
}
freestart_ += waste;
remaining_ -= waste;
}
DCHECK_EQ(size_t{0}, reinterpret_cast<size_t>(freestart_) & (alignment - 1));
return true;
}
void Arena::Reset() {
FreeBlocks();
freestart_ = first_blocks_[0].mem;
remaining_ = first_blocks_[0].size;
CHECK(SatisfyAlignment(kDefaultAlignment));
freestart_when_empty_ = freestart_;
}
void Arena::MakeNewBlock(const uint32 alignment) {
AllocatedBlock* block = AllocNewBlock(block_size_, alignment);
freestart_ = block->mem;
remaining_ = block->size;
CHECK(SatisfyAlignment(alignment));
}
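// Least common multiple via GCD, used to pick one alignment that satisfies
// both the caller's request and kDefaultAlignment.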
static uint32 LeastCommonMultiple(uint32 a, uint32 b) {
if (a > b) {
return (a / MathUtil::GCD<uint32>(a, b)) * b;
} else if (a < b) {
return (b / MathUtil::GCD<uint32>(b, a)) * a;
} else {
return a;
}
}
Arena::AllocatedBlock* Arena::AllocNewBlock(const size_t block_size,
const uint32 alignment) {
AllocatedBlock* block;
if (blocks_alloced_ < TF_ARRAYSIZE(first_blocks_)) {
block = &first_blocks_[blocks_alloced_++];
} else {
if (overflow_blocks_ == nullptr)
overflow_blocks_ = new std::vector<AllocatedBlock>;
overflow_blocks_->resize(overflow_blocks_->size() + 1);
block = &overflow_blocks_->back();
}
uint32 adjusted_alignment =
(alignment > 1 ? LeastCommonMultiple(alignment, kDefaultAlignment) : 1);
adjusted_alignment =
std::max(adjusted_alignment, static_cast<uint32>(sizeof(void*)));
CHECK_LE(adjusted_alignment, static_cast<uint32>(1 << 20))
<< "Alignment on boundaries greater than 1MB not supported.";
size_t adjusted_block_size = block_size;
if (adjusted_block_size > adjusted_alignment) {
const uint32 excess = adjusted_block_size % adjusted_alignment;
adjusted_block_size += (excess > 0 ? adjusted_alignment - excess : 0);
}
block->mem = reinterpret_cast<char*>(
port::AlignedMalloc(adjusted_block_size, adjusted_alignment));
block->size = adjusted_block_size;
CHECK(nullptr != block->mem) << "block_size=" << block_size
<< " adjusted_block_size=" << adjusted_block_size
<< " alignment=" << alignment
<< " adjusted_alignment=" << adjusted_alignment;
return block;
}
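// Slow path for allocation: requests larger than a quarter of the block size
// get a dedicated block; otherwise the current block is carved up, starting a
// fresh block when alignment or remaining space cannot be satisfied.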
void* Arena::GetMemoryFallback(const size_t size, const int alignment) {
if (0 == size) {
return nullptr;
}
CHECK(alignment > 0 && 0 == (alignment & (alignment - 1)));
if (block_size_ == 0 || size > block_size_ / 4) {
return AllocNewBlock(size, alignment)->mem;
}
if (!SatisfyAlignment(alignment) || size > remaining_) {
MakeNewBlock(alignment);
}
CHECK_LE(size, remaining_);
remaining_ -= size;
void* result = freestart_;
freestart_ += size;
return result;
}
void Arena::FreeBlocks() {
for (size_t i = 1; i < blocks_alloced_; ++i) {
port::AlignedFree(first_blocks_[i].mem);
first_blocks_[i].mem = nullptr;
first_blocks_[i].size = 0;
}
blocks_alloced_ = 1;
if (overflow_blocks_ != nullptr) {
std::vector<AllocatedBlock>::iterator it;
for (it = overflow_blocks_->begin(); it != overflow_blocks_->end(); ++it) {
port::AlignedFree(it->mem);
}
delete overflow_blocks_;
overflow_blocks_ = nullptr;
}
}
}
} | #include "tensorflow/core/lib/core/arena.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace core {
namespace {
static void TestMemory(void* mem, int size) {
memset(mem, 0xaa, size);
char* tmp[100];
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
tmp[i] = new char[i * i + 1];
}
memset(mem, 0xcc, size);
for (size_t i = 0; i < TF_ARRAYSIZE(tmp); i++) {
delete[] tmp[i];
}
memset(mem, 0xee, size);
}
TEST(ArenaTest, TestBasicArena) {
Arena a(1024);
char* memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
memory = a.Alloc(100);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 100);
}
TEST(ArenaTest, TestAlignment) {
Arena a(1024);
char* byte0 = a.Alloc(1);
char* alloc_aligned8 = a.AllocAligned(17, 8);
EXPECT_EQ(alloc_aligned8 - byte0, 8);
char* alloc_aligned8_b = a.AllocAligned(8, 8);
EXPECT_EQ(alloc_aligned8_b - alloc_aligned8, 24);
char* alloc_aligned8_c = a.AllocAligned(16, 8);
EXPECT_EQ(alloc_aligned8_c - alloc_aligned8_b, 8);
char* alloc_aligned8_d = a.AllocAligned(8, 1);
EXPECT_EQ(alloc_aligned8_d - alloc_aligned8_c, 16);
}
TEST(ArenaTest, TestVariousArenaSizes) {
{
Arena a(1024);
char* memory = a.Alloc(1024);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 1024);
char* memory2 = a.Alloc(1024);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1024);
}
{
Arena a(1024);
char* memory = a.Alloc(768);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 768);
char* memory2 = a.Alloc(768);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 768);
}
{
Arena a(1024);
char* memory = a.Alloc(10240);
ASSERT_NE(memory, nullptr);
TestMemory(memory, 10240);
char* memory2 = a.Alloc(1234);
ASSERT_NE(memory2, nullptr);
TestMemory(memory2, 1234);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/arena.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/arena_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1103e21a-acfc-4a48-804b-d5d2def2a88d | cpp | tensorflow/tensorflow | sqlite | tensorflow/core/lib/db/sqlite.cc | tensorflow/core/lib/db/sqlite_test.cc | #include "tensorflow/core/lib/db/sqlite.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/status.h"
extern "C" int sqlite3_snapfn_init(sqlite3*, const char**, const void*);
namespace tensorflow {
namespace {
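// Maps SQLite primary result codes to canonical error codes.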
absl::StatusCode GetTfErrorCode(int code) {
switch (code & 0xff) {
case SQLITE_OK:
case SQLITE_ROW:
case SQLITE_DONE:
return absl::StatusCode::kOk;
case SQLITE_ABORT:
return absl::StatusCode::kAborted;
case SQLITE_READONLY:
case SQLITE_MISMATCH:
return absl::StatusCode::kFailedPrecondition;
case SQLITE_MISUSE:
case SQLITE_INTERNAL:
return absl::StatusCode::kInternal;
case SQLITE_RANGE:
return absl::StatusCode::kOutOfRange;
case SQLITE_CANTOPEN:
case SQLITE_CONSTRAINT:
case SQLITE_NOTFOUND:
case SQLITE_NOTADB:
return absl::StatusCode::kInvalidArgument;
case SQLITE_CORRUPT:
return absl::StatusCode::kDataLoss;
case SQLITE_AUTH:
case SQLITE_PERM:
return absl::StatusCode::kPermissionDenied;
case SQLITE_FULL:
case SQLITE_TOOBIG:
case SQLITE_NOLFS:
return absl::StatusCode::kResourceExhausted;
case SQLITE_BUSY:
case SQLITE_LOCKED:
case SQLITE_PROTOCOL:
case SQLITE_NOMEM:
return absl::StatusCode::kUnavailable;
case SQLITE_INTERRUPT:
return absl::StatusCode::kCancelled;
case SQLITE_ERROR:
case SQLITE_IOERR:
case SQLITE_SCHEMA:
default:
return absl::StatusCode::kUnknown;
}
}
template <typename... Args>
Status PrintfStatus(int rc, const char* fmt, Args&&... args) {
return {GetTfErrorCode(rc),
strings::Printf(fmt, std::forward<Args>(args)...)};
}
sqlite3_stmt* PrepareRawOrDie(sqlite3* db, const char* sql) {
sqlite3_stmt* stmt = nullptr;
int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, nullptr);
CHECK_EQ(SQLITE_OK, rc) << sql;
return stmt;
}
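// Runs "PRAGMA <pragma>=<value>"; the value is restricted to [0-9A-Za-z-]
// because pragma arguments cannot be bound as prepared-statement parameters.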
Status SetPragma(Sqlite* db, const char* pragma, const StringPiece& value) {
if (value.empty()) return absl::OkStatus();
for (auto p = value.begin(); p < value.end(); ++p) {
if (!(('0' <= *p && *p <= '9') || ('A' <= *p && *p <= 'Z') ||
('a' <= *p && *p <= 'z') || *p == '-')) {
return errors::InvalidArgument("Illegal pragma character");
}
}
SqliteStatement stmt;
TF_RETURN_IF_ERROR(
db->Prepare(strings::StrCat("PRAGMA ", pragma, "=", value), &stmt));
bool unused_done;
return stmt.Step(&unused_done);
}
const StringPiece GetEnv(const char* var) {
const char* val = std::getenv(var);
return (val == nullptr) ? StringPiece() : StringPiece(val);
}
Status EnvPragma(Sqlite* db, const char* pragma, const char* var) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(SetPragma(db, pragma, GetEnv(var)), "getenv(",
var, ")");
return absl::OkStatus();
}
}
Status Sqlite::Open(const string& path, int flags, Sqlite** db) {
flags |= SQLITE_OPEN_PRIVATECACHE;
flags |= SQLITE_OPEN_URI;
sqlite3* sqlite = nullptr;
int rc = sqlite3_open_v2(path.c_str(), &sqlite, flags, nullptr);
if (rc != SQLITE_OK) {
*db = nullptr;
return PrintfStatus(rc, "Sqlite::Open(%s) failed: %s", path.c_str(),
sqlite3_errstr(rc));
}
CHECK_EQ(SQLITE_OK, sqlite3_extended_result_codes(sqlite, 1));
CHECK_EQ(SQLITE_OK, sqlite3_snapfn_init(sqlite, nullptr, nullptr));
sqlite3_stmt* begin = PrepareRawOrDie(sqlite, "BEGIN");
sqlite3_stmt* commit = PrepareRawOrDie(sqlite, "COMMIT");
sqlite3_stmt* rollback = PrepareRawOrDie(sqlite, "ROLLBACK");
*db = new Sqlite(sqlite, begin, commit, rollback);
Status s = absl::OkStatus();
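// Apply a default page size, then optional tuning overrides taken from
// TF_SQLITE_* environment variables.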
s.Update(SetPragma(*db, "page_size", "4096"));
s.Update(EnvPragma(*db, "secure_delete", "TF_SQLITE_SECURE_DELETE"));
s.Update(EnvPragma(*db, "page_size", "TF_SQLITE_PAGE_SIZE"));
s.Update(EnvPragma(*db, "journal_mode", "TF_SQLITE_JOURNAL_MODE"));
s.Update(EnvPragma(*db, "synchronous", "TF_SQLITE_SYNCHRONOUS"));
s.Update(EnvPragma(*db, "mmap_size", "TF_SQLITE_MMAP_SIZE"));
s.Update(EnvPragma(*db, "locking_mode", "TF_SQLITE_LOCKING_MODE"));
s.Update(EnvPragma(*db, "cache_size", "TF_SQLITE_CACHE_SIZE"));
s.Update(EnvPragma(*db, "auto_vacuum", "TF_SQLITE_AUTO_VACUUM"));
DCHECK((*db)->RefCountIsOne());
if (!s.ok()) {
(*db)->Unref();
*db = nullptr;
}
return s;
}
Sqlite::~Sqlite() {
sqlite3_finalize(rollback_);
sqlite3_finalize(commit_);
sqlite3_finalize(begin_);
CHECK_EQ(SQLITE_OK, sqlite3_close(db_));
}
Status Sqlite::Prepare(const StringPiece& sql, SqliteStatement* stmt) {
SqliteLock lock(*this);
sqlite3_stmt* ps = nullptr;
int rc = sqlite3_prepare_v2(db_, sql.data(), static_cast<int>(sql.size()),
&ps, nullptr);
if (rc != SQLITE_OK) {
*stmt = SqliteStatement();
return PrintfStatus(rc, "Prepare() failed: [%d] %s: %.*s", rc, errmsg(),
sql.size(), sql.data());
}
*stmt = SqliteStatement(this, ps);
return absl::OkStatus();
}
Status SqliteStatement::Step(bool* is_done) {
DCHECK(stmt_ != nullptr);
if (TF_PREDICT_FALSE(bind_error_ != SQLITE_OK)) {
*is_done = true;
return PrintfStatus(bind_error_, "Bind(%d) failed: %s: %s",
bind_error_parameter_, sqlite3_errstr(bind_error_),
sql());
}
SqliteLock lock(*db_);
int rc = sqlite3_step(stmt_);
switch (rc) {
case SQLITE_ROW:
*is_done = false;
return absl::OkStatus();
case SQLITE_DONE:
*is_done = true;
return absl::OkStatus();
default:
*is_done = true;
return PrintfStatus(rc, "Step() failed: [%d] %s: %s", rc, db_->errmsg(),
sql());
}
}
bool SqliteStatement::StepOrDie() {
bool is_done;
TF_CHECK_OK(Step(&is_done));
return !is_done;
}
Status SqliteStatement::StepOnce() {
bool is_done;
TF_RETURN_IF_ERROR(Step(&is_done));
if (TF_PREDICT_FALSE(is_done)) {
return errors::Internal("No rows returned: ", sql());
}
return absl::OkStatus();
}
const SqliteStatement& SqliteStatement::StepOnceOrDie() {
TF_CHECK_OK(StepOnce());
return *this;
}
Status SqliteStatement::StepAndReset() {
bool is_done;
Status s = Step(&is_done);
if (TF_PREDICT_FALSE(s.ok() && !is_done)) {
s = errors::Internal("Unexpected row: ", sql());
}
Reset();
return s;
}
void SqliteStatement::StepAndResetOrDie() { TF_CHECK_OK(StepAndReset()); }
void SqliteStatement::Reset() {
if (TF_PREDICT_TRUE(stmt_ != nullptr)) {
sqlite3_reset(stmt_);
sqlite3_clear_bindings(stmt_);
}
bind_error_ = SQLITE_OK;
size_ = 0;
}
SqliteTransaction::SqliteTransaction(Sqlite& db) : db_(&db) {
sqlite3_mutex_enter(sqlite3_db_mutex(db_->db_));
CHECK(!db_->is_in_transaction_);
db_->is_in_transaction_ = true;
Begin();
}
SqliteTransaction::~SqliteTransaction() {
sqlite3_step(db_->rollback_);
sqlite3_reset(db_->rollback_);
sqlite3_reset(db_->begin_);
db_->is_in_transaction_ = false;
sqlite3_mutex_leave(sqlite3_db_mutex(db_->db_));
}
void SqliteTransaction::Begin() {
if (sqlite3_step(db_->begin_) != SQLITE_DONE) {
LOG(FATAL) << "BEGIN failed: " << sqlite3_errmsg(db_->db_);
}
}
Status SqliteTransaction::Commit() {
int rc = sqlite3_step(db_->commit_);
if (rc != SQLITE_DONE) {
return PrintfStatus(rc, "COMMIT failed: [%d] %s", rc,
sqlite3_errmsg(db_->db_));
}
sqlite3_reset(db_->commit_);
sqlite3_reset(db_->begin_);
Begin();
return absl::OkStatus();
}
} | #include "tensorflow/core/lib/db/sqlite.h"
#include <array>
#include <climits>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class SqliteTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db_));
db_->PrepareOrDie("CREATE TABLE T (a BLOB, b BLOB)").StepAndResetOrDie();
}
void TearDown() override { db_->Unref(); }
Sqlite* db_;
bool is_done_;
};
TEST_F(SqliteTest, InsertAndSelectInt) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindInt(1, 3);
stmt.BindInt(2, -7);
TF_ASSERT_OK(stmt.StepAndReset());
stmt.BindInt(1, 123);
stmt.BindInt(2, -123);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T ORDER BY b");
TF_ASSERT_OK(stmt.Step(&is_done_));
ASSERT_FALSE(is_done_);
EXPECT_EQ(123, stmt.ColumnInt(0));
EXPECT_EQ(-123, stmt.ColumnInt(1));
TF_ASSERT_OK(stmt.Step(&is_done_));
ASSERT_FALSE(is_done_);
EXPECT_EQ(3, stmt.ColumnInt(0));
EXPECT_EQ(-7, stmt.ColumnInt(1));
TF_ASSERT_OK(stmt.Step(&is_done_));
ASSERT_TRUE(is_done_);
}
TEST_F(SqliteTest, InsertAndSelectDouble) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindDouble(1, 6.28318530);
stmt.BindDouble(2, 1.61803399);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(6.28318530, stmt.ColumnDouble(0));
EXPECT_EQ(1.61803399, stmt.ColumnDouble(1));
EXPECT_EQ(6, stmt.ColumnInt(0));
EXPECT_EQ(1, stmt.ColumnInt(1));
}
#ifdef DSQLITE_ENABLE_JSON1
TEST_F(SqliteTest, Json1Extension) {
string s1 = "{\"key\": 42}";
string s2 = "{\"key\": \"value\"}";
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindText(1, s1);
stmt.BindText(2, s2);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT json_extract(a, '$.key'), json_extract(b, '$.key') FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(42, stmt.ColumnInt(0));
EXPECT_EQ("value", stmt.ColumnString(1));
}
#endif
TEST_F(SqliteTest, NulCharsInString) {
string s;
s.append(static_cast<size_t>(2), '\0');
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindBlob(1, s);
stmt.BindText(2, s);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(2, stmt.ColumnSize(0));
EXPECT_EQ(2, stmt.ColumnString(0).size());
EXPECT_EQ('\0', stmt.ColumnString(0).at(0));
EXPECT_EQ('\0', stmt.ColumnString(0).at(1));
EXPECT_EQ(2, stmt.ColumnSize(1));
EXPECT_EQ(2, stmt.ColumnString(1).size());
EXPECT_EQ('\0', stmt.ColumnString(1).at(0));
EXPECT_EQ('\0', stmt.ColumnString(1).at(1));
}
TEST_F(SqliteTest, Unicode) {
string s = "要依法治国是赞美那些谁是公义的和惩罚恶人。 - 韩非";
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindBlob(1, s);
stmt.BindText(2, s);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(s, stmt.ColumnString(0));
EXPECT_EQ(s, stmt.ColumnString(1));
}
TEST_F(SqliteTest, StepAndResetClearsBindings) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindInt(1, 1);
stmt.BindInt(2, 123);
TF_ASSERT_OK(stmt.StepAndReset());
stmt.BindInt(1, 2);
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT b FROM T ORDER BY a");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(123, stmt.ColumnInt(0));
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(SQLITE_NULL, stmt.ColumnType(0));
}
TEST_F(SqliteTest, SafeBind) {
string s = "hello";
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindBlob(1, s);
stmt.BindText(2, s);
s.at(0) = 'y';
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ("hello", stmt.ColumnString(0));
EXPECT_EQ("hello", stmt.ColumnString(1));
}
TEST_F(SqliteTest, UnsafeBind) {
string s = "hello";
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindBlobUnsafe(1, s);
stmt.BindTextUnsafe(2, s);
s.at(0) = 'y';
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT a, b FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ("yello", stmt.ColumnString(0));
EXPECT_EQ("yello", stmt.ColumnString(1));
}
TEST_F(SqliteTest, UnsafeColumn) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindInt(1, 1);
stmt.BindText(2, "hello");
TF_ASSERT_OK(stmt.StepAndReset());
stmt.BindInt(1, 2);
stmt.BindText(2, "there");
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT b FROM T ORDER BY a");
TF_ASSERT_OK(stmt.Step(&is_done_));
StringPiece p = stmt.ColumnStringUnsafe(0);
EXPECT_EQ('h', *p.data());
TF_ASSERT_OK(stmt.Step(&is_done_));
}
TEST_F(SqliteTest, NamedParameterBind) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a) VALUES (:a)");
stmt.BindText(":a", "lol");
TF_ASSERT_OK(stmt.StepAndReset());
stmt = db_->PrepareOrDie("SELECT COUNT(*) FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_EQ(1, stmt.ColumnInt(0));
stmt = db_->PrepareOrDie("SELECT a FROM T");
TF_ASSERT_OK(stmt.Step(&is_done_));
EXPECT_FALSE(is_done_);
EXPECT_EQ("lol", stmt.ColumnString(0));
}
TEST_F(SqliteTest, Statement_DefaultConstructor) {
SqliteStatement stmt;
EXPECT_FALSE(stmt);
stmt = db_->PrepareOrDie("INSERT INTO T (a) VALUES (1)");
EXPECT_TRUE(stmt);
EXPECT_TRUE(stmt.StepAndReset().ok());
}
TEST_F(SqliteTest, Statement_MoveConstructor) {
SqliteStatement stmt{db_->PrepareOrDie("INSERT INTO T (a) VALUES (1)")};
EXPECT_TRUE(stmt.StepAndReset().ok());
}
TEST_F(SqliteTest, Statement_MoveAssignment) {
SqliteStatement stmt1 = db_->PrepareOrDie("INSERT INTO T (a) VALUES (1)");
SqliteStatement stmt2;
EXPECT_TRUE(stmt1.StepAndReset().ok());
EXPECT_FALSE(stmt2);
stmt2 = std::move(stmt1);
EXPECT_TRUE(stmt2.StepAndReset().ok());
}
TEST_F(SqliteTest, PrepareFailed) {
SqliteLock lock(*db_);
SqliteStatement stmt;
Status s = db_->Prepare("SELECT", &stmt);
ASSERT_FALSE(s.ok());
EXPECT_NE(string::npos, s.message().find("SELECT"));
EXPECT_EQ(SQLITE_ERROR, db_->errcode());
}
TEST_F(SqliteTest, BindFailed) {
auto stmt = db_->PrepareOrDie("INSERT INTO T (a) VALUES (123)");
stmt.BindInt(1, 123);
Status s = stmt.StepOnce();
EXPECT_NE(string::npos, s.message().find("INSERT INTO T (a) VALUES (123)"))
<< s.message();
}
TEST_F(SqliteTest, SnappyExtension) {
auto stmt = db_->PrepareOrDie("SELECT UNSNAP(SNAP(?))");
stmt.BindText(1, "hello");
EXPECT_EQ("hello", stmt.StepOnceOrDie().ColumnString(0));
}
TEST_F(SqliteTest, SnappyBinaryCompatibility) {
EXPECT_EQ(
"today is the end of the republic",
db_->PrepareOrDie("SELECT UNSNAP(X'03207C746F6461792069732074686520656E64"
"206F66207468652072657075626C6963')")
.StepOnceOrDie()
.ColumnString(0));
}
TEST(SqliteOpenTest, CloseConnectionBeforeStatement_KeepsConnectionOpen) {
Sqlite* db;
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db));
SqliteStatement stmt = db->PrepareOrDie("SELECT ? + ?");
db->Unref();
stmt.BindInt(1, 7);
stmt.BindInt(2, 3);
EXPECT_EQ(10, stmt.StepOnceOrDie().ColumnInt(0));
}
TEST_F(SqliteTest, TransactionRollback) {
{
SqliteTransaction txn(*db_);
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindDouble(1, 6.28318530);
stmt.BindDouble(2, 1.61803399);
TF_ASSERT_OK(stmt.StepAndReset());
}
EXPECT_EQ(
0,
db_->PrepareOrDie("SELECT COUNT(*) FROM T").StepOnceOrDie().ColumnInt(0));
}
TEST_F(SqliteTest, TransactionCommit) {
{
SqliteTransaction txn(*db_);
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindDouble(1, 6.28318530);
stmt.BindDouble(2, 1.61803399);
TF_ASSERT_OK(stmt.StepAndReset());
TF_ASSERT_OK(txn.Commit());
}
EXPECT_EQ(
1,
db_->PrepareOrDie("SELECT COUNT(*) FROM T").StepOnceOrDie().ColumnInt(0));
}
TEST_F(SqliteTest, TransactionCommitMultipleTimes) {
{
SqliteTransaction txn(*db_);
auto stmt = db_->PrepareOrDie("INSERT INTO T (a, b) VALUES (?, ?)");
stmt.BindDouble(1, 6.28318530);
stmt.BindDouble(2, 1.61803399);
TF_ASSERT_OK(stmt.StepAndReset());
TF_ASSERT_OK(txn.Commit());
stmt.BindDouble(1, 6.28318530);
stmt.BindDouble(2, 1.61803399);
TF_ASSERT_OK(stmt.StepAndReset());
TF_ASSERT_OK(txn.Commit());
}
EXPECT_EQ(
2,
db_->PrepareOrDie("SELECT COUNT(*) FROM T").StepOnceOrDie().ColumnInt(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/db/sqlite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/db/sqlite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d90bf76-93c5-467b-bc99-3fbef055fd1a | cpp | tensorflow/tensorflow | jpeg_mem | tensorflow/core/lib/jpeg/jpeg_mem.cc | tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc | #include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include <setjmp.h>
#include <string.h>
#include <algorithm>
#include <functional>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "jpeglib.h"
#include "tensorflow/core/lib/jpeg/jpeg_handle.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/jpeg.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace jpeg {
namespace {
enum JPEGErrors {
JPEGERRORS_OK,
JPEGERRORS_UNEXPECTED_END_OF_DATA,
JPEGERRORS_BAD_PARAM
};
class FewerArgsForCompiler {
public:
FewerArgsForCompiler(int datasize, const UncompressFlags& flags,
int64_t* nwarn,
std::function<uint8*(int, int, int)> allocate_output)
: datasize_(datasize),
flags_(flags),
pnwarn_(nwarn),
allocate_output_(std::move(allocate_output)),
height_read_(0),
height_(0),
stride_(0) {
if (pnwarn_ != nullptr) *pnwarn_ = 0;
}
const int datasize_;
const UncompressFlags flags_;
int64_t* const pnwarn_;
std::function<uint8*(int, int, int)> allocate_output_;
int height_read_;
int height_;
int stride_;
};
bool IsCropWindowValid(const UncompressFlags& flags, int input_image_width,
int input_image_height) {
return flags.crop_width > 0 && flags.crop_height > 0 && flags.crop_x >= 0 &&
flags.crop_y >= 0 &&
flags.crop_y + flags.crop_height <= input_image_height &&
flags.crop_x + flags.crop_width <= input_image_width;
}
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
void no_print(j_common_ptr cinfo) {}
#endif
uint8* UncompressLow(const void* srcdata, FewerArgsForCompiler* argball) {
const int datasize = argball->datasize_;
const auto& flags = argball->flags_;
const int ratio = flags.ratio;
int components = flags.components;
int stride = flags.stride;
int64_t* const nwarn = argball->pnwarn_;
if ((ratio != 1) && (ratio != 2) && (ratio != 4) && (ratio != 8)) {
return nullptr;
}
if (!(components == 0 || components == 1 || components == 3)) {
return nullptr;
}
if (datasize == 0 || srcdata == nullptr) return nullptr;
JSAMPLE* tempdata = nullptr;
JPEGErrors error = JPEGERRORS_OK;
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jerr.error_exit = CatchError;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
jerr.output_message = no_print;
#endif
jmp_buf jpeg_jmpbuf;
cinfo.client_data = &jpeg_jmpbuf;
if (setjmp(jpeg_jmpbuf)) {
delete[] tempdata;
return nullptr;
}
jpeg_create_decompress(&cinfo);
SetSrc(&cinfo, srcdata, datasize, flags.try_recover_truncated_jpeg);
jpeg_read_header(&cinfo, TRUE);
if (components == 0) components = std::min(cinfo.num_components, 3);
switch (components) {
case 1:
cinfo.out_color_space = JCS_GRAYSCALE;
break;
case 3:
if (cinfo.jpeg_color_space == JCS_CMYK ||
cinfo.jpeg_color_space == JCS_YCCK) {
cinfo.out_color_space = JCS_CMYK;
} else {
cinfo.out_color_space = JCS_RGB;
}
break;
default:
LOG(ERROR) << " Invalid components value " << components << std::endl;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
cinfo.do_fancy_upsampling = boolean(flags.fancy_upscaling);
cinfo.scale_num = 1;
cinfo.scale_denom = ratio;
cinfo.dct_method = flags.dct_method;
jpeg_calc_output_dimensions(&cinfo);
int64_t total_size = static_cast<int64_t>(cinfo.output_height) *
static_cast<int64_t>(cinfo.output_width) *
static_cast<int64_t>(cinfo.num_components);
if (cinfo.output_width <= 0 || cinfo.output_height <= 0) {
LOG(ERROR) << "Invalid image size: " << cinfo.output_width << " x "
<< cinfo.output_height;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
if (total_size >= (1LL << 29)) {
LOG(ERROR) << "Image too large: " << total_size;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
jpeg_start_decompress(&cinfo);
JDIMENSION target_output_width = cinfo.output_width;
JDIMENSION target_output_height = cinfo.output_height;
JDIMENSION skipped_scanlines = 0;
#if defined(LIBJPEG_TURBO_VERSION)
if (flags.crop) {
target_output_height = flags.crop_height;
target_output_width = flags.crop_width;
if (!IsCropWindowValid(flags, cinfo.output_width, cinfo.output_height)) {
LOG(ERROR) << "Invalid crop window: x=" << flags.crop_x
<< ", y=" << flags.crop_y << ", w=" << target_output_width
<< ", h=" << target_output_height
<< " for image_width: " << cinfo.output_width
<< " and image_height: " << cinfo.output_height;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
JDIMENSION crop_width = flags.crop_width;
JDIMENSION crop_x = flags.crop_x;
jpeg_crop_scanline(&cinfo, &crop_x, &crop_width);
skipped_scanlines = jpeg_skip_scanlines(&cinfo, flags.crop_y);
CHECK_EQ(skipped_scanlines, flags.crop_y);
}
#endif
const int min_stride = target_output_width * components * sizeof(JSAMPLE);
if (stride == 0) {
stride = min_stride;
} else if (stride < min_stride) {
LOG(ERROR) << "Incompatible stride: " << stride << " < " << min_stride;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
argball->height_ = target_output_height;
argball->stride_ = stride;
#if !defined(LIBJPEG_TURBO_VERSION)
uint8* dstdata = nullptr;
if (flags.crop) {
dstdata = new JSAMPLE[stride * target_output_height];
} else {
dstdata = argball->allocate_output_(target_output_width,
target_output_height, components);
}
#else
uint8* dstdata = argball->allocate_output_(target_output_width,
target_output_height, components);
#endif
if (dstdata == nullptr) {
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
JSAMPLE* output_line = static_cast<JSAMPLE*>(dstdata);
const bool need_realign_cropped_scanline =
(target_output_width != cinfo.output_width);
const bool use_cmyk = (cinfo.out_color_space == JCS_CMYK);
if (use_cmyk) {
tempdata = new JSAMPLE[cinfo.output_width * 4];
} else if (need_realign_cropped_scanline) {
tempdata = new JSAMPLE[cinfo.output_width * components];
}
argball->height_read_ = target_output_height;
const int max_scanlines_to_read = skipped_scanlines + target_output_height;
const int mcu_align_offset =
(cinfo.output_width - target_output_width) * (use_cmyk ? 4 : components);
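// Decode scanline by scanline; CMYK/YCCK sources are converted to RGB by
// hand here, since libjpeg does not perform that conversion.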
while (cinfo.output_scanline < max_scanlines_to_read) {
int num_lines_read = 0;
if (use_cmyk) {
num_lines_read = jpeg_read_scanlines(&cinfo, &tempdata, 1);
if (num_lines_read > 0) {
for (size_t i = 0; i < target_output_width; ++i) {
int offset = 4 * i;
if (need_realign_cropped_scanline) {
offset += mcu_align_offset;
}
const int c = tempdata[offset + 0];
const int m = tempdata[offset + 1];
const int y = tempdata[offset + 2];
const int k = tempdata[offset + 3];
int r, g, b;
if (cinfo.saw_Adobe_marker) {
r = (k * c) / 255;
g = (k * m) / 255;
b = (k * y) / 255;
} else {
r = (255 - k) * (255 - c) / 255;
g = (255 - k) * (255 - m) / 255;
b = (255 - k) * (255 - y) / 255;
}
output_line[3 * i + 0] = r;
output_line[3 * i + 1] = g;
output_line[3 * i + 2] = b;
}
}
} else if (need_realign_cropped_scanline) {
num_lines_read = jpeg_read_scanlines(&cinfo, &tempdata, 1);
if (num_lines_read > 0) {
memcpy(output_line, tempdata + mcu_align_offset, min_stride);
}
} else {
num_lines_read = jpeg_read_scanlines(&cinfo, &output_line, 1);
}
if (num_lines_read == 0) {
LOG(ERROR) << "Premature end of JPEG data. Stopped at line "
<< cinfo.output_scanline - skipped_scanlines << "/"
<< target_output_height;
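// Premature end of data: either report how many rows were decoded as an
// error, or, when recovery is enabled, fill the remaining rows by copying
// the last good scanline.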
if (!flags.try_recover_truncated_jpeg) {
argball->height_read_ = cinfo.output_scanline - skipped_scanlines;
error = JPEGERRORS_UNEXPECTED_END_OF_DATA;
} else {
for (size_t line = cinfo.output_scanline; line < max_scanlines_to_read;
++line) {
if (line == 0) {
memset(output_line, 0, min_stride);
} else {
memcpy(output_line, output_line - stride, min_stride);
}
output_line += stride;
}
argball->height_read_ = target_output_height;
cinfo.output_scanline = max_scanlines_to_read;
}
break;
}
DCHECK_EQ(num_lines_read, 1);
TF_ANNOTATE_MEMORY_IS_INITIALIZED(output_line, min_stride);
output_line += stride;
}
delete[] tempdata;
tempdata = nullptr;
#if defined(LIBJPEG_TURBO_VERSION)
if (flags.crop && cinfo.output_scanline < cinfo.output_height) {
jpeg_skip_scanlines(&cinfo,
cinfo.output_height - flags.crop_y - flags.crop_height);
}
#endif
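// For 4-channel output, expand 3-byte RGB pixels to 4-byte RGBA in place,
// walking back to front so pixels are not clobbered; alpha is set to opaque.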
if (components == 4) {
JSAMPLE* scanlineptr = static_cast<JSAMPLE*>(
dstdata + static_cast<int64_t>(target_output_height - 1) * stride);
const JSAMPLE kOpaque = -1;
const int right_rgb = (target_output_width - 1) * 3;
const int right_rgba = (target_output_width - 1) * 4;
for (int y = target_output_height; y-- > 0;) {
const JSAMPLE* rgb_pixel = scanlineptr + right_rgb;
JSAMPLE* rgba_pixel = scanlineptr + right_rgba;
scanlineptr -= stride;
for (int x = target_output_width; x-- > 0;
rgba_pixel -= 4, rgb_pixel -= 3) {
rgba_pixel[3] = kOpaque;
rgba_pixel[2] = rgb_pixel[2];
rgba_pixel[1] = rgb_pixel[1];
rgba_pixel[0] = rgb_pixel[0];
}
}
}
switch (components) {
case 1:
if (cinfo.output_components != 1) {
error = JPEGERRORS_BAD_PARAM;
}
break;
case 3:
case 4:
if (cinfo.out_color_space == JCS_CMYK) {
if (cinfo.output_components != 4) {
error = JPEGERRORS_BAD_PARAM;
}
} else {
if (cinfo.output_components != 3) {
error = JPEGERRORS_BAD_PARAM;
}
}
break;
default:
LOG(ERROR) << "Invalid components value " << components << std::endl;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
if (nwarn != nullptr) {
*nwarn = cinfo.err->num_warnings;
}
switch (error) {
case JPEGERRORS_OK:
jpeg_finish_decompress(&cinfo);
break;
case JPEGERRORS_UNEXPECTED_END_OF_DATA:
case JPEGERRORS_BAD_PARAM:
jpeg_abort(reinterpret_cast<j_common_ptr>(&cinfo));
break;
default:
LOG(ERROR) << "Unhandled case " << error;
break;
}
#if !defined(LIBJPEG_TURBO_VERSION)
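// Plain libjpeg has no jpeg_crop_scanline, so the full frame was decoded
// above; copy the requested crop window into the caller's buffer here.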
if (flags.crop) {
target_output_height = flags.crop_height;
target_output_width = flags.crop_width;
if (!IsCropWindowValid(flags, cinfo.output_width, cinfo.output_height)) {
LOG(ERROR) << "Invalid crop window: x=" << flags.crop_x
<< ", y=" << flags.crop_y << ", w=" << target_output_width
<< ", h=" << target_output_height
<< " for image_width: " << cinfo.output_width
<< " and image_height: " << cinfo.output_height;
delete[] dstdata;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
const uint8* full_image = dstdata;
dstdata = argball->allocate_output_(target_output_width,
target_output_height, components);
if (dstdata == nullptr) {
delete[] full_image;
jpeg_destroy_decompress(&cinfo);
return nullptr;
}
const int full_image_stride = stride;
const int min_stride = target_output_width * components * sizeof(JSAMPLE);
if (flags.stride == 0) {
stride = min_stride;
}
argball->height_ = target_output_height;
argball->stride_ = stride;
if (argball->height_read_ > target_output_height) {
argball->height_read_ = target_output_height;
}
const int crop_offset = flags.crop_x * components * sizeof(JSAMPLE);
const uint8* full_image_ptr = full_image + flags.crop_y * full_image_stride;
uint8* crop_image_ptr = dstdata;
for (int i = 0; i < argball->height_read_; i++) {
memcpy(crop_image_ptr, full_image_ptr + crop_offset, min_stride);
crop_image_ptr += stride;
full_image_ptr += full_image_stride;
}
delete[] full_image;
}
#endif
jpeg_destroy_decompress(&cinfo);
return dstdata;
}
}
uint8* Uncompress(const void* srcdata, int datasize,
const UncompressFlags& flags, int64_t* nwarn,
std::function<uint8*(int, int, int)> allocate_output) {
FewerArgsForCompiler argball(datasize, flags, nwarn,
std::move(allocate_output));
uint8* const dstdata = UncompressLow(srcdata, &argball);
const float fraction_read =
argball.height_ == 0
? 1.0
: (static_cast<float>(argball.height_read_) / argball.height_);
if (dstdata == nullptr ||
fraction_read < std::min(1.0f, flags.min_acceptable_fraction)) {
return nullptr;
}
if (argball.height_read_ != argball.height_) {
const int first_bad_line = argball.height_read_;
uint8* start = dstdata + first_bad_line * argball.stride_;
const int nbytes = (argball.height_ - first_bad_line) * argball.stride_;
memset(static_cast<void*>(start), 0, nbytes);
}
return dstdata;
}
uint8* Uncompress(const void* srcdata, int datasize,
const UncompressFlags& flags, int* pwidth, int* pheight,
int* pcomponents, int64_t* nwarn) {
uint8* buffer = nullptr;
uint8* result =
Uncompress(srcdata, datasize, flags, nwarn,
[=, &buffer](int width, int height, int components) {
if (pwidth != nullptr) *pwidth = width;
if (pheight != nullptr) *pheight = height;
if (pcomponents != nullptr) *pcomponents = components;
buffer = new uint8[height * width * components];
return buffer;
});
if (!result) delete[] buffer;
return result;
}
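// Reads only the JPEG header to report dimensions and channel count without
// decoding any pixel data.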
bool GetImageInfo(const void* srcdata, int datasize, int* width, int* height,
int* components) {
if (width) *width = 0;
if (height) *height = 0;
if (components) *components = 0;
if (datasize == 0 || srcdata == nullptr) return false;
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
jmp_buf jpeg_jmpbuf;
cinfo.err = jpeg_std_error(&jerr);
cinfo.client_data = &jpeg_jmpbuf;
jerr.error_exit = CatchError;
if (setjmp(jpeg_jmpbuf)) {
return false;
}
jpeg_create_decompress(&cinfo);
SetSrc(&cinfo, srcdata, datasize, false);
jpeg_read_header(&cinfo, TRUE);
jpeg_calc_output_dimensions(&cinfo);
if (width) *width = cinfo.output_width;
if (height) *height = cinfo.output_height;
if (components) *components = cinfo.output_components;
jpeg_destroy_decompress(&cinfo);
return true;
}
namespace {
bool CompressInternal(const uint8* srcdata, int width, int height,
const CompressFlags& flags, tstring* output) {
if (output == nullptr) {
LOG(ERROR) << "Output buffer is null: ";
return false;
}
output->clear();
const int components = (static_cast<int>(flags.format) & 0xff);
int64_t total_size =
static_cast<int64_t>(width) * static_cast<int64_t>(height);
if (width <= 0 || height <= 0) {
LOG(ERROR) << "Invalid image size: " << width << " x " << height;
return false;
}
if (total_size >= (1LL << 29)) {
LOG(ERROR) << "Image too large: " << total_size;
return false;
}
int in_stride = flags.stride;
if (in_stride == 0) {
in_stride = width * (static_cast<int>(flags.format) & 0xff);
} else if (in_stride < width * components) {
LOG(ERROR) << "Incompatible input stride";
return false;
}
JOCTET* buffer = nullptr;
CHECK(srcdata != nullptr);
CHECK(output != nullptr);
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
jmp_buf jpeg_jmpbuf;
cinfo.err = jpeg_std_error(&jerr);
cinfo.client_data = &jpeg_jmpbuf;
jerr.error_exit = CatchError;
if (setjmp(jpeg_jmpbuf)) {
output->clear();
delete[] buffer;
return false;
}
jpeg_create_compress(&cinfo);
int bufsize = std::min(width * height * components, 1 << 20);
buffer = new JOCTET[bufsize];
SetDest(&cinfo, buffer, bufsize, output);
cinfo.image_width = width;
cinfo.image_height = height;
switch (components) {
case 1:
cinfo.input_components = 1;
cinfo.in_color_space = JCS_GRAYSCALE;
break;
case 3:
case 4:
cinfo.input_components = 3;
cinfo.in_color_space = JCS_RGB;
break;
default:
LOG(ERROR) << " Invalid components value " << components << std::endl;
output->clear();
delete[] buffer;
return false;
}
jpeg_set_defaults(&cinfo);
if (flags.optimize_jpeg_size) cinfo.optimize_coding = TRUE;
cinfo.density_unit = flags.density_unit;
cinfo.X_density = flags.x_density;
cinfo.Y_density = flags.y_density;
jpeg_set_quality(&cinfo, flags.quality, TRUE);
if (flags.progressive) {
jpeg_simple_progression(&cinfo);
}
if (!flags.chroma_downsampling) {
for (int i = 0; i < cinfo.num_components; ++i) {
cinfo.comp_info[i].h_samp_factor = 1;
cinfo.comp_info[i].v_samp_factor = 1;
}
}
jpeg_start_compress(&cinfo, TRUE);
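// Write XMP metadata as an APP1 marker: namespace string, a NUL separator,
// then the metadata packet bytes.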
if (!flags.xmp_metadata.empty()) {
const string name_space = "http:
const int name_space_length = name_space.size();
const int metadata_length = flags.xmp_metadata.size();
const int packet_length = metadata_length + name_space_length + 1;
std::unique_ptr<JOCTET[]> joctet_packet(new JOCTET[packet_length]);
for (int i = 0; i < name_space_length; i++) {
joctet_packet[i] = name_space[i];
}
joctet_packet[name_space_length] = 0;
for (int i = 0; i < metadata_length; i++) {
joctet_packet[i + name_space_length + 1] = flags.xmp_metadata[i];
}
jpeg_write_marker(&cinfo, JPEG_APP0 + 1, joctet_packet.get(),
packet_length);
}
std::unique_ptr<JSAMPLE[]> row_temp(
new JSAMPLE[width * cinfo.input_components]);
while (cinfo.next_scanline < cinfo.image_height) {
JSAMPROW row_pointer[1];
const uint8* r = &srcdata[cinfo.next_scanline * in_stride];
uint8* p = static_cast<uint8*>(row_temp.get());
switch (flags.format) {
case FORMAT_RGBA: {
for (int i = 0; i < width; ++i, p += 3, r += 4) {
p[0] = r[0];
p[1] = r[1];
p[2] = r[2];
}
row_pointer[0] = row_temp.get();
break;
}
case FORMAT_ABGR: {
for (int i = 0; i < width; ++i, p += 3, r += 4) {
p[0] = r[3];
p[1] = r[2];
p[2] = r[1];
}
row_pointer[0] = row_temp.get();
break;
}
default: {
row_pointer[0] = reinterpret_cast<JSAMPLE*>(const_cast<JSAMPLE*>(r));
}
}
CHECK_EQ(jpeg_write_scanlines(&cinfo, row_pointer, 1), 1u);
}
jpeg_finish_compress(&cinfo);
jpeg_destroy_compress(&cinfo);
delete[] buffer;
return true;
}
}
bool Compress(const void* srcdata, int width, int height,
const CompressFlags& flags, tstring* output) {
return CompressInternal(static_cast<const uint8*>(srcdata), width, height,
flags, output);
}
tstring Compress(const void* srcdata, int width, int height,
const CompressFlags& flags) {
tstring temp;
CompressInternal(static_cast<const uint8*>(srcdata), width, height, flags,
&temp);
return temp;
}
}
} | #include "tensorflow/core/lib/jpeg/jpeg_mem.h"
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <memory>
#include "absl/base/casts.h"
#include "jpeglib.h"
#include "tensorflow/core/lib/jpeg/jpeg_handle.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/jpeg.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace jpeg {
namespace {
const char kTestData[] = "tensorflow/core/lib/jpeg/testdata/";
int ComputeSumAbsoluteDifference(const uint8* a, const uint8* b, int width,
int height, int a_stride, int b_stride) {
int totalerr = 0;
for (int i = 0; i < height; i++) {
const uint8* const pa = a + i * a_stride;
const uint8* const pb = b + i * b_stride;
for (int j = 0; j < 3 * width; j++) {
totalerr += abs(static_cast<int>(pa[j]) - static_cast<int>(pb[j]));
}
}
return totalerr;
}
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
void TestJPEG(Env* env, const string& jpegfile) {
string jpeg;
ReadFileToStringOrDie(env, jpegfile, &jpeg);
const int fsize = jpeg.size();
const uint8* const temp = absl::bit_cast<const uint8*>(jpeg.data());
int w, h, c;
std::unique_ptr<uint8[]> imgdata;
UncompressFlags flags;
flags.components = 3;
flags.min_acceptable_fraction = 0.8;
imgdata.reset(Uncompress(temp, fsize / 2, flags, &w, &h, &c, nullptr));
CHECK(imgdata == nullptr);
flags.min_acceptable_fraction = 0.01;
imgdata.reset(Uncompress(temp, fsize / 2, flags, &w, &h, &c, nullptr));
CHECK(imgdata != nullptr);
flags.min_acceptable_fraction = 1.0;
imgdata.reset(Uncompress(temp, fsize, flags, &w, &h, &c, nullptr));
CHECK(imgdata != nullptr);
}
TEST(JpegMemTest, Jpeg) {
Env* env = Env::Default();
const string data_path = kTestData;
TestJPEG(env, data_path + "jpeg_merge_test1.jpg");
TestJPEG(env, data_path + "jpeg_merge_test1_cmyk.jpg");
}
void TestCropAndDecodeJpeg(Env* env, const string& jpegfile,
const UncompressFlags& default_flags) {
string jpeg;
ReadFileToStringOrDie(env, jpegfile, &jpeg);
const int fsize = jpeg.size();
const auto* temp = absl::bit_cast<const uint8*>(jpeg.data());
std::unique_ptr<uint8[]> imgdata1;
int w1, h1, c1;
{
UncompressFlags flags = default_flags;
if (flags.stride == 0) {
imgdata1.reset(Uncompress(temp, fsize, flags, &w1, &h1, &c1, nullptr));
} else {
uint8* buffer = nullptr;
imgdata1.reset(Uncompress(temp, fsize, flags, nullptr,
[&](int width, int height, int components) {
w1 = width;
h1 = height;
c1 = components;
buffer = new uint8[flags.stride * height];
return buffer;
}));
}
ASSERT_NE(imgdata1, nullptr);
}
auto check_crop_and_decode_func = [&](int crop_x, int crop_y, int crop_width,
int crop_height) {
std::unique_ptr<uint8[]> imgdata2;
int w, h, c;
UncompressFlags flags = default_flags;
flags.crop = true;
flags.crop_x = crop_x;
flags.crop_y = crop_y;
flags.crop_width = crop_width;
flags.crop_height = crop_height;
if (flags.stride == 0) {
imgdata2.reset(Uncompress(temp, fsize, flags, &w, &h, &c, nullptr));
} else {
uint8* buffer = nullptr;
imgdata2.reset(Uncompress(temp, fsize, flags, nullptr,
[&](int width, int height, int components) {
w = width;
h = height;
c = components;
buffer = new uint8[flags.stride * height];
return buffer;
}));
}
ASSERT_NE(imgdata2, nullptr);
ASSERT_EQ(w, crop_width);
ASSERT_EQ(h, crop_height);
ASSERT_EQ(c, c1);
const int stride1 = (flags.stride != 0) ? flags.stride : w1 * c;
const int stride2 = (flags.stride != 0) ? flags.stride : w * c;
for (int i = 0; i < crop_height; i++) {
const uint8* p1 = &imgdata1[(i + crop_y) * stride1 + crop_x * c];
const uint8* p2 = &imgdata2[i * stride2];
for (int j = 0; j < c * w; j++) {
ASSERT_EQ(p1[j], p2[j])
<< "p1 != p2 in [" << i << "][" << j / 3 << "][" << j % 3 << "]";
}
}
};
check_crop_and_decode_func(0, 0, 5, 5);
check_crop_and_decode_func(0, 0, w1, 5);
check_crop_and_decode_func(0, 0, 5, h1);
check_crop_and_decode_func(0, 0, w1, h1);
check_crop_and_decode_func(w1 - 5, h1 - 6, 5, 6);
check_crop_and_decode_func(5, 6, 10, 15);
}
TEST(JpegMemTest, CropAndDecodeJpeg) {
Env* env = Env::Default();
const string data_path = kTestData;
UncompressFlags flags;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1_cmyk.jpg", flags);
}
TEST(JpegMemTest, CropAndDecodeJpegWithRatio) {
Env* env = Env::Default();
const string data_path = kTestData;
UncompressFlags flags;
for (int ratio : {1, 2, 4, 8}) {
flags.ratio = ratio;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
}
}
TEST(JpegMemTest, CropAndDecodeJpegWithComponents) {
Env* env = Env::Default();
const string data_path = kTestData;
UncompressFlags flags;
for (const int components : {0, 1, 3}) {
flags.components = components;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
}
}
TEST(JpegMemTest, CropAndDecodeJpegWithUpScaling) {
Env* env = Env::Default();
const string data_path = kTestData;
UncompressFlags flags;
flags.fancy_upscaling = true;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
}
TEST(JpegMemTest, CropAndDecodeJpegWithStride) {
Env* env = Env::Default();
const string data_path = kTestData;
string jpeg;
ReadFileToStringOrDie(env, data_path + "jpeg_merge_test1.jpg", &jpeg);
const int fsize = jpeg.size();
const auto* temp = absl::bit_cast<const uint8*>(jpeg.data());
int w, h, c;
ASSERT_TRUE(GetImageInfo(temp, fsize, &w, &h, &c));
UncompressFlags flags;
flags.stride = w * c;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
flags.stride = w * c * 3;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
flags.stride = w * c + 100;
TestCropAndDecodeJpeg(env, data_path + "jpeg_merge_test1.jpg", flags);
}
void CheckInvalidCropWindowFailed(const uint8* const temp, int fsize, int x,
int y, int w, int h) {
std::unique_ptr<uint8[]> imgdata;
int ww, hh, cc;
UncompressFlags flags;
flags.components = 3;
flags.crop = true;
flags.crop_x = x;
flags.crop_y = y;
flags.crop_width = w;
flags.crop_height = h;
imgdata.reset(Uncompress(temp, fsize, flags, &ww, &hh, &cc, nullptr));
CHECK(imgdata == nullptr);
}
TEST(JpegMemTest, CropAndDecodeJpegWithInvalidCropWindow) {
Env* env = Env::Default();
const string data_path = kTestData;
string jpeg;
ReadFileToStringOrDie(env, data_path + "jpeg_merge_test1.jpg", &jpeg);
const int fsize = jpeg.size();
const auto* temp = absl::bit_cast<const uint8*>(jpeg.data());
int w, h, c;
ASSERT_TRUE(GetImageInfo(temp, fsize, &w, &h, &c));
CheckInvalidCropWindowFailed(temp, fsize, 11, 11, 0, 11);
CheckInvalidCropWindowFailed(temp, fsize, 11, 11, 11, 0);
CheckInvalidCropWindowFailed(temp, fsize, -1, 11, 11, 11);
CheckInvalidCropWindowFailed(temp, fsize, 11, -1, 11, 11);
CheckInvalidCropWindowFailed(temp, fsize, 11, 11, -1, 11);
CheckInvalidCropWindowFailed(temp, fsize, 11, 11, 11, -1);
CheckInvalidCropWindowFailed(temp, fsize, w - 10, 11, 11, 11);
CheckInvalidCropWindowFailed(temp, fsize, 11, h - 10, 11, 11);
}
TEST(JpegMemTest, Jpeg2) {
const int in_w = 256;
const int in_h = 256;
const int stride1 = 3 * in_w;
const std::unique_ptr<uint8[]> refdata1(new uint8[stride1 * in_h]);
for (int i = 0; i < in_h; i++) {
for (int j = 0; j < in_w; j++) {
const int offset = i * stride1 + 3 * j;
refdata1[offset + 0] = i;
refdata1[offset + 1] = j;
refdata1[offset + 2] = static_cast<uint8>((i + j) >> 1);
}
}
const int stride2 = 3 * 357;
const std::unique_ptr<uint8[]> refdata2(new uint8[stride2 * in_h]);
for (int i = 0; i < in_h; i++) {
memcpy(&refdata2[i * stride2], &refdata1[i * stride1], 3 * in_w);
}
string cpdata1, cpdata2;
{
const string kXMP = "XMP_TEST_123";
CompressFlags flags;
flags.format = FORMAT_RGB;
flags.quality = 97;
flags.xmp_metadata = kXMP;
cpdata1 = Compress(refdata1.get(), in_w, in_h, flags);
flags.stride = stride2;
cpdata2 = Compress(refdata2.get(), in_w, in_h, flags);
CHECK_EQ(cpdata1, cpdata2);
CHECK_NE(string::npos, cpdata1.find(kXMP));
tstring cptest;
flags.stride = 0;
Compress(refdata1.get(), in_w, in_h, flags, &cptest);
CHECK_EQ(cptest, cpdata1);
flags.stride = stride2;
Compress(refdata2.get(), in_w, in_h, flags, &cptest);
CHECK_EQ(cptest, cpdata2);
}
std::unique_ptr<uint8[]> imgdata1;
for (const int components : {0, 3}) {
UncompressFlags flags;
flags.components = components;
int w, h, c;
imgdata1.reset(Uncompress(cpdata1.c_str(), cpdata1.length(), flags, &w, &h,
&c, nullptr));
CHECK_EQ(w, in_w);
CHECK_EQ(h, in_h);
CHECK_EQ(c, 3);
CHECK(imgdata1.get());
const int totalerr = ComputeSumAbsoluteDifference(
imgdata1.get(), refdata1.get(), in_w, in_h, stride1, stride1);
CHECK_LE(totalerr, 85000);
}
{
UncompressFlags flags;
flags.stride = 3 * 411;
const std::unique_ptr<uint8[]> imgdata2(new uint8[flags.stride * in_h]);
CHECK(imgdata2.get() == Uncompress(cpdata2.c_str(), cpdata2.length(), flags,
nullptr,
[=, &imgdata2](int w, int h, int c) {
CHECK_EQ(w, in_w);
CHECK_EQ(h, in_h);
CHECK_EQ(c, 3);
return imgdata2.get();
}));
const int totalerr = ComputeSumAbsoluteDifference(
imgdata1.get(), imgdata2.get(), in_w, in_h, stride1, flags.stride);
CHECK_EQ(totalerr, 0);
}
{
UncompressFlags flags;
flags.components = 3;
flags.dct_method = JDCT_IFAST;
int w, h, c;
imgdata1.reset(Uncompress(cpdata1.c_str(), cpdata1.length(), flags, &w, &h,
&c, nullptr));
CHECK_EQ(w, in_w);
CHECK_EQ(h, in_h);
CHECK_EQ(c, 3);
CHECK(imgdata1.get());
const int totalerr = ComputeSumAbsoluteDifference(
imgdata1.get(), refdata1.get(), in_w, in_h, stride1, stride1);
ASSERT_LE(totalerr, 200000);
}
}
bool IsChromaDownsampled(const string& jpegdata) {
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
jmp_buf jpeg_jmpbuf;
cinfo.err = jpeg_std_error(&jerr);
cinfo.client_data = &jpeg_jmpbuf;
jerr.error_exit = CatchError;
if (setjmp(jpeg_jmpbuf)) return false;
jpeg_create_decompress(&cinfo);
SetSrc(&cinfo, jpegdata.c_str(), jpegdata.size(), false);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
const int components = cinfo.output_components;
if (components == 1) return false;
CHECK_EQ(3, components);
CHECK_EQ(cinfo.comp_info[1].h_samp_factor, cinfo.comp_info[2].h_samp_factor)
<< "The h sampling factors should be the same.";
CHECK_EQ(cinfo.comp_info[1].v_samp_factor, cinfo.comp_info[2].v_samp_factor)
<< "The v sampling factors should be the same.";
for (int i = 0; i < components; ++i) {
CHECK_GT(cinfo.comp_info[i].h_samp_factor, 0) << "Invalid sampling factor.";
CHECK_EQ(cinfo.comp_info[i].h_samp_factor, cinfo.comp_info[i].v_samp_factor)
<< "The sampling factor should be the same in both directions.";
}
const bool downsampled =
cinfo.comp_info[1].h_samp_factor < cinfo.comp_info[0].h_samp_factor;
jpeg_destroy_decompress(&cinfo);
return downsampled;
}
TEST(JpegMemTest, ChromaDownsampling) {
const string jpegfile = string(kTestData) + "jpeg_merge_test1.jpg";
string jpeg;
ReadFileToStringOrDie(Env::Default(), jpegfile, &jpeg);
UncompressFlags unflags;
unflags.components = 3;
int w, h, c;
int64_t num_warnings;
std::unique_ptr<uint8[]> uncompressed(Uncompress(
jpeg.c_str(), jpeg.size(), unflags, &w, &h, &c, &num_warnings));
CHECK(uncompressed != nullptr);
CHECK_EQ(num_warnings, 0);
for (const bool downsample : {false, true}) {
CompressFlags flags;
flags.format = FORMAT_RGB;
flags.quality = 85;
flags.chroma_downsampling = downsample;
tstring recompressed;
Compress(uncompressed.get(), w, h, flags, &recompressed);
CHECK(!recompressed.empty());
CHECK_EQ(IsChromaDownsampled(recompressed), downsample);
}
}
void TestBadJPEG(Env* env, const string& bad_jpeg_file, int expected_width,
int expected_height, const string& reference_RGB_file,
const bool try_recover_truncated_jpeg) {
string jpeg;
ReadFileToStringOrDie(env, bad_jpeg_file, &jpeg);
UncompressFlags flags;
flags.components = 3;
flags.try_recover_truncated_jpeg = try_recover_truncated_jpeg;
int width, height, components;
std::unique_ptr<uint8[]> imgdata;
imgdata.reset(Uncompress(jpeg.c_str(), jpeg.size(), flags, &width, &height,
&components, nullptr));
if (expected_width > 0) {
CHECK_EQ(width, expected_width);
CHECK_EQ(height, expected_height);
CHECK_EQ(components, 3);
CHECK(imgdata.get());
if (!reference_RGB_file.empty()) {
string ref;
ReadFileToStringOrDie(env, reference_RGB_file, &ref);
CHECK(!memcmp(ref.data(), imgdata.get(), ref.size()));
}
} else {
CHECK(!imgdata.get()) << "file:" << bad_jpeg_file;
}
}
TEST(JpegMemTest, BadJpeg) {
Env* env = Env::Default();
const string data_path = kTestData;
TestBadJPEG(env, data_path + "bad_huffman.jpg", 1024, 768, "", false);
TestBadJPEG(env, data_path + "corrupt.jpg", 0 , 90, "", false);
TestBadJPEG(env, data_path + "corrupt34_2.jpg", 0, 3300, "", false);
TestBadJPEG(env, data_path + "corrupt34_3.jpg", 0, 3300, "", false);
TestBadJPEG(env, data_path + "corrupt34_4.jpg", 0, 3300, "", false);
TestBadJPEG(env, data_path + "corrupt34_2.jpg", 2544, 3300, "", true);
TestBadJPEG(env, data_path + "corrupt34_3.jpg", 2544, 3300, "", true);
TestBadJPEG(env, data_path + "corrupt34_4.jpg", 2544, 3300, "", true);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/jpeg/jpeg_mem.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/jpeg/jpeg_mem_unittest.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d493d61-fa22-4cee-8c9e-7ecf1b5e0542 | cpp | tensorflow/tensorflow | gif_io | tensorflow/core/lib/gif/gif_io.cc | tensorflow/core/lib/gif/gif_io_test.cc | #include "tensorflow/core/lib/gif/gif_io.h"
#include <algorithm>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/gif.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gif {
struct InputBufferInfo {
const uint8_t* buf;
int bytes_left;
};
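// giflib read callback that copies up to 'size' bytes from the in-memory
// input buffer.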
int input_callback(GifFileType* gif_file, GifByteType* buf, int size) {
InputBufferInfo* const info =
reinterpret_cast<InputBufferInfo*>(gif_file->UserData);
if (info != nullptr) {
if (size > info->bytes_left) size = info->bytes_left;
memcpy(buf, info->buf, size);
info->buf += size;
info->bytes_left -= size;
return size;
}
return 0;
}
static const char* GifErrorStringNonNull(int error_code) {
const char* error_string = GifErrorString(error_code);
if (error_string == nullptr) {
return "Unknown error";
}
return error_string;
}
uint8* Decode(const void* srcdata, int datasize,
const std::function<uint8*(int, int, int, int)>& allocate_output,
string* error_string, bool expand_animations) {
int error_code = D_GIF_SUCCEEDED;
InputBufferInfo info = {reinterpret_cast<const uint8*>(srcdata), datasize};
GifFileType* gif_file =
DGifOpen(static_cast<void*>(&info), &input_callback, &error_code);
const auto cleanup = gtl::MakeCleanup([gif_file]() {
int error_code = D_GIF_SUCCEEDED;
if (gif_file && DGifCloseFile(gif_file, &error_code) != GIF_OK) {
LOG(WARNING) << "Fail to close gif file, reason: "
<< GifErrorStringNonNull(error_code);
}
});
if (error_code != D_GIF_SUCCEEDED) {
*error_string = absl::StrCat("failed to open gif file: ",
GifErrorStringNonNull(error_code));
return nullptr;
}
if (DGifSlurp(gif_file) != GIF_OK) {
*error_string = absl::StrCat("failed to slurp gif file: ",
GifErrorStringNonNull(gif_file->Error));
if (gif_file->ImageCount <= 0 ||
gif_file->SavedImages[gif_file->ImageCount - 1].RasterBits == nullptr) {
return nullptr;
}
LOG(ERROR) << *error_string;
}
if (gif_file->ImageCount <= 0) {
*error_string = "gif file does not contain any image";
return nullptr;
}
int target_num_frames = gif_file->ImageCount;
int max_frame_width = 0;
int max_frame_height = 0;
for (int k = 0; k < target_num_frames; k++) {
SavedImage* si = &gif_file->SavedImages[k];
if (max_frame_height < si->ImageDesc.Height)
max_frame_height = si->ImageDesc.Height;
if (max_frame_width < si->ImageDesc.Width)
max_frame_width = si->ImageDesc.Width;
}
const int width = max_frame_width;
const int height = max_frame_height;
const int channel = 3;
if (!expand_animations) target_num_frames = 1;
uint8* const dstdata =
allocate_output(target_num_frames, width, height, channel);
if (!dstdata) return nullptr;
for (int64_t k = 0; k < target_num_frames; k++) {
uint8* this_dst = dstdata + k * width * channel * height;
SavedImage* this_image = &gif_file->SavedImages[k];
GifImageDesc* img_desc = &this_image->ImageDesc;
GraphicsControlBlock gcb;
DGifSavedExtensionToGCB(gif_file, k, &gcb);
int imgLeft = img_desc->Left;
int imgTop = img_desc->Top;
int imgRight = img_desc->Left + img_desc->Width;
int imgBottom = img_desc->Top + img_desc->Height;
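// A frame may update only a sub-rectangle of the canvas, so frames after the
// first start from a copy of the previous frame's pixels.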
if (k > 0) {
uint8* last_dst = dstdata + (k - 1) * width * channel * height;
for (int64_t i = 0; i < height; ++i) {
uint8* p_dst = this_dst + i * width * channel;
uint8* l_dst = last_dst + i * width * channel;
for (int64_t j = 0; j < width; ++j) {
p_dst[j * channel + 0] = l_dst[j * channel + 0];
p_dst[j * channel + 1] = l_dst[j * channel + 1];
p_dst[j * channel + 2] = l_dst[j * channel + 2];
}
}
}
if (img_desc->Left != 0 || img_desc->Top != 0 || img_desc->Width != width ||
img_desc->Height != height) {
if (k == 0) {
for (int64_t i = 0; i < height; ++i) {
uint8* p_dst = this_dst + i * width * channel;
for (int64_t j = 0; j < width; ++j) {
p_dst[j * channel + 0] = 0;
p_dst[j * channel + 1] = 0;
p_dst[j * channel + 2] = 0;
}
}
}
imgLeft = std::max(imgLeft, 0);
imgTop = std::max(imgTop, 0);
imgRight = std::min(imgRight, width);
imgBottom = std::min(imgBottom, height);
}
ColorMapObject* color_map = this_image->ImageDesc.ColorMap
? this_image->ImageDesc.ColorMap
: gif_file->SColorMap;
if (color_map == nullptr) {
*error_string = absl::StrCat("missing color map for frame ", k);
return nullptr;
}
for (int64_t i = imgTop; i < imgBottom; ++i) {
uint8* p_dst = this_dst + i * width * channel;
for (int64_t j = imgLeft; j < imgRight; ++j) {
GifByteType color_index =
this_image->RasterBits[(i - img_desc->Top) * (img_desc->Width) +
(j - img_desc->Left)];
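// Transparent pixels keep the value composited from the previous frame
// (black on the first frame).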
if (color_index == gcb.TransparentColor) {
if (k == 0) {
p_dst[j * channel + 0] = 0;
p_dst[j * channel + 1] = 0;
p_dst[j * channel + 2] = 0;
}
continue;
}
if (color_index >= color_map->ColorCount) {
*error_string = absl::StrCat("found color index ", color_index,
" outside of color map range ",
color_map->ColorCount);
return nullptr;
}
const GifColorType& gif_color = color_map->Colors[color_index];
p_dst[j * channel + 0] = gif_color.Red;
p_dst[j * channel + 1] = gif_color.Green;
p_dst[j * channel + 2] = gif_color.Blue;
}
}
}
return dstdata;
}
}
} | #include "tensorflow/core/lib/gif/gif_io.h"
#include <memory>
#include "tensorflow/core/lib/png/png_io.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gif {
namespace {
const char kTestData[] = "tensorflow/core/lib/gif/testdata/";
struct DecodeGifTestCase {
const string filepath;
const int num_frames;
const int width;
const int height;
const int channels;
};
void ReadFileToStringOrDie(Env* env, const string& filename, string* output) {
TF_CHECK_OK(ReadFileToString(env, filename, output));
}
void TestDecodeGif(Env* env, DecodeGifTestCase testcase) {
string gif;
ReadFileToStringOrDie(env, testcase.filepath, &gif);
std::unique_ptr<uint8[]> imgdata;
int nframes, w, h, c;
string error_string;
imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
w = width;
h = height;
c = channels;
return new uint8[static_cast<int64_t>(frame_cnt) * height * width *
channels];
},
&error_string));
ASSERT_NE(imgdata, nullptr);
ASSERT_EQ(nframes, testcase.num_frames);
ASSERT_EQ(w, testcase.width);
ASSERT_EQ(h, testcase.height);
ASSERT_EQ(c, testcase.channels);
}
TEST(GifTest, Gif) {
Env* env = Env::Default();
const string testdata_path = kTestData;
std::vector<DecodeGifTestCase> testcases(
{
{testdata_path + "lena.gif", 1, 51, 26, 3},
{testdata_path + "optimized.gif", 12, 20, 40, 3},
{testdata_path + "red_black.gif", 1, 16, 16, 3},
{testdata_path + "scan.gif", 12, 20, 40, 3},
{testdata_path + "squares.gif", 2, 16, 16, 3},
{testdata_path + "3g_multiframe.gif", 519, 1920, 1080, 3}});
for (const auto& tc : testcases) {
TestDecodeGif(env, tc);
}
}
void TestDecodeAnimatedGif(Env* env, const uint8* gif_data,
const string& png_filepath, int frame_idx) {
string png;
ReadFileToStringOrDie(env, png_filepath, &png);
png::DecodeContext decode;
png::CommonInitDecode(png, 3, 8, &decode);
const int width = static_cast<int>(decode.width);
const int height = static_cast<int>(decode.height);
std::unique_ptr<uint8[]> png_imgdata(
new uint8[height * width * decode.channels]);
png::CommonFinishDecode(reinterpret_cast<png_bytep>(png_imgdata.get()),
decode.channels * width * sizeof(uint8), &decode);
int frame_len = width * height * decode.channels;
int gif_idx = frame_len * frame_idx;
for (int i = 0; i < frame_len; i++) {
ASSERT_EQ(gif_data[gif_idx + i], png_imgdata[i]);
}
}
TEST(GifTest, AnimatedGif) {
Env* env = Env::Default();
const string testdata_path = kTestData;
string gif;
ReadFileToStringOrDie(env, testdata_path + "pendulum_sm.gif", &gif);
std::unique_ptr<uint8[]> gif_imgdata;
int nframes, w, h, c;
string error_string;
gif_imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int num_frames, int width, int height, int channels) -> uint8* {
nframes = num_frames;
w = width;
h = height;
c = channels;
return new uint8[num_frames * height * width * channels];
},
&error_string));
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame0.png", 0);
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame1.png", 1);
TestDecodeAnimatedGif(env, gif_imgdata.get(),
testdata_path + "pendulum_sm_frame2.png", 2);
}
void TestExpandAnimations(Env* env, const string& filepath) {
string gif;
ReadFileToStringOrDie(env, filepath, &gif);
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
bool expand_animations = false;
imgdata.reset(gif::Decode(
gif.data(), gif.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
return new uint8[frame_cnt * height * width * channels];
},
&error_string, expand_animations));
ASSERT_EQ(nframes, 1);
}
TEST(GifTest, ExpandAnimations) {
Env* env = Env::Default();
const string testdata_path = kTestData;
TestExpandAnimations(env, testdata_path + "scan.gif");
TestExpandAnimations(env, testdata_path + "pendulum_sm.gif");
TestExpandAnimations(env, testdata_path + "squares.gif");
}
void TestInvalidGifFormat(const string& header_bytes) {
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
imgdata.reset(gif::Decode(
header_bytes.data(), header_bytes.size(),
[&](int frame_cnt, int width, int height, int channels) -> uint8* {
nframes = frame_cnt;
return new uint8[frame_cnt * height * width * channels];
},
&error_string));
string err_msg = "failed to open gif file";
ASSERT_EQ(error_string.substr(0, 23), err_msg);
}
TEST(GifTest, BadGif) {
TestInvalidGifFormat("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A");
TestInvalidGifFormat("\x42\x4d");
TestInvalidGifFormat("\xff\xd8\xff");
TestInvalidGifFormat("\x49\x49\x2A\x00");
}
TEST(GifTest, TransparentIndexOutsideColorTable) {
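// Hand-built 3x1 GIF with a two-entry color table whose graphics control
// block marks index 2 -- one past the table -- as transparent.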
  // A hand-crafted 3x1 GIF whose graphic control extension declares
  // transparent color index 2 while the global color table holds only two
  // entries (indices 0 and 1).
  unsigned char encoded[43] = {
      'G', 'I', 'F', '8', '9', 'a',  // Header.
      3, 0, 1, 0,                    // Logical screen: width 3, height 1.
      0b1'111'0'000,  // Global color table present, 2 (= 2^(0+1)) entries.
      0,              // Background color index.
      0,              // Pixel aspect ratio.
      0x80, 0x00, 0x00,  // Color 0: dark red.
      0xFF, 0xFF, 0xFF,  // Color 1: white.
      '!', 0xF9, 0x04,   // Graphic control extension, block size 4.
      1,                 // Transparent color flag set.
      0, 0,              // Delay time.
      2,  // Transparent color index 2: outside the 2-entry color table.
      0,  // Block terminator.
      ',', 0, 0, 0, 0,  // Image descriptor at (0, 0).
      3, 0, 1, 0,       // Image: width 3, height 1.
      0,                // No local color table.
      2,                // LZW minimum code size.
      2,                // Two bytes of LZW-coded data follow.
      0b01'000'100,     // Clear code, then pixel index 0.
      0b0'101'010'0,    // Pixel indices 1 and 2, then end-of-information.
      0, ';'            // Block terminator and trailer.
  };
std::unique_ptr<uint8[]> imgdata;
string error_string;
int nframes;
auto allocate_image_data = [&](int frame_cnt, int width, int height,
int channels) -> uint8* {
nframes = frame_cnt;
imgdata = std::make_unique<uint8[]>(frame_cnt * height * width * channels);
return imgdata.get();
};
gif::Decode(encoded, sizeof(encoded), allocate_image_data, &error_string);
ASSERT_EQ(nframes, 1);
ASSERT_EQ(error_string, "");
uint8 expected[9] = {
0x80, 0x00, 0x00,
0xFF, 0xFF, 0xFF,
0x00, 0x00, 0x00,
};
for (int i = 0; i < 9; i++) {
ASSERT_EQ(imgdata[i], expected[i]) << "i=" << i;
}
  // Rewrite the second LZW data byte so the third pixel decodes to color
  // index 3, which is also outside the two-entry color table.
  encoded[40] = 0b0'101'011'0;
error_string.clear();
gif::Decode(encoded, sizeof(encoded), allocate_image_data, &error_string);
ASSERT_EQ(error_string, "found color index 3 outside of color map range 2");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gif/gif_io.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gif/gif_io_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c2ba008-edd9-410e-a26c-ecfbda3b69d9 | cpp | tensorflow/tensorflow | attr_util | tensorflow/core/runtime_fallback/kernel/attr_util.cc | tensorflow/core/runtime_fallback/kernel/attr_util_test.cc | #include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include <assert.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tensorflow/core/util/padding.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/kernel_utils.h"
namespace tensorflow {
DataType ParseTFDataType(StringPiece dtype) {
if (dtype == "DT_INT8") {
return DataType::DT_INT8;
} else if (dtype == "DT_INT32") {
return DataType::DT_INT32;
} else if (dtype == "DT_INT64") {
return DataType::DT_INT64;
} else if (dtype == "DT_FLOAT") {
return DataType::DT_FLOAT;
} else if (dtype == "DT_DOUBLE") {
return DataType::DT_DOUBLE;
} else {
assert(false && "Unsupported dtype");
abort();
}
}
bool ParseBoolAttrValue(StringPiece attr_value) {
if (attr_value == "false") {
return false;
} else if (attr_value == "true") {
return true;
} else {
assert(false && "Bool attribute value invalid");
abort();
}
}
Status ParseValue(StringPiece input, bool* value) {
*value = ParseBoolAttrValue(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, int32* value) {
bool parse_result = absl::SimpleAtoi(input, value);
if (!parse_result) {
return errors::InvalidArgument("Could not parse int32 from ", input);
}
return absl::OkStatus();
}
Status ParseValue(StringPiece input, DataType* value) {
*value = ParseTFDataType(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, std::string* value) {
*value = std::string(input);
return absl::OkStatus();
}
Status ParseValue(StringPiece input, std::vector<int32>* value) {
std::vector<std::string> parts = str_util::Split(input, ",");
value->reserve(parts.size());
for (const auto& value_str : parts) {
int32_t value_int;
bool parse_result = absl::SimpleAtoi(value_str, &value_int);
if (!parse_result) {
return errors::InvalidArgument("Could not parse list of integers from ",
input);
}
value->push_back(value_int);
}
return absl::OkStatus();
}
Status ParseValue(StringPiece input, Padding* value) {
return GetPaddingFromString(input, value);
}
Status AddOpAttr(const std::string& name, const std::string& attr_value,
tfrt::OpAttrs* opattrs) {
Status s;
std::vector<absl::string_view> value_split = tfd::AttrValueSplit(attr_value);
auto& type = value_split[0];
auto& value = value_split[1];
if (type == "bool") {
bool val;
s = ParseValue(value, &val);
opattrs->Set<bool>(name, val);
} else if (type == "i32") {
int32_t val;
s = ParseValue(value, &val);
opattrs->Set<int32>(name, val);
} else if (type == "string" || type == "padding") {
std::string val;
s = ParseValue(value, &val);
opattrs->SetString(name, val);
} else if (type == "tfdtype") {
DataType val;
s = ParseValue(value, &val);
opattrs->Set<tfrt::OpAttrType>(name, tfd::ConvertFromTfDataType(val));
} else if (type == "list(i32)") {
std::vector<int32> val;
s = ParseValue(value, &val);
opattrs->SetArray<int32>(name, val);
}
return s;
}
Status FillOpAttrs(tfrt::RemainingAttributes attrs, tfrt::OpAttrs* opattrs) {
int num_tf_attrs = attrs.size() / 2;
Status status;
for (int i = 0; i < num_tf_attrs; ++i) {
std::string name = attrs.GetStringAttribute(i * 2).str();
std::string attr_value = attrs.GetStringAttribute(i * 2 + 1).str();
Status s = AddOpAttr(name, attr_value, opattrs);
status.Update(s);
}
return status;
}
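// Usage sketch for the "type$payload" attribute encoding implemented above
// (illustrative; mirrors the unit tests rather than adding new API):
//
//   tfrt::OpAttrs attrs;
//   TF_CHECK_OK(AddOpAttr("transpose_a", "bool$false", &attrs));
//   TF_CHECK_OK(AddOpAttr("strides", "list(i32)$1,2,2,1", &attrs));
//   TF_CHECK_OK(AddOpAttr("T", "tfdtype$DT_FLOAT", &attrs));
//
// AddOpAttr splits each value on '$' via tfd::AttrValueSplit and dispatches
// on the type tag; an unrecognized tag falls through and leaves the returned
// status OK without setting anything.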
} | #include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/support/forward_decls.h"
using llvm::ArrayRef;
using tfrt::OpAttrs;
using tfrt::OpAttrType;
namespace tensorflow {
namespace {
TEST(AttrUtilTest, TestGetBoolAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "bool$true", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "bool$false", &opattrs));
ASSERT_TRUE(opattrs.GetAsserting<bool>("foo"));
ASSERT_FALSE(opattrs.GetAsserting<bool>("bar"));
}
TEST(AttrUtilTest, TestGetIntAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "i32$-2", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "i32$0", &opattrs));
TF_ASSERT_OK(AddOpAttr("baz", "i32$123", &opattrs));
ASSERT_EQ(opattrs.GetAsserting<int32>("foo"), -2);
ASSERT_EQ(opattrs.GetAsserting<int32>("bar"), 0);
ASSERT_EQ(opattrs.GetAsserting<int32>("baz"), 123);
Status s = AddOpAttr("invalid", "i32$4.5", &opattrs);
ASSERT_FALSE(s.ok());
}
TEST(AttrUtilTest, TestGetDTypeAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "tfdtype$DT_INT32", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "tfdtype$DT_FLOAT", &opattrs));
ASSERT_EQ(opattrs.GetAsserting<OpAttrType>("foo"), OpAttrType::I32);
ASSERT_EQ(opattrs.GetAsserting<OpAttrType>("bar"), OpAttrType::F32);
}
TEST(AttrUtilTest, TestGetIntListAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "list(i32)$", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "list(i32)$1", &opattrs));
TF_ASSERT_OK(AddOpAttr("baz", "list(i32)$1,2,3", &opattrs));
ArrayRef<int32> v1, v2, v3;
std::vector<int32> expected_v1;
std::vector<int32> expected_v2 = {1};
std::vector<int32> expected_v3 = {1, 2, 3};
ArrayRef<int32> expected_v1_ref(expected_v1);
ArrayRef<int32> expected_v2_ref(expected_v2);
ArrayRef<int32> expected_v3_ref(expected_v3);
ASSERT_TRUE(opattrs.GetArray<int32>("foo", &v1));
ASSERT_TRUE(opattrs.GetArray<int32>("bar", &v2));
ASSERT_TRUE(opattrs.GetArray<int32>("baz", &v3));
ASSERT_EQ(v1, expected_v1_ref);
ASSERT_EQ(v2, expected_v2_ref);
ASSERT_EQ(v3, expected_v3_ref);
}
TEST(AttrUtilTest, TestGetStrAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "string$", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "string$test", &opattrs));
ASSERT_EQ(opattrs.GetStringAsserting("foo"), "");
ASSERT_EQ(opattrs.GetStringAsserting("bar"), "test");
}
TEST(AttrUtilTest, TestGetPaddingAttr) {
OpAttrs opattrs;
TF_ASSERT_OK(AddOpAttr("foo", "padding$VALID", &opattrs));
TF_ASSERT_OK(AddOpAttr("bar", "padding$SAME", &opattrs));
ASSERT_EQ(opattrs.GetStringAsserting("foo"), "VALID");
ASSERT_EQ(opattrs.GetStringAsserting("bar"), "SAME");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/attr_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/attr_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a7cbd52-50d8-49c8-b25e-0aefde08b0fb | cpp | tensorflow/tensorflow | runtime_fallback_kernels | tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc | tensorflow/core/runtime_fallback/test/runtime_fallback_kernels_test.cc | #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_op_handler.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tensorflow/core/runtime_fallback/util/tensor_util.h"
#include "tensorflow/core/runtime_fallback/util/type_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tensor_util.h"
#include "tfrt/cpu/core_runtime/cpu_op_handler.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/core_runtime/core_runtime_op.h"
#include "tfrt/core_runtime/execute_op_impl.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/tensor_handle.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/attribute_utils.h"
#include "tfrt/host_context/device.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_buffer.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/kernel_frame.h"
#include "tfrt/host_context/kernel_utils.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/host_context/sync_kernel_frame.h"
#include "tfrt/support/error_util.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/tensor/conversion_registry.h"
#include "tfrt/tensor/dense_host_tensor.h"
#include "tfrt/tensor/scalar_host_tensor.h"
#include "tfrt/tensor/string_host_tensor.h"
#include "tfrt/tensor/tensor_serialize_utils.h"
namespace tensorflow {
namespace tfd {
namespace {
constexpr char kHostContextPtrAttrName[] = "host_ptr";
constexpr char kDefaultCpuDevice[] =
"/job:localhost/replica:0/task:0/device:CPU:0";
}
using tfrt::AggregateAttr;
using tfrt::Argument;
using tfrt::AsyncValue;
using tfrt::AsyncValueRef;
using tfrt::BEFAttributeType;
using tfrt::Chain;
using tfrt::DenseAttr;
using tfrt::DenseHostTensor;
using tfrt::ExecutionContext;
using tfrt::Expected;
using tfrt::FuncAttr;
using tfrt::HostBuffer;
using tfrt::HostContext;
using tfrt::KernelErrorHandler;
using tfrt::OpAttrs;
using tfrt::OpAttrsRawEntry;
using tfrt::OpAttrsRef;
using tfrt::OpAttrType;
using tfrt::raw_ostream;
using tfrt::RCReference;
using tfrt::RemainingArguments;
using tfrt::RemainingAttributes;
using tfrt::RemainingResults;
using tfrt::Result;
using tfrt::ShapeAttr;
using tfrt::string_view;
using tfrt::StringAttr;
using tfrt::StringAttribute;
using tfrt::Tensor;
using tfrt::TensorShape;
#define TFD_REPORT_AND_RETURN_IF_ERROR(handler, status) \
if (!status.ok()) { \
handler.ReportError(status.message()); \
return; \
}
static AsyncValueRef<RuntimeFallbackTensor> CreateRuntimeFallbackTensor(
TensorHandle* handle, HostContext* host) {
OwnedTensorHandle th(handle);
int rank;
tensorflow::Status status = th->NumDims(&rank);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(tfrt::StrCat(
"error getting rank from TF tensor handle: ", status.message()));
llvm::SmallVector<tfrt::Index, 4> dims;
for (auto i = 0; i < rank; ++i) {
int64_t dim;
status = th->Dim(i, &dim);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat("error getting dimension from TFE tensor handle: ",
status.message()));
dims.push_back(dim);
}
TensorShape shape{dims};
DataType dtype = th->DataType();
return tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
shape, GetTfrtDtype(dtype), std::move(th));
}
static std::pair<RuntimeFallbackTensor, Chain> TfdMoveDHTToTFT(
Argument<DenseHostTensor> dht, Argument<Chain> in_chain,
const ExecutionContext& exec_ctx) {
return std::make_pair(
MoveDHTToRuntimeFallbackTensor(std::move(dht.get()), exec_ctx.host()),
in_chain.get());
}
static void TfdConvertTFTToDHT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain,
Result<DenseHostTensor> dht,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
dht.Set(tfrt::ConvertTensorOnHost(exec_ctx, tft.get(),
DenseHostTensor::kTensorType)
.ReleaseRCRef());
out_chain.Set(in_chain);
}
static void TfdPrintTFT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain, Result<Chain> out_chain) {
llvm::outs() << tft.get() << "\n";
llvm::outs().flush();
out_chain.Set(in_chain);
}
static void TfdInitEagerContext(Argument<Chain> in_chain,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
tensorflow::tfd::EagerContextResource* eager_context_resource =
resource_context
->GetOrCreateResource<tensorflow::tfd::EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
(void)eager_context_resource;
out_chain.Set(in_chain);
}
OwnedTFTensor MoveDHTToTFTensor(DenseHostTensor&& dht, HostContext* host) {
llvm::SmallVector<tfrt::Index, 4> dims;
dht.shape().GetDimensions(&dims);
HostBuffer* host_buffer = dht.ReleaseBuffer().release();
auto deallocator = [](void* data, size_t len, void* arg) {
auto* host_buffer = reinterpret_cast<HostBuffer*>(arg);
host_buffer->DropRef();
};
CheckBoolCompatibility();
OwnedTFTensor tf_tensor{
TF_NewTensor(static_cast<TF_DataType>(GetTfDataType(dht.dtype())),
dims.data(), dims.size(), host_buffer->data(),
host_buffer->size(), deallocator, host_buffer)};
return tf_tensor;
}
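// Ownership note (descriptive, not new behavior): MoveDHTToTFTensor releases
// the DenseHostTensor's HostBuffer and passes the raw reference to
// TF_NewTensor; the deallocator drops that reference when the TF_Tensor is
// destroyed, so the buffer lives exactly as long as the TF_Tensor does.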
static tensorflow::Status DecodeDenseAttrToTensorInterface(
const DenseAttr& dense_attr, HostContext* host,
tensorflow::TensorInterface* result) {
Expected<DenseHostTensor> dht =
tfrt::DeserializeDenseHostTensorFromDenseAttr(dense_attr, host);
if (!dht)
return tensorflow::errors::Internal(tfrt::StrCat(
"cannot create DenseHostTensor in DecodeDenseAttrToTensorInterface:",
dht.takeError()));
OwnedTFTensor tf_tensor = MoveDHTToTFTensor(std::move(*dht), host);
tensorflow::Tensor t;
TF_RETURN_IF_ERROR(TF_TensorToTensor(tf_tensor.get(), &t));
*result = tensorflow::TensorInterface(std::move(t));
return absl::OkStatus();
}
static tensorflow::Status PrepareAttributes(EagerOperation* eager_op,
const OpAttrsRef& attrs,
HostContext* host,
EagerContext* eager_ctx) {
tensorflow::Status status;
attrs.IterateEntries([eager_op, eager_ctx, status_ptr = &status, host,
&attrs](const OpAttrsRawEntry& entry) {
assert(strcmp(entry.name, "device") != 0);
if (IsUnusedAttribute(entry.name)) {
return;
} else if (entry.IsArray()) {
if (entry.element_count == 0) {
if (entry.type == OpAttrType::CHAR) {
std::string empty_str;
*status_ptr = eager_op->SetAttrString(entry.name, empty_str.data(),
empty_str.size());
} else {
AttrValue empty_attr_value;
eager_op->MutableAttrs()->Set(entry.name, empty_attr_value);
}
} else if (entry.type == OpAttrType::CHAR) {
string_view attr_value = attrs.GetStringAsserting(entry.name);
*status_ptr = eager_op->SetAttrString(entry.name, attr_value.data(),
attr_value.size());
} else if (entry.type == OpAttrType::FUNC) {
string_view attr_value = attrs.GetFuncNameAsserting(entry.name);
*status_ptr = eager_op->SetAttrFunctionName(
entry.name, attr_value.data(), attr_value.size());
} else if (entry.type == OpAttrType::I64) {
llvm::ArrayRef<int64_t> int_array =
attrs.GetArrayAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrIntList(entry.name, int_array.data(),
int_array.size());
} else if (entry.type == OpAttrType::F32) {
llvm::ArrayRef<float> float_array =
attrs.GetArrayAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloatList(entry.name, float_array.data(),
float_array.size());
} else if (entry.type == OpAttrType::BOOL) {
llvm::ArrayRef<bool> bool_array =
attrs.GetArrayAsserting<bool>(entry.name);
std::vector<unsigned char> bool_char_array(bool_array.begin(),
bool_array.end());
*status_ptr = eager_op->SetAttrBoolList(
entry.name, bool_char_array.data(), bool_char_array.size());
} else if (entry.type == OpAttrType::DTYPE) {
const auto& op_attr = attrs.GetRawAsserting(entry.name);
assert(op_attr.IsArray());
auto bef_dtypes =
llvm::ArrayRef(static_cast<const tfrt::DType*>(op_attr.GetData()),
op_attr.element_count);
llvm::SmallVector<tensorflow::DataType, 4> tf_dtypes;
tf_dtypes.reserve(bef_dtypes.size());
for (auto bef_dtype : bef_dtypes) {
tf_dtypes.push_back(ConvertBefAttrTypeToTfDataType(bef_dtype));
}
*status_ptr = eager_op->SetAttrTypeList(entry.name, tf_dtypes.data(),
tf_dtypes.size());
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported array attribute type");
}
} else {
if (entry.type == OpAttrType::I64) {
int64_t attr_value = attrs.GetAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrInt(entry.name, attr_value);
} else if (entry.type == OpAttrType::F32) {
float attr_value = attrs.GetAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloat(entry.name, attr_value);
} else if (entry.type == OpAttrType::BOOL) {
bool attr_value = attrs.GetAsserting<bool>(entry.name);
*status_ptr = eager_op->SetAttrBool(entry.name, attr_value);
} else if (entry.type == OpAttrType::DTYPE) {
OpAttrType op_attr_type = attrs.GetAsserting<OpAttrType>(entry.name);
DataType tf_dtype = ConvertToTfDataType(op_attr_type);
*status_ptr = eager_op->SetAttrType(entry.name, tf_dtype);
} else if (entry.type == OpAttrType::SHAPE) {
tfrt::ShapeAttr shape_attr =
attrs.GetAsserting<tfrt::ShapeAttr>(entry.name);
if (shape_attr.HasRank()) {
*status_ptr = eager_op->SetAttrShape(
entry.name, shape_attr.GetShape().data(), shape_attr.GetRank());
} else {
          *status_ptr = eager_op->SetAttrShape(entry.name, /*dims=*/nullptr,
                                               /*num_dims=*/-1);
}
} else if (entry.type == OpAttrType::DENSE) {
DenseAttr dense_attr = attrs.GetAsserting<DenseAttr>(entry.name);
tensorflow::TensorInterface interface;
*status_ptr =
DecodeDenseAttrToTensorInterface(dense_attr, host, &interface);
if (!status_ptr->ok()) return;
*status_ptr = eager_op->SetAttrTensor(entry.name, &interface);
} else if (entry.type == OpAttrType::AGGREGATE) {
AggregateAttr list_attr = attrs.GetAsserting<AggregateAttr>(entry.name);
int num_values = list_attr.GetNumElements();
if (num_values == 0) {
std::vector<int> dummy_attr;
eager_op->MutableAttrs()->Set(
entry.name, gtl::ArraySlice<const int>(dummy_attr.data(), 0));
return;
}
auto attr_base = list_attr.GetAttribute(0);
if (IsDataTypeAttribute(attr_base.type()) &&
GetDataType(attr_base.type()) == tfrt::DType::String) {
llvm::SmallVector<const void*, 8> values;
llvm::SmallVector<size_t, 8> lengths;
values.reserve(num_values);
lengths.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto string_attr = list_attr.GetAttributeOfType<StringAttr>(i);
values.push_back(string_attr.GetValue().data());
lengths.push_back(string_attr.GetValue().size());
}
*status_ptr = eager_op->SetAttrStringList(entry.name, values.data(),
lengths.data(), num_values);
} else if (IsFuncAttribute(attr_base.type())) {
std::vector<const AbstractOperation*> funcs(num_values);
for (int i = 0; i < num_values; ++i) {
auto func_attr = list_attr.GetAttributeOfType<FuncAttr>(i);
ImmediateExecutionOperation* new_op = eager_ctx->CreateOperation();
auto func_name = func_attr.GetFunctionName();
            *status_ptr = new_op->Reset(func_name.str().c_str(),
                                        /*raw_device_name=*/nullptr);
funcs[i] = new_op;
}
*status_ptr =
eager_op->SetAttrFunctionList(entry.name, absl::MakeSpan(funcs));
} else if (attr_base.type() == BEFAttributeType::kShape) {
llvm::SmallVector<int, 8> ranks;
llvm::SmallVector<const int64_t*, 8> dims;
ranks.reserve(num_values);
dims.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto shape_attr = list_attr.GetAttributeOfType<ShapeAttr>(i);
if (shape_attr.HasRank()) {
ranks.push_back(shape_attr.GetRank());
dims.push_back(shape_attr.GetShape().data());
} else {
ranks.push_back(-1);
dims.push_back(nullptr);
}
}
*status_ptr = eager_op->SetAttrShapeList(entry.name, dims.data(),
ranks.data(), num_values);
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported list attribute type");
}
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported scalar attribute type");
}
}
});
return status;
}
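// Note: PrepareAttributes skips anything IsUnusedAttribute reports and
// asserts that no "device" attribute reaches it; the target device is
// instead passed to EagerOperation::Reset by the callers below.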
Status CallEagerExecute(const tfrt::ExecutionContext& exec_ctx,
EagerContext* eager_ctx, const char* op_name,
const char* device_name,
llvm::ArrayRef<TensorHandle*> input_tensor_handles,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<tensorflow::AbstractTensorHandle*>
result_tensor_handles) {
assert(eager_ctx != nullptr && "EagerContext is NULL");
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
TF_RETURN_IF_ERROR(eager_op->Reset(op_name, device_name));
for (TensorHandle* input_tensor : input_tensor_handles) {
TF_RETURN_IF_ERROR(eager_op->AddInput(input_tensor));
}
auto* host = exec_ctx.host();
TF_RETURN_IF_ERROR(PrepareAttributes(eager_op.get(), attrs, host, eager_ctx));
int num_retvals = result_tensor_handles.size();
TF_RETURN_IF_ERROR(eager_op->Execute(
absl::MakeSpan(result_tensor_handles.data(), num_retvals), &num_retvals));
return absl::OkStatus();
}
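// Call pattern (illustrative; mirrors the MatMul unit test further below):
//
//   llvm::SmallVector<tensorflow::AbstractTensorHandle*, 1> results(1);
//   TF_RETURN_IF_ERROR(CallEagerExecute(exec_ctx, eager_ctx, "MatMul",
//                                       /*device_name=*/"", {lhs, rhs},
//                                       attrs.freeze(), results));
//
// `results` must be pre-sized to the expected number of outputs, since its
// size is forwarded to EagerOperation::Execute as num_retvals.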
static bool ShouldAddHostContextAttr(const char* op_name) {
return strcmp(op_name, "TFRTMakeIterator") == 0;
}
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, EagerContext* eager_ctx,
const char* op_name, const char* device_name,
llvm::ArrayRef<Tensor*> arguments, const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto emit_error = [&exec_ctx, results](const tensorflow::Status& status) {
auto error = EmitErrorAsync(exec_ctx, status);
std::fill(results.begin(), results.end(), error);
return error;
};
llvm::SmallVector<TensorHandle*, 4> input_tensor_handles;
input_tensor_handles.reserve(arguments.size());
for (Tensor* input_tensor : arguments) {
input_tensor_handles.push_back(
llvm::cast<RuntimeFallbackTensor>(input_tensor)->GetTensorHandle());
}
int num_retvals = results.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> result_tensor_handles(
num_retvals);
Status status;
if (!ShouldAddHostContextAttr(op_name)) {
status =
CallEagerExecute(exec_ctx, eager_ctx, op_name, device_name,
input_tensor_handles, attrs, result_tensor_handles);
} else {
assert(attrs.GetNumEntries() == 1);
OpAttrs updated;
updated.Set(kHostContextPtrAttrName,
reinterpret_cast<int64_t>(exec_ctx.host()));
status = CallEagerExecute(
exec_ctx, eager_ctx, op_name, device_name, input_tensor_handles,
OpAttrsRef(std::move(updated)), result_tensor_handles);
}
if (!status.ok()) return emit_error(status);
auto host = exec_ctx.host();
for (int i = 0; i < num_retvals; ++i) {
auto expected_fallback_tensor =
CreateRuntimeFallbackTensorFromTfTensorHandle(
OwnedTensorHandle{
TensorHandleFromInterface(result_tensor_handles[i])},
host);
if (!expected_fallback_tensor)
results[i] = EmitErrorAsync(
exec_ctx, tfrt::StrCat(expected_fallback_tensor.takeError()));
else
results[i] = tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
std::move(*expected_fallback_tensor));
}
return tfrt::GetReadyChain();
}
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, const char* op_name,
const char* device_name, llvm::ArrayRef<Tensor*> arguments,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto eager_ctx_expected = GetEagerContext(exec_ctx);
if (!eager_ctx_expected) {
auto error =
EmitErrorAsync(exec_ctx, toString(eager_ctx_expected.takeError()));
std::fill(results.begin(), results.end(), error);
return std::move(error);
}
EagerContext* eager_ctx = eager_ctx_expected.get();
return RuntimeFallbackExecute(exec_ctx, eager_ctx, op_name, device_name,
arguments, attrs, results);
}
static void RuntimeFallbackKernel(
Argument<Chain> in_chain, RemainingArguments input_tensors,
Result<Chain> out_chain, RemainingResults output_tensors,
StringAttribute op_name, RemainingAttributes remaining_attributes,
KernelErrorHandler handler, const ExecutionContext& exec_ctx) {
HostContext* host = exec_ctx.host();
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
EagerContextResource* eager_context_resource =
resource_context->GetOrCreateResource<EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
tfrt::Expected<EagerContext*> eager_ctx_expected =
eager_context_resource->GetTFEagerContext();
if (!eager_ctx_expected) {
handler.ReportError("eager_ctx_expected.takeError()");
return;
}
EagerContext* eager_ctx = eager_ctx_expected.get();
std::string op_name_str = [&] {
auto view = op_name.get();
view.consume_front("tf.");
return view.str();
}();
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
  TFD_REPORT_AND_RETURN_IF_ERROR(
      handler,
      eager_op->Reset(op_name_str.c_str(), /*raw_device_name=*/nullptr));
for (AsyncValue* input_tensor_av : input_tensors.values()) {
auto input_tensor_handle =
input_tensor_av->get<RuntimeFallbackTensor>().GetTensorHandle();
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
eager_op->AddInput(input_tensor_handle));
}
assert(remaining_attributes.size() % 2 == 0);
int num_tf_attrs = remaining_attributes.size() / 2;
for (int i = 0; i < num_tf_attrs; ++i) {
std::string attr_name =
remaining_attributes.GetStringAttribute(i * 2).str();
absl::string_view attr_value = ToAbslStringView(
remaining_attributes.GetStringAttribute(i * 2 + 1).get());
std::vector<absl::string_view> value_split =
tfd::AttrValueSplit(attr_value);
if (value_split[0] == "string") {
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrString(attr_name.c_str(), value_split[1].data(),
value_split[1].size()));
} else if (value_split[0] == "bool") {
bool bool_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseBoolAttrValue(value_split[1], &bool_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrBool(attr_name.c_str(), bool_val));
} else if (value_split[0] == "int") {
int64_t int_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseIntAttrValue(value_split[1], &int_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrInt(attr_name.c_str(), int_val));
} else if (value_split[0] == "tftensor") {
tensorflow::Tensor t;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTensorAttrValue(value_split[1], &t));
tensorflow::TensorInterface interface(t);
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrTensor(attr_name.c_str(), &interface));
} else if (value_split[0] == "tfdtype") {
DataType dtype;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTfDataType(value_split[1], &dtype));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrType(attr_name.c_str(), dtype));
} else if (value_split[0] == "tfshape") {
std::vector<int64_t> dims;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseTensorShapeAttrValue(value_split[1], &dims));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrShape(attr_name.c_str(), dims.data(), dims.size()));
} else {
handler.ReportError("attribute type not yet supported");
return;
}
}
int num_retvals = output_tensors.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> retvals(num_retvals);
tensorflow::Status status = eager_op->Execute(
absl::MakeSpan(retvals.data(), num_retvals), &num_retvals);
TFD_REPORT_AND_RETURN_IF_ERROR(handler, status);
if (num_retvals != output_tensors.size()) {
handler.ReportError("Incorrect number of output values");
return;
}
for (int i = 0; i < num_retvals; ++i) {
OwnedTensorHandle owned_th{TensorHandleFromInterface(retvals[i])};
if (!owned_th) handler.ReportError("TensorHandleFromInterface failed");
auto fallback_tensor = CreateRuntimeFallbackTensorFromTfTensorHandle(
std::move(owned_th), host);
if (!fallback_tensor) {
output_tensors[i] = tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat(fallback_tensor.takeError()));
} else {
output_tensors[i] =
tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
std::move(*fallback_tensor));
}
}
out_chain.Set(in_chain);
}
static void EmitErrorAndSetInResults(
const tfrt::ExecutionContext& exec_ctx,
const tfrt::DecodedDiagnostic& error,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>> results) {
auto error_av = tfrt::EmitErrorAsync(exec_ctx, error.status);
std::fill(results.begin(), results.end(), error_av);
}
void CoreRTTensorHandleToFallbackTensorInternal(
llvm::ArrayRef<tfrt::AsyncValue*> tensorhandle_args,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>>
tf_tensor_results,
tfrt::string_view device, const tfrt::ExecutionContext& exec_ctx) {
assert(tensorhandle_args.size() == tf_tensor_results.size());
auto set_result = [&](tfrt::RCReference<tfrt::AsyncValue>& result,
llvm::Expected<tensorflow::Tensor> tf_tensor) {
auto result_ref = tfrt::MakeUnconstructedAsyncValueRef<
tensorflow::tfrt_stub::FallbackTensor>();
if (!tf_tensor) {
result_ref.SetError(tfrt::StrCat(tf_tensor.takeError()));
} else {
result_ref.emplace(std::move(tf_tensor.get()));
}
result = std::move(result_ref);
};
auto maybe_convert_runtime_fallback_tensor =
[&exec_ctx](
tfrt::AsyncValueRef<Tensor> tensor_avref,
const tfrt::Device& src_device,
const tfrt::Device& dst_device) -> tfrt::AsyncValueRef<tfrt::Tensor> {
assert(tensor_avref.IsAvailable());
assert(!tensor_avref.IsError());
auto& tensor = tensor_avref.get();
if (!tensor.IsTensorType(DenseHostTensor::kTensorType) ||
!src_device.IsDeviceType(tfrt::CpuDevice::kDeviceType) ||
!dst_device.IsDeviceType(tfrt::CpuDevice::kDeviceType)) {
      return tfrt::ConvertTensor(exec_ctx, tensor, src_device, dst_device,
                                 KernelFallbackTensor::kTensorType);
}
return tensor_avref;
};
auto dst_device = exec_ctx.host()->GetDeviceRef(device);
for (int i = 0; i < tensorhandle_args.size(); ++i) {
if (!dst_device) {
tf_tensor_results[i] = tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat("Failed to find device with name ", device));
continue;
}
auto& tensor_handle = tensorhandle_args[i]->get<tfrt::TensorHandle>();
assert(tensor_handle.IsDeviceAvailable());
assert(!tensor_handle.IsDeviceError());
auto* tensor_av = tensor_handle.GetAsyncTensor();
auto tensor_avref = tfrt::AsyncValueRef<Tensor>(FormRef(tensor_av));
auto& src_device = *tensor_handle.GetAvailableDevice();
AsyncValueRef<Tensor> knfb_tensor;
if (!tensor_av->IsAvailable()) {
auto ind_av = tfrt::MakeIndirectAsyncValue();
knfb_tensor = AsyncValueRef<Tensor>(ind_av.CopyRef());
tensor_av->AndThen(
[tensor_avref = std::move(tensor_avref), ind_av = std::move(ind_av),
&src_device, dst_device = dst_device.CopyRef(),
maybe_convert_runtime_fallback_tensor, exec_ctx]() mutable {
ind_av->ForwardTo(maybe_convert_runtime_fallback_tensor(
std::move(tensor_avref), src_device, *dst_device));
});
} else {
knfb_tensor = maybe_convert_runtime_fallback_tensor(
std::move(tensor_avref), src_device, *dst_device);
}
if (!knfb_tensor.IsAvailable()) {
auto result_ref = tfrt::MakeIndirectAsyncValue();
tf_tensor_results[i] = result_ref;
auto knfb_tensor_av = knfb_tensor.GetAsyncValue();
knfb_tensor_av->AndThen([knfb_tensor = std::move(knfb_tensor),
result_ref = std::move(result_ref),
dst_device = dst_device.CopyRef(),
exec_ctx]() mutable {
if (knfb_tensor.IsError()) {
result_ref->ForwardTo(std::move(knfb_tensor));
return;
}
auto expected_tf_tensor = tfrt::TFRTTensorToTFTensor(knfb_tensor.get());
if (!expected_tf_tensor) {
auto error = tfrt::EmitErrorAsync(
exec_ctx, toString(expected_tf_tensor.takeError()));
result_ref->ForwardTo(std::move(error));
} else {
auto tf_tensor_ref = tfrt::MakeAvailableAsyncValueRef<
tensorflow::tfrt_stub::FallbackTensor>(
std::move(expected_tf_tensor.get()));
result_ref->ForwardTo(std::move(tf_tensor_ref));
}
});
} else {
set_result(tf_tensor_results[i],
tfrt::TFRTTensorToTFTensor(knfb_tensor.get()));
}
}
}
void CoreRTTensorHandleToFallbackTensor(
RemainingArguments args, RemainingResults results, StringAttr device,
const tfrt::ExecutionContext& exec_ctx) {
tsl::profiler::TraceMe trace_me("corert_tensorhandle_to_fallback_tensor");
trace_me.AppendMetadata([request_id = exec_ctx.request_ctx()->id()]() {
return tsl::profiler::TraceMeEncode({{"id", request_id}});
});
CoreRTTensorHandleToFallbackTensorInternal(args.values(), results.values(),
device.GetValue(), exec_ctx);
}
static void FallbackTensorToCoreRTTensorHandleInternal(
llvm::ArrayRef<tfrt::AsyncValue*> tf_tensor_args,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>>
tensorhandle_results,
tfrt::string_view device, const tfrt::ExecutionContext& exec_ctx) {
auto* host = exec_ctx.host();
assert(tf_tensor_args.size() == tensorhandle_results.size());
for (int i = 0; i < tf_tensor_args.size(); ++i) {
auto* av = tf_tensor_args[i];
auto& tf_tensor = av->get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
AsyncValueRef<tfrt::Tensor> kernel_fallback_tensor =
tfrt::MakeAvailableAsyncValueRef<KernelFallbackTensor>(tf_tensor);
auto metadata = kernel_fallback_tensor.get().metadata();
tensorhandle_results[i] =
tfrt::MakeAvailableAsyncValueRef<tfrt::TensorHandle>(
host->GetDeviceRef(device), metadata,
std::move(kernel_fallback_tensor));
}
}
void FallbackTensorToCoreRTTensorHandle(
RemainingArguments args, RemainingResults results, StringAttr device,
const tfrt::ExecutionContext& exec_ctx) {
tsl::profiler::TraceMe trace_me("fallback_tensor_to_corert_tensorhandle");
trace_me.AppendMetadata([request_id = exec_ctx.request_ctx()->id()]() {
return tsl::profiler::TraceMeEncode({{"id", request_id}});
});
FallbackTensorToCoreRTTensorHandleInternal(args.values(), results.values(),
device.GetValue(), exec_ctx);
}
tfrt::Chain PrintFallbackTensor(
const tensorflow::tfrt_stub::FallbackTensor& arg, const tfrt::Chain& ch) {
std::string message;
llvm::raw_string_ostream(message) << arg.tensor().DebugString() << "\n";
printf("%s", message.c_str());
fflush(stdout);
return tfrt::Chain();
}
static void RuntimeFallbackExecuteOp(
RemainingArguments args, RemainingResults results, StringAttr device_attr,
AggregateAttr op_attr_array, AggregateAttr op_func_attr_array,
StringAttr op_name_attr, tfrt::AsyncValueRef<tfrt::Chain>* op_chain,
const ExecutionContext& exec_ctx) {
auto set_error = [&exec_ctx, results](tfrt::string_view msg) {
auto error_av = EmitErrorAsync(exec_ctx, absl::InternalError(msg));
for (int i = 0, n = results.size(); i < n; ++i) results[i] = error_av;
};
auto op_name = op_name_attr.GetValue();
op_name.consume_front("tf.");
std::string device_name = device_attr.GetValue().str();
if (!absl::StartsWith(device_name, "/")) device_name = kDefaultCpuDevice;
tfrt::OpAttrs op_attrs;
tfrt::SetUpOpAttrs(op_attr_array, &op_attrs);
tfrt::SetUpOpFuncAttrs(op_func_attr_array, &op_attrs);
auto eager_ctx_expected = GetEagerContext(exec_ctx);
if (!eager_ctx_expected) {
set_error(tfrt::StrCat(eager_ctx_expected.takeError()));
return;
}
EagerContext* eager_ctx = eager_ctx_expected.get();
Device* device = nullptr;
Status s = eager_ctx->local_device_mgr()->LookupDevice(device_name, &device);
if (!s.ok()) {
VLOG(1) << s.message() << " using default CPU device.";
}
llvm::SmallVector<RuntimeFallbackTensor, 4> tfrt_tensor_args;
tfrt_tensor_args.reserve(args.size());
for (int i = 0; i < args.size(); ++i) {
auto* av = args[i];
auto tf_tensor = av->get<tensorflow::Tensor>();
tfrt::TensorMetadata md = tfd::GetTensorMetadata(tf_tensor);
OwnedTensorHandle tensor_handle{tensorflow::TensorHandle::CreateLocalHandle(
std::move(tf_tensor), device, device, eager_ctx)};
tfrt_tensor_args.push_back(
RuntimeFallbackTensor(md.shape, md.dtype, std::move(tensor_handle)));
}
llvm::SmallVector<tfrt::Tensor*, 4> tfrt_tensor_arg_ptrs;
tfrt_tensor_arg_ptrs.reserve(args.size());
for (auto& tensor : tfrt_tensor_args) tfrt_tensor_arg_ptrs.push_back(&tensor);
llvm::SmallVector<RCReference<tfrt::AsyncValue>, 4> tfrt_tensor_results;
tfrt_tensor_results.resize(results.size());
auto chain = RuntimeFallbackExecute(
exec_ctx, op_name.str().c_str(), device_name.c_str(),
tfrt_tensor_arg_ptrs, tfrt::OpAttrsRef(op_attrs), tfrt_tensor_results);
if (op_chain) *op_chain = chain.CopyRef();
DCHECK(chain.IsAvailable());
if (chain.IsError()) {
EmitErrorAndSetInResults(
exec_ctx, tfrt::DecodedDiagnostic(chain.GetError()), results.values());
return;
}
for (int i = 0; i < results.size(); ++i) {
auto& runtime_fallback_tensor =
tfrt_tensor_results[i]->get<RuntimeFallbackTensor>();
const tensorflow::Tensor* tf_tensor = nullptr;
tensorflow::Status s =
runtime_fallback_tensor.GetTensorHandle()->Tensor(&tf_tensor);
DCHECK(s.ok()) << s;
results[i] =
tfrt::MakeAvailableAsyncValueRef<tensorflow::Tensor>(*tf_tensor);
}
}
Chain AddRuntimeFallbackImplicitConversionKernel(
Argument<tfrt::OpHandler*> op_handler, const ExecutionContext& exec_ctx) {
assert(op_handler.get()->GetName() == tfrt::CpuOpHandler::kName);
tfrt::CpuOpHandler* cpu_op_handler =
reinterpret_cast<tfrt::CpuOpHandler*>(op_handler.get());
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
DenseHostTensor::kTensorType);
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
tfrt::AnyScalarHostTensor::kTensorType);
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
tfrt::StringHostTensor::kTensorType);
return {};
}
void CreateRuntimeFallbackOpHandlerKernel(Result<tfrt::OpHandler*> op_handler,
StringAttribute tf_device_name,
const ExecutionContext& exec_ctx) {
auto* runtime = tfrt::CoreRuntime::GetFromHostContext(exec_ctx.host());
assert(runtime);
auto op_handler_ptr =
CreateRuntimeFallbackOpHandler(runtime, tf_device_name.get());
assert(op_handler_ptr);
op_handler.Emplace(op_handler_ptr.get());
}
static OwnedTensorHandle ConvertTFRTTensorToTFTensorHandle(
tfrt::Tensor* tensor) {
if (auto* dht = llvm::dyn_cast<tfrt::DenseHostTensor>(tensor)) {
tensorflow::Tensor tensor =
MoveHostBufferToTfTensor(dht->buffer(), dht->dtype(), dht->shape());
return OwnedTensorHandle{
tensorflow::TensorHandle::CreateLocalHandle(tensor)};
}
if (auto* sht = llvm::dyn_cast<tfrt::StringHostTensor>(tensor)) {
tensorflow::Tensor tensor = CopyShtToTfTensor(*sht);
return OwnedTensorHandle{
tensorflow::TensorHandle::CreateLocalHandle(tensor)};
}
llvm_unreachable("unsupported tensor type");
}
static llvm::Expected<tfrt::Value> ConvertTFTensorHandleToTFRTTensor(
OwnedTensorHandle tensor_handle, HostContext* host) {
tensorflow::Status status;
OwnedAbstractTensorInterface tensor_interface{
tensor_handle->Resolve(&status)};
if (!status.ok()) {
return tfrt::MakeStringError("error resolving TensorHandle: ",
status.message());
}
auto tf_dtype = tensor_interface->Type();
if (tf_dtype == DT_STRING) {
auto string_host_tensor =
CopyTfStringTensorToStringHostTensor(tensor_interface.get(), host);
if (!string_host_tensor)
return tfrt::MakeStringError(
"error converting TF string tensor to tfrt::StringHostTensor: ",
string_host_tensor.takeError());
return tfrt::Value(std::move(*string_host_tensor));
}
tfrt::TensorMetadata metadata(GetTfrtDtype(tf_dtype),
GetShape(tensor_interface.get()));
CheckBoolCompatibility();
void* data = tensor_interface->Data();
size_t size = tensor_interface->ByteSize();
auto host_buffer = HostBuffer::CreateFromExternal(
data, size,
[tensor_interface = std::move(tensor_interface)](void*, size_t) {});
tfrt::Value value;
value.emplace<DenseHostTensor>(metadata, std::move(host_buffer));
return std::move(value);
}
void RegisterTfdDelegateKernels(tfrt::KernelRegistry* registry) {
registry->AddKernel("tfd.init_eager_context",
TFRT_KERNEL(TfdInitEagerContext));
registry->AddKernel("tfd.delegate_kernel",
TFRT_KERNEL(RuntimeFallbackKernel));
registry->AddKernel("tfd.move_dht_to_tft", TFRT_KERNEL(TfdMoveDHTToTFT));
registry->AddKernel("tfd.convert_tft_to_dht",
TFRT_KERNEL(TfdConvertTFTToDHT));
registry->AddKernel("tfd.print_tft", TFRT_KERNEL(TfdPrintTFT));
registry->AddKernel(
"tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor",
TFRT_KERNEL(CoreRTTensorHandleToFallbackTensor));
registry->AddKernel(
"tfrt_fallback_async.fallback_tensor_to_corert_tensorhandle",
TFRT_KERNEL(FallbackTensorToCoreRTTensorHandle));
registry->AddKernel("tfrt_fallback_async.print_tensor",
TFRT_KERNEL(PrintFallbackTensor));
registry->AddKernel("corert.create_runtime_fallback_op_handler",
TFRT_KERNEL(CreateRuntimeFallbackOpHandlerKernel));
registry->AddKernel("corert.add_runtime_fallback_implicit_conversions",
TFRT_KERNEL(AddRuntimeFallbackImplicitConversionKernel));
}
}
} | #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/runtime_fallback/test/coreruntime_driver.h"
#include "tfrt/core_runtime/op_attrs.h"
namespace tfrt {
namespace {
TEST(RuntimeFallbackKernelsTest, CallEagerExecute) {
auto driver = CoreRuntimeDriver();
driver.InitializeCpuRuntimeFallbackOpHandler();
auto exec_ctx = driver.CreateExecutionContext(__FILE__, __LINE__);
tensorflow::Tensor input(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&input, {1, 1, 1, 1});
tensorflow::TensorHandle* input_th =
tensorflow::TensorHandle::CreateLocalHandle(input);
tfrt::OpAttrs matmul_attrs;
matmul_attrs.Set<bool>("transpose_a", false);
matmul_attrs.Set<bool>("transpose_b", false);
tfrt::OpAttrsRef matmul_attrs_ref = matmul_attrs.freeze();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 1> results;
results.resize(1);
auto eager_ctx_expected = tensorflow::tfd::GetEagerContext(exec_ctx);
ASSERT_FALSE(!eager_ctx_expected);
tensorflow::EagerContext* eager_ctx = eager_ctx_expected.get();
TF_EXPECT_OK(tensorflow::tfd::CallEagerExecute(
exec_ctx, eager_ctx, "MatMul", "", {input_th, input_th},
matmul_attrs_ref, results));
ASSERT_EQ(results.size(), 1);
tensorflow::TensorHandle* res_th =
tensorflow::TensorHandleFromInterface(results[0]);
const tensorflow::Tensor* res_tensor;
TF_EXPECT_OK(res_th->Tensor(&res_tensor));
EXPECT_EQ(res_th->DataType(), tensorflow::DT_FLOAT);
int64_t num_elements;
TF_EXPECT_OK(res_th->NumElements(&num_elements));
EXPECT_EQ(num_elements, 4);
tensorflow::Tensor expected(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&expected, {2, 2, 2, 2});
tensorflow::test::ExpectTensorEqual<float>(*res_tensor, expected);
input_th->Unref();
res_th->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/test/runtime_fallback_kernels_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cff052a-121d-4eb7-af05-2568e0a596f9 | cpp | tensorflow/tensorflow | tfrt_op_kernel | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.cc | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel_test.cc | #include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/kernel_frame.h"
namespace tensorflow {
TFRTOpKernelConstruction::TFRTOpKernelConstruction(
const tfrt::OpAttrsRef& attributes)
: attributes_(std::move(attributes)) {}
Status MissingAttributeError(StringPiece attr_name) {
return errors::InvalidArgument("Missing attribute: ", attr_name);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::string* value) const {
tfrt::string_view view;
bool success = attributes_.GetString(
llvm::StringRef(attr_name.data(), attr_name.size()), &view);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = view.str();
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
DataType* value) const {
tfrt::OpAttrType attrtype;
bool success = attributes_.Get<tfrt::OpAttrType>(
llvm::StringRef(attr_name.data(), attr_name.size()), &attrtype);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = tfd::ConvertToTfDataType(attrtype);
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
Padding* value) const {
std::string padding_str;
TF_RETURN_IF_ERROR(GetAttr<std::string>(attr_name, &padding_str));
return GetPaddingFromString(padding_str, value);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::vector<int32>* value) const {
llvm::ArrayRef<int32> arrayref;
bool success = attributes_.GetArray<int32>(
llvm::StringRef(attr_name.data(), attr_name.size()), &arrayref);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = arrayref;
return absl::OkStatus();
}
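// Typical use inside a kernel constructor (illustrative; avoids assuming any
// OP_REQUIRES-style macros beyond what this header provides):
//
//   explicit MyKernel(TFRTOpKernelConstruction* ctx) : TFRTOpKernel(ctx) {
//     std::vector<int32> strides;
//     Status s = ctx->GetAttr("strides", &strides);
//     if (!s.ok()) ctx->CtxFailure(__FILE__, __LINE__, s);
//   }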
void TFRTOpKernelConstruction::CtxFailure(const Status& s) {
error_ = tfrt::MakeStatusString(s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
namespace {
std::string FillFailureMessage(const char* file, int line, const Status& s) {
std::string error;
llvm::raw_string_ostream sstr(error);
sstr << "OP_REQUIRES failed at " << file << ":" << line << " : "
<< tfrt::MakeStatusString(s);
  sstr.str();  // Flush the stream contents into `error`.
return error;
}
}
void TFRTOpKernelConstruction::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
const std::optional<std::string>& TFRTOpKernelConstruction::error() {
return error_;
}
TFRTOpKernelContext::TFRTOpKernelContext(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs, int num_outputs,
const TFRTOpMeta* op_meta, tfrt::HostContext* host)
: inputs_(inputs),
op_meta_(op_meta),
outputs_(num_outputs),
eigen_host_context_(host) {}
const Tensor& TFRTOpKernelContext::output(int index) { return outputs_[index]; }
const std::optional<std::string>& TFRTOpKernelContext::error() {
return error_;
}
bool TFRTOpKernelContext::ValidateInputsAreSameShape(TFRTOpKernel* op) {
return true;
}
const Tensor& TFRTOpKernelContext::input(int index) {
return inputs_[index]->get<Tensor>();
}
int TFRTOpKernelContext::num_inputs() const { return inputs_.size(); }
int TFRTOpKernelContext::num_outputs() const { return outputs_.size(); }
void TFRTOpKernelContext::set_output(int index, const Tensor& tensor) {
outputs_[index] = tensor;
}
Status TFRTOpKernelContext::allocate_temp(DataType type,
const TensorShape& shape,
Tensor* out_temp) {
*out_temp = Tensor(type, shape);
return absl::OkStatus();
}
Status TFRTOpKernelContext::allocate_output(int index, const TensorShape& shape,
Tensor** tensor) {
DataType output_type = op_meta_->output_type(index);
outputs_[index] = Tensor(output_type, shape);
*tensor = &outputs_[index];
return absl::OkStatus();
}
DataType TFRTOpKernelContext::expected_output_dtype(int i) const {
return op_meta_->output_type(i);
}
void TFRTOpKernelContext::CtxFailure(const Status& s) { error_ = s.message(); }
void TFRTOpKernelContext::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
void TFRTOpKernelContext::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelContext::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
template <>
const Eigen::ThreadPoolDevice& TFRTOpKernelContext::eigen_device() const {
return eigen_host_context_.Device();
}
TFRTOpMeta::TFRTOpMeta(std::vector<DataType> output_types)
: output_types_(std::move(output_types)) {}
DataType TFRTOpMeta::output_type(int index) const {
return output_types_[index];
}
TFRTOpMetaBuilder::TFRTOpMetaBuilder(StringPiece op_name) : op_name_(op_name) {}
namespace {
DataType ParseInputOutputSpec(StringPiece spec) {
std::vector<absl::string_view> name_type =
absl::StrSplit(spec, absl::MaxSplits(':', 2));
DataType data_type;
bool success =
DataTypeFromString(absl::StripAsciiWhitespace(name_type[1]), &data_type);
assert(success && "Failed to parse DataType");
(void)success;
return data_type;
}
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Output(StringPiece output_spec) {
output_types_.push_back(ParseInputOutputSpec(output_spec));
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Input(StringPiece input_spec) {
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Attr(StringPiece attr_spec) {
return *this;
}
const string& TFRTOpMetaBuilder::op_name() const { return op_name_; }
TFRTOpMeta TFRTOpMetaBuilder::BuildMeta() const {
return TFRTOpMeta(output_types_);
}
TFRTOpMetaMap::TFRTOpMetaMap() = default;
void TFRTOpMetaMap::RegisterOpMeta(const TFRTOpMetaBuilder& op_builder) {
auto insert_result = op_metas_.insert(
std::make_pair(op_builder.op_name(), op_builder.BuildMeta()));
assert(insert_result.second && "Multiple registrations for the same op_name");
(void)insert_result;
}
const TFRTOpMeta* TFRTOpMetaMap::GetOpMeta(StringPiece op_name) const {
auto it = op_metas_.find(llvm::StringRef(op_name.data(), op_name.size()));
if (it == op_metas_.end()) return nullptr;
return &it->second;
}
TFRTOpRegisterer::TFRTOpRegisterer(const TFRTOpMetaBuilder& op_builder) {
tfrt_forwarding_op_meta_map->RegisterOpMeta(op_builder);
}
llvm::ManagedStatic<TFRTOpMetaMap> tfrt_forwarding_op_meta_map;
llvm::ManagedStatic<TFRTOpKernelFactories> tfrt_forwarding_kernel_factories;
TFRTOpKernelFactories::TFRTOpKernelFactories() = default;
void TFRTOpKernelFactories::RegisterFactory(StringPiece kernel_class_name,
TFRTOpKernelReg kernel_info) {
factories_[std::string(kernel_class_name)].push_back(kernel_info);
}
Status ValidKernelAttr(StringPiece kernel_class_name,
TFRTOpKernelConstruction* construction,
const llvm::StringMap<DataType>& constraints) {
for (const auto& constraint : constraints) {
auto attr_name = std::string(constraint.first());
DataType type;
Status s = construction->GetAttr(attr_name, &type);
if (!s.ok()) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name,
" has constraint for unset tfdtype attribute ", attr_name, ".");
}
if (type != constraint.second) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name, " with type constraint ", attr_name,
": ", DataTypeString(constraint.second),
" does not match attribute type ", DataTypeString(type), ".");
}
}
return absl::OkStatus();
}
std::unique_ptr<TFRTOpKernel> TFRTOpKernelFactories::CreateKernel(
StringPiece kernel_class_name,
TFRTOpKernelConstruction* op_kernel_construction) const {
auto it = factories_.find(std::string(kernel_class_name));
if (it == factories_.end()) {
op_kernel_construction->CtxFailure(errors::NotFound(
"Could not find kernel ", kernel_class_name, " in the registry."));
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
Status status;
for (const auto& kernel_info : it->second) {
Status s = ValidKernelAttr(kernel_class_name, op_kernel_construction,
kernel_info.type_constraints);
if (s.ok()) {
return kernel_info.callback(op_kernel_construction);
}
status.Update(s);
}
op_kernel_construction->CtxFailure(status);
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
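// Registration and lookup flow (illustrative; mirrors the unit tests below):
//
//   TFRTOpKernelReg reg([](TFRTOpKernelConstruction* c)
//                           -> std::unique_ptr<TFRTOpKernel> {
//     return std::make_unique<MyKernel>(c);
//   });
//   reg.type_constraints["T"] = DT_FLOAT;
//   tfrt_forwarding_kernel_factories->RegisterFactory("MyKernel", reg);
//
// CreateKernel then walks the factories registered under a name and returns
// the first whose type constraints match the construction's attributes,
// reporting the accumulated status if none match.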
} | #include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/error_codes.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/padding.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace {
std::unique_ptr<tfrt::HostContext> CreateTestHostContext(int num_threads) {
return std::make_unique<tfrt::HostContext>(
[](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(),
tfrt::CreateSingleThreadedWorkQueue());
}
TEST(TFRTOpKernelTest, TestGetBoolAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
attrs.Set<bool>("bar", false);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_TRUE(value);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_FALSE(value);
}
TEST(TFRTOpKernelTest, TestGetIntAttr) {
tfrt::OpAttrs attrs;
attrs.Set<int32>("foo", -2);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
int32_t value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, -2);
}
TEST(TFRTOpKernelTest, TestGetIntListAttr) {
tfrt::OpAttrs attrs;
attrs.SetArray<int32>("foo", {});
attrs.SetArray<int32>("bar", {1});
attrs.SetArray<int32>("baz", {1, 2, 3});
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::vector<int32> v1, v2, v3;
std::vector<int32> expected_v1;
std::vector<int32> expected_v2 = {1};
std::vector<int32> expected_v3 = {1, 2, 3};
TF_ASSERT_OK(ctx.GetAttr("foo", &v1));
ASSERT_EQ(v1, expected_v1);
TF_ASSERT_OK(ctx.GetAttr("bar", &v2));
ASSERT_EQ(v2, expected_v2);
TF_ASSERT_OK(ctx.GetAttr("baz", &v3));
ASSERT_EQ(v3, expected_v3);
}
TEST(TFRTOpKernelTest, TestGetStrAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "");
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::string value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, "");
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, "test");
}
TEST(TFRTOpKernelTest, TestGetPaddingAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "VALID");
attrs.SetString("bar", "SAME");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
Padding value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, Padding::VALID);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, Padding::SAME);
}
TEST(TFRTOpKernelTest, TestMissingAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
auto status = ctx.GetAttr("bar", &value);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
class TestKernel : public TFRTOpKernel {
public:
explicit TestKernel(TFRTOpKernelConstruction* construction)
: TFRTOpKernel(construction) {}
void Compute(TFRTOpKernelContext* context) override {}
};
TEST(TFRTOpKernelTest, TestKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::F32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelFloatInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelFloatInt",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestSecondKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg1([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
TFRTOpKernelReg reg2([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg1.type_constraints["foo"] = DT_FLOAT;
reg2.type_constraints["foo"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg1);
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg2);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernel2ndConstraint",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestKernelDoesNotMatchTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelIntInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelIntInt", &ctx);
ASSERT_EQ(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestAllocateTemp) {
auto host_context = CreateTestHostContext(1);
int num_outputs = 1;
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs;
TFRTOpMeta op_meta({DT_INT32});
TFRTOpKernelContext ctx(inputs, num_outputs, &op_meta, host_context.get());
Tensor out;
ASSERT_EQ(out.AllocatedBytes(), 0);
TF_EXPECT_OK(ctx.allocate_temp(DT_INT32, {}, &out));
ASSERT_GT(out.AllocatedBytes(), 0);
out.scalar<int32>()() = 123;
ASSERT_EQ(out.dtype(), DT_INT32);
ASSERT_EQ(out.shape().dims(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4506e907-c408-4e9e-a9ab-f52b10e03bdf | cpp | tensorflow/tensorflow | kernel_fallback_compat_request_state | tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.cc | tensorflow/core/runtime_fallback/test/kernel_fallback_compat_request_state_test.cc | #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <cstdlib>
#include <cstring>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/graph_executor/config.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/pointer_util.h"
namespace tensorflow {
namespace tfd {
using ::tensorflow::tfrt_stub::OpKernelRunnerTable;
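// Stores `tensor` at `index`, growing the backing arrays as needed. The
// tensor is owned by `resources_` and exposed through an already-available
// AsyncValue, so later lookups never block.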
void FallbackResourceArray::SetResource(
int index, tensorflow::tfrt_stub::ImmutableTensor tensor) {
if (resource_async_values_.size() <= index) {
resource_storage_.resize(index + 1);
resource_async_values_.resize(index + 1);
}
DCHECK(resource_storage_[index].get() == nullptr);
DCHECK(resource_async_values_[index].AsPtr().value() == nullptr);
resources_.push_back(std::make_unique<tensorflow::tfrt_stub::ImmutableTensor>(
std::move(tensor)));
resource_storage_[index] = std::make_unique<
tfrt::internal::AsyncValueStorage<tfrt_stub::FallbackTensor>>();
resource_async_values_[index] =
tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
*resource_storage_[index], resources_.back().get());
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer> step_container,
std::unique_ptr<CollectiveExecutor::Handle> collective_executor_handle,
core::RefCountPtr<Rendezvous> rendezvous, OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: step_id_(step_id),
runner_(runner),
step_container_(std::move(step_container)),
collective_executor_handle_(std::move(collective_executor_handle)),
collective_executor_(collective_executor_handle_
? collective_executor_handle_->get()
: nullptr),
rendezvous_(std::move(rendezvous)),
device_manager_(device_manager),
runner_table_(runner_table),
resource_array_(resource_array),
intra_op_threadpool_(user_intra_op_threadpool),
pflr_(pflr) {
DCHECK(runner_);
DCHECK(device_manager_);
DCHECK(runner_table_);
DCHECK(resource_array_);
DCHECK(rendezvous_);
DCHECK(pflr_);
cpu_device_ = device_manager_->HostCPU();
cpu_function_library_runtime_ = pflr_->GetFLR(cpu_device_->name());
if (user_intra_op_threadpool != nullptr) {
    custom_cpu_device_ = tensorflow::RenamedDevice::NewRenamedDevice(
        cpu_device_->name(), cpu_device_, /*owns_underlying=*/false,
        /*isolate_session_state=*/false, user_intra_op_threadpool);
    cpu_device_ = custom_cpu_device_.get();
    for (auto* device : device_manager_->ListDevices()) {
      custom_device_[device] = tensorflow::RenamedDevice::NewRenamedDevice(
          device->name(), device, /*owns_underlying=*/false,
          /*isolate_session_state=*/false, user_intra_op_threadpool);
    }
}
if (model_metadata.has_value()) {
session_metadata_ = *model_metadata;
}
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
OpKernelRunnerTable* runner_table, FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: KernelFallbackCompatRequestState(
runner, device_manager, step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer>{
std::make_unique<ScopedStepContainer>(
step_id,
[step_id, device_manager](const std::string& name) {
for (tensorflow::Device* device :
device_manager->ListDevices()) {
auto status = device->resource_manager()->Cleanup(name);
(void)status;
tensorflow::ScopedAllocatorMgr* sam =
device->GetScopedAllocatorMgr();
if (sam) sam->Cleanup(step_id);
}
})},
            /*collective_executor_handle=*/nullptr,
core::RefCountPtr<RefCountedIntraProcessRendezvous>(
new RefCountedIntraProcessRendezvous(device_manager)),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr) {}
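// Returns a process-lifetime default runner that executes the given closure
// inline on the calling thread.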
static std::function<void(std::function<void()>)>* GetDefaultRunner() {
static auto* const default_runner =
new std::function<void(std::function<void()>)>(
[](const std::function<void()>& f) { f(); });
return default_runner;
}
Status SetUpKernelFallbackCompatRequestContext(
tfrt::RequestContextBuilder* builder,
const tensorflow::DeviceMgr* device_manager,
const tensorflow::ProcessFunctionLibraryRuntime* pflr,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
std::function<void(std::function<void()>)>* runner,
tfrt_stub::CostRecorder* cost_recorder,
tfrt::ResourceContext* client_graph_resource_context,
tensorflow::CancellationManager* cancellation_manager,
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config) {
DCHECK(builder);
DCHECK(device_manager);
DCHECK(pflr);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
builder->context_data().emplace<KernelFallbackCompatRequestState>(
runner ? runner : GetDefaultRunner(), device_manager, builder->id(),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_cancellation_manager(cancellation_manager);
fallback_request_state.set_runtime_config(runtime_config);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
namespace tfd {
namespace {
TEST(FallbackResourceArrayTest, SetAndGetResourceOk) {
Tensor tensor_1 =
test::AsTensor<float>({0.0, 1.0, 2.0, 3.0}, TensorShape({1, 4}));
tfrt_stub::ImmutableTensor imm_tensor_1 =
tfrt_stub::ImmutableTensor::Create(tensor_1);
tensorflow::Tensor tensor_2 =
test::AsTensor<float>({5.0, 6.0, 7.0}, tensorflow::TensorShape({1, 3}));
tfrt_stub::ImmutableTensor imm_tensor_2 =
tfrt_stub::ImmutableTensor::Create(tensor_2);
FallbackResourceArray resource_array;
resource_array.SetResource(0, imm_tensor_1);
resource_array.SetResource(1, imm_tensor_2);
test::ExpectTensorEqual<float>(resource_array.GetResource(0)->tensor(),
tensor_1);
test::ExpectTensorEqual<float>(resource_array.GetResource(1)->tensor(),
tensor_2);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(0).tensor(), tensor_1);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(1).tensor(), tensor_2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/test/kernel_fallback_compat_request_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b607fa56-9dba-4632-8c78-0e876e514ba2 | cpp | tensorflow/tensorflow | random_ops | tensorflow/compiler/tf2xla/kernels/random_ops.cc | tensorflow/lite/kernels/random_ops_test.cc | #include <vector>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/lib/random.h"
#include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
class RandomUniformOp : public XlaOpKernel {
public:
explicit RandomUniformOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(
0, &shape, xla::ValueInferenceMode::kUpperBound));
const DataType dtype = output_type(0);
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.uniform with XLA compilation will ignore "
"seeds; consider using tf.random.stateless_uniform instead if "
"reproducible behavior is desired. "
<< name();
xla::XlaOp result = xla::RngUniform(XlaHelpers::Zero(b, dtype),
XlaHelpers::One(b, dtype), xla_shape);
auto result_status_or =
SetAllDimensionSizes(&ctx->value_inference(), result, ctx->Input(0));
OP_REQUIRES_OK(ctx, result_status_or.status());
result = result_status_or.value();
ctx->SetOutput(0, result);
}
private:
RandomUniformOp(const RandomUniformOp&) = delete;
void operator=(const RandomUniformOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomUniform").CompileTimeConstantInput("shape"),
RandomUniformOp);
REGISTER_XLA_OP(Name("RandomShuffle"), MlirXlaOpKernel);
class RandomUniformIntOp : public XlaOpKernel {
public:
explicit RandomUniformIntOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx,
TensorShapeToXLAShape(input_type(1), shape, &xla_shape));
const TensorShape minval_shape = ctx->InputShape(1);
const TensorShape maxval_shape = ctx->InputShape(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(minval_shape),
errors::InvalidArgument("minval must be 0-D, got shape ",
minval_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(maxval_shape),
errors::InvalidArgument("maxval must be 0-D, got shape ",
maxval_shape.DebugString()));
auto minval = ctx->Input(1);
auto maxval = ctx->Input(2);
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.uniform with XLA compilation will ignore "
"seeds; consider using tf.random.stateless_uniform instead if "
"reproducible behavior is desired. "
<< name();
ctx->SetOutput(0, xla::RngUniform(minval, maxval, xla_shape));
}
private:
RandomUniformIntOp(const RandomUniformIntOp&) = delete;
void operator=(const RandomUniformIntOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomUniformInt").CompileTimeConstantInput("shape"),
RandomUniformIntOp);
class RandomStandardNormalOp : public XlaOpKernel {
public:
explicit RandomStandardNormalOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(
0, &shape, xla::ValueInferenceMode::kUpperBound));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp result = xla::RngNormal(XlaHelpers::Zero(b, dtype),
XlaHelpers::One(b, dtype), xla_shape);
auto result_status_or =
SetAllDimensionSizes(&ctx->value_inference(), result, ctx->Input(0));
OP_REQUIRES_OK(ctx, result_status_or.status());
result = result_status_or.value();
ctx->SetOutput(0, result);
}
private:
RandomStandardNormalOp(const RandomStandardNormalOp&) = delete;
void operator=(const RandomStandardNormalOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomStandardNormal").CompileTimeConstantInput("shape"),
RandomStandardNormalOp);
class TruncatedNormalOp : public XlaOpKernel {
public:
explicit TruncatedNormalOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp one = xla::One(b, xla_shape.element_type());
xla::XlaOp min_positive =
xla::MinPositiveNormalValue(b, xla_shape.element_type());
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.truncated_normal with XLA "
"compilation will ignore seeds; consider using "
"tf.random.stateless_truncated_normal instead if "
"reproducible behavior is desired. "
<< name();
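    // Sample uniformly from (min_positive, 1); the strictly positive lower
    // bound appears intended to keep the TruncatedNormal transform away from
    // its singular endpoint at zero.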
auto uniform = xla::RngUniform(min_positive, one, xla_shape);
ctx->SetOutput(0, TruncatedNormal(uniform));
}
};
REGISTER_XLA_OP(Name("TruncatedNormal")
.CompileTimeConstantInput("shape")
.TypeConstraint("dtype", {DT_FLOAT, DT_DOUBLE}),
TruncatedNormalOp);
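// Broadcasts a parameter vector whose single dimension corresponds to dim 0
// of `output_shape`: broadcast with that dimension moved to the back, then
// transpose it back to the front.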
static absl::StatusOr<xla::XlaOp> BroadcastParameters(
xla::XlaOp params, TensorShape& output_shape) {
int rank = output_shape.dims();
std::vector<int64_t> bcast_shape;
for (int i = 1; i < rank; ++i) {
bcast_shape.push_back(output_shape.dim_size(i));
}
bcast_shape.push_back(output_shape.dim_size(0));
TF_ASSIGN_OR_RETURN(xla::XlaOp bcast_params,
BroadcastTo(params, bcast_shape));
std::vector<int64_t> permutation;
permutation.push_back(rank - 1);
for (int i = 0; i < rank - 1; ++i) {
permutation.push_back(i);
}
return xla::Transpose(bcast_params, permutation);
}
class ParameterizedTruncatedNormalOp : public XlaOpKernel {
public:
explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
OP_REQUIRES(ctx, xla_shape.rank() >= 1,
errors::InvalidArgument(
"shape parameter must have rank >= 1, received (",
xla::ShapeUtil::HumanString(xla_shape), ")"));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp one = xla::One(b, xla_shape.element_type());
xla::XlaOp min_positive =
xla::MinPositiveNormalValue(b, xla_shape.element_type());
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.truncated_normal with XLA "
"compilation will ignore seeds; consider using "
"tf.random.stateless_truncated_normal instead if "
"reproducible behavior is desired. "
<< name();
xla::XlaOp uniform = xla::RngUniform(min_positive, one, xla_shape);
auto result = b->ReportErrorOrReturn([&]() -> absl::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::XlaOp means,
BroadcastParameters(ctx->Input(1), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp stddevs,
BroadcastParameters(ctx->Input(2), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp minvals,
BroadcastParameters(ctx->Input(3), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp maxvals,
BroadcastParameters(ctx->Input(4), shape));
return ParameterizedTruncatedNormal(uniform, means, stddevs, minvals,
maxvals);
});
ctx->SetOutput(0, result);
}
};
REGISTER_XLA_OP(Name("ParameterizedTruncatedNormal")
.CompileTimeConstantInput("shape")
.TypeConstraint("dtype", {DT_FLOAT, DT_DOUBLE}),
ParameterizedTruncatedNormalOp);
}
} | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <initializer_list>
#include <limits>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
enum class InputType {
kConst = 0,
kDynamic = 1,
};
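// Wraps a single TFLite random op. The shape operand is supplied either as a
// constant tensor or as a dynamic input that is populated before invocation.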
class RandomOpModel : public SingleOpModel {
public:
RandomOpModel(BuiltinOperator op_code, InputType input_type,
const std::initializer_list<int32_t>& shape,
int32_t seed = 0, int32_t seed2 = 0) {
bool is_input_const = (input_type == InputType::kConst);
if (is_input_const) {
input_ = AddConstInput(TensorType_INT32, shape,
{static_cast<int32_t>(shape.size())});
} else {
input_ =
AddInput({TensorType_INT32, {static_cast<int32_t>(shape.size())}});
}
output_ = AddOutput({TensorType_FLOAT32, {}});
SetBuiltinOp(op_code, BuiltinOptions_RandomOptions,
CreateRandomOptions(builder_, seed, seed2).Union());
BuildInterpreter({GetShape(input_)});
if (!is_input_const) {
PopulateTensor<int32_t>(input_, std::vector<int32_t>(shape));
}
}
int input() { return input_; }
int output() { return output_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
class MultinomialOpModel : public SingleOpModel {
public:
MultinomialOpModel(InputType input_type,
const std::initializer_list<float>& logits,
int num_batches, int num_classes, int num_samples,
int32_t seed = 0, int32_t seed2 = 0,
tflite::TensorType output_type = TensorType_INT64) {
bool is_input_const = (input_type == InputType::kConst);
auto logits_shape = {num_batches, num_classes};
if (is_input_const) {
logits_ = AddConstInput(TensorType_FLOAT32, logits, logits_shape);
} else {
logits_ = AddInput({TensorType_FLOAT32, logits_shape});
}
num_samples_ = AddConstInput(TensorType_INT32, {num_samples}, {});
output_ = AddOutput({output_type, {}});
SetBuiltinOp(BuiltinOperator_MULTINOMIAL, BuiltinOptions_RandomOptions,
CreateRandomOptions(builder_, seed, seed2).Union());
BuildInterpreter({GetShape(logits_), GetShape(num_samples_)});
if (!is_input_const) {
PopulateTensor<float>(logits_, std::vector<float>(logits));
}
}
int logits() { return logits_; }
int num_samples() { return num_samples_; }
int output() { return output_; }
std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
std::vector<int32_t> GetInt32Output() {
return ExtractVector<int32_t>(output_);
}
private:
int logits_;
int num_samples_;
int output_;
};
class TestSuite : public testing::TestWithParam<
                      std::tuple<BuiltinOperator, InputType>> {};
TEST_P(TestSuite, NonDeterministicOutputWithSeedsEqualToZero) {
BuiltinOperator op_code = std::get<0>(GetParam());
InputType input_type = std::get<1>(GetParam());
RandomOpModel m1(op_code, input_type,
{100, 50, 5}, 0, 0);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), 100 * 50 * 5);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
RandomOpModel m2(op_code, input_type,
{100, 50, 5}, 0, 0);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), 100 * 50 * 5);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_NE(output1a, output2a);
EXPECT_NE(output1b, output2b);
}
TEST_P(TestSuite, DeterministicOutputWithNonZeroSeeds) {
BuiltinOperator op_code = std::get<0>(GetParam());
InputType input_type = std::get<1>(GetParam());
RandomOpModel m1(op_code, input_type, {100, 50, 5},
1234, 5678);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), 100 * 50 * 5);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
RandomOpModel m2(op_code, input_type, {100, 50, 5},
1234, 5678);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), 100 * 50 * 5);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_EQ(output1a, output2a);
EXPECT_EQ(output1b, output2b);
}
INSTANTIATE_TEST_SUITE_P(
RandomOpTest, TestSuite,
testing::Combine(
testing::Values(BuiltinOperator_RANDOM_UNIFORM,
BuiltinOperator_RANDOM_STANDARD_NORMAL),
testing::Values(InputType::kConst, InputType::kDynamic)),
[](const testing::TestParamInfo<TestSuite::ParamType>& info) {
std::string name = absl::StrCat(
std::get<0>(info.param) == BuiltinOperator_RANDOM_UNIFORM ?
"_RandomUniformOp" : "_RandomStandardNormalOp",
std::get<1>(info.param) == InputType::kConst ?
"_ConstInput" : "_DynamicInput");
return name;
}
);
TEST(RandomUniformOpTest, OutputMeanAndVariance) {
RandomOpModel m(BuiltinOperator_RANDOM_UNIFORM,
InputType::kConst,
{100, 50, 5}, 1234, 5678);
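  // Pre-fill the output with infinities so the statistics below would fail
  // if Invoke() did not overwrite every element.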
const std::vector<float> output_data(100 * 50 * 5,
std::numeric_limits<float>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), 100 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double mean = sum / output.size();
ASSERT_LT(std::abs(mean - 0.5), 0.05);
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - mean, 2);
}
double var = sum_squared / output.size();
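  // The variance of U(0, 1) is 1/12 ≈ 0.0833.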
EXPECT_LT(std::abs(1. / 12 - var), 0.05);
}
TEST(RandomStandardNormalOpTest, OutputMeanAndVariance) {
RandomOpModel m(BuiltinOperator_RANDOM_STANDARD_NORMAL,
InputType::kConst,
{100, 50, 5}, 1234, 5678);
const std::vector<float> output_data(100 * 50 * 5,
std::numeric_limits<float>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), 100 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double mean = sum / output.size();
ASSERT_LT(std::abs(mean), 0.05);
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - mean, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1.0 - var), 0.05);
}
class MultinomialOpTestSuite : public testing::TestWithParam<InputType> {};
TEST_P(MultinomialOpTestSuite, NonDeterministicOutputWithSeedsEqualToZero) {
const std::initializer_list<float> kLogits = {logf(0.3f), logf(0.7f)};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 30;
MultinomialOpModel m1(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 0, 0);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), kNumSamples);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
MultinomialOpModel m2(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 0, 0);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), kNumSamples);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_NE(output1a, output2a);
EXPECT_NE(output1b, output2b);
}
TEST_P(MultinomialOpTestSuite, DeterministicOutputWithNonZeroSeeds) {
const std::initializer_list<float> kLogits = {logf(0.3f), logf(0.7f)};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 30;
MultinomialOpModel m1(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 123, 456);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), kNumBatches * kNumSamples);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1b = m1.GetOutput();
EXPECT_EQ(output1b.size(), kNumBatches * kNumSamples);
EXPECT_NE(output1a, output1b);
MultinomialOpModel m2(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 123, 456);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), kNumBatches * kNumSamples);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2b = m2.GetOutput();
EXPECT_EQ(output2b.size(), kNumBatches * kNumSamples);
EXPECT_NE(output2a, output2b);
EXPECT_EQ(output1a, output2a);
EXPECT_EQ(output1b, output2b);
}
INSTANTIATE_TEST_SUITE_P(
RandomOpTest2, MultinomialOpTestSuite,
testing::Values(InputType::kConst, InputType::kDynamic),
[](const testing::TestParamInfo<MultinomialOpTestSuite::ParamType>& info) {
std::string name = absl::StrCat(
"_MultinomialOp",
info.param == InputType::kConst ? "_ConstInput" : "_DynamicInput");
return name;
});
TEST(MultinomialTest, ValidateTFLiteOutputisTheSameAsTFOutput_OutputTypeInt32) {
const std::initializer_list<float> kLogits = {-1.2039728, -0.35667497};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 10;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678,
TensorType_INT32);
const std::vector<std::vector<int32_t>> expected_outputs = {
{1, 0, 1, 0, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 0, 0, 0, 1},
{0, 1, 1, 0, 1, 1, 1, 1, 0, 1},
{1, 1, 1, 0, 1, 0, 0, 0, 1, 0}};
for (int i = 0; i < expected_outputs.size(); i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetInt32Output();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_outputs[i], output);
}
}
TEST(MultinomialTest, ValidateTFLiteOutputisTheSameAsTFOutput) {
const std::initializer_list<float> kLogits = {-1.609438, -1.2039728,
-0.6931472};
const int kNumBatches = 1;
const int kNumClasses = 3;
const int kNumSamples = 15;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 5678, 1234);
const std::vector<std::vector<int64_t>> expected_outputs = {
{1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2},
{1, 2, 0, 0, 2, 1, 2, 0, 1, 0, 2, 2, 0, 2, 2},
{1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 2},
{0, 1, 1, 1, 2, 0, 1, 2, 1, 1, 2, 2, 1, 2, 2},
{0, 2, 2, 0, 2, 0, 2, 0, 1, 1, 2, 2, 0, 0, 1}};
for (int i = 0; i < expected_outputs.size(); i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_outputs[i], output);
}
}
TEST(MultinomialTest,
ValidateTFLiteOutputisTheSameAsTFOutput_MultiBatchMultiInvoke) {
const std::vector<float> kProb = {0.1f, 0.2f, 0.7f, 0.2f, 0.3f,
0.5f, 0.1f, 0.1f, 0.8f};
const std::initializer_list<float> kLogits = {
logf(0.1f), logf(0.2f), logf(0.7f), logf(0.2f), logf(0.3f),
logf(0.5f), logf(0.1f), logf(0.1f), logf(0.8f)};
const int kNumBatches = 3;
const int kNumClasses = 3;
const int kNumSamples = 10;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
const std::vector<std::vector<int64_t>> expected_output = {
{2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2,
2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2},
{2, 2, 2, 0, 2, 1, 0, 0, 2, 0, 2, 0, 2, 1, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 2, 2},
{2, 0, 0, 0, 1, 2, 1, 2, 0, 0, 2, 2, 2, 2, 0,
2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2}};
for (int i = 0; i < 3; i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_output[i], output);
}
}
TEST(MultinomialTest, ValidateClassProbabilities) {
const std::vector<float> kProb = {0.1f, 0.9f, 0.2f, 0.8f, 0.3f,
0.7f, 0.4f, 0.6f, 0.5f, 0.5f};
const std::initializer_list<float> kLogits = {
logf(0.1f), logf(0.9f), logf(0.2f), logf(0.8f), logf(0.3f),
logf(0.7f), logf(0.4f), logf(0.6f), logf(0.5f), logf(0.5f)};
const int kNumBatches = 5;
const int kNumClasses = 2;
const int kNumSamples = 10000;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
int total_count = 0;
for (int i = 0; i < kNumBatches; i++) {
for (int j = 0; j < kNumClasses; j++) {
int idx = i * kNumClasses + j;
const int expected_count = static_cast<int>(kProb[idx] * kNumSamples);
const int allowed_misses = static_cast<int>(expected_count / 20);
int actual_count = std::count(output.begin() + i * kNumSamples,
output.begin() + (i + 1) * kNumSamples, j);
EXPECT_LE(abs(actual_count - expected_count), allowed_misses);
total_count += actual_count;
}
}
EXPECT_EQ(total_count, kNumBatches * kNumSamples);
}
TEST(MultinomialTest, ValidatePreciseOutput) {
const std::initializer_list<float> kLogits = {1000.0f, 1001.0f};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 1000;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
double p0 = static_cast<double>(c0) / (c0 + c1);
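  // softmax({1000, 1001}) yields P(class 0) = 1 / (1 + e) ≈ 0.26894142137;
  // the large logits presumably exercise numerically stable softmax handling.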
EXPECT_LT(std::abs(p0 - 0.26894142137), 0.01);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/random_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
678376f5-040b-442d-b9eb-167c86ddc768 | cpp | tensorflow/tensorflow | set_ops | tensorflow/core/ops/set_ops.cc | tensorflow/core/ops/set_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("SetSize")
.Input("set_indices: int64")
.Input("set_values: T")
.Input("set_shape: int64")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("size: int32")
.SetShapeFn(shape_inference::UnknownShape);
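// Each set-operation kernel below emits its result as a sparse tensor triple:
// result_indices (rank-2 int64), result_values, and result_shape (rank-1
// int64 whose length is the output rank).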
REGISTER_OP("DenseToDenseSetOperation")
.Input("set1: T")
.Input("set2: T")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 2) {
return errors::InvalidArgument("len(inputs) != 2.");
}
DimensionHandle output_rank;
ShapeHandle input0_shape = c->input(0);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input0_shape, 2, &input0_shape));
if (c->RankKnown(input0_shape)) {
const int32_t input0_rank = c->Rank(input0_shape);
ShapeHandle input1_shape = c->input(1);
TF_RETURN_IF_ERROR(
c->WithRank(input1_shape, input0_rank, &input1_shape));
if (c->RankKnown(input1_shape)) {
const int32_t rank = c->Rank(input1_shape);
ShapeHandle group0_shape;
TF_RETURN_IF_ERROR(
c->Subshape(input0_shape, 0, rank - 1, &group0_shape));
ShapeHandle group1_shape;
TF_RETURN_IF_ERROR(
c->Subshape(input1_shape, 0, rank - 1, &group1_shape));
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(
c->Merge(group0_shape, group1_shape, &unused_shape));
}
output_rank = c->MakeDim(input0_rank);
} else {
ShapeHandle input1_shape = c->input(1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input1_shape, 2, &input1_shape));
if (c->RankKnown(input1_shape)) {
output_rank = c->MakeDim(c->Rank(input1_shape));
} else {
output_rank = c->UnknownDim();
}
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank));
return absl::OkStatus();
});
REGISTER_OP("DenseToSparseSetOperation")
.Input("set1: T")
.Input("set2_indices: int64")
.Input("set2_values: T")
.Input("set2_shape: int64")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 4) {
return errors::InvalidArgument("len(inputs) != 4.");
}
ShapeHandle input1_shape_shape = c->input(3);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(1), c->input(2), input1_shape_shape));
DimensionHandle input1_rank_dim = c->Dim(input1_shape_shape, 0);
DimensionHandle output_rank_dim;
ShapeHandle input0_shape = c->input(0);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input0_shape, 2, &input0_shape));
if (c->RankKnown(input0_shape)) {
const int32_t input0_rank = c->Rank(input0_shape);
TF_RETURN_IF_ERROR(
c->WithValue(input1_rank_dim, input0_rank, &input1_rank_dim));
output_rank_dim = c->MakeDim(input0_rank);
} else if (c->ValueKnown(input1_rank_dim)) {
output_rank_dim = input1_rank_dim;
} else {
output_rank_dim = c->UnknownDim();
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank_dim));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank_dim));
return absl::OkStatus();
});
REGISTER_OP("SparseToSparseSetOperation")
.Input("set1_indices: int64")
.Input("set1_values: T")
.Input("set1_shape: int64")
.Input("set2_indices: int64")
.Input("set2_values: T")
.Input("set2_shape: int64")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 6) {
return errors::InvalidArgument("len(inputs) != 6.");
}
ShapeHandle input0_shape_shape = c->input(2);
ShapeHandle input1_shape_shape = c->input(5);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(0), c->input(1), input0_shape_shape));
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(3), c->input(4), input1_shape_shape));
DimensionHandle input0_rank_dim = c->Dim(input0_shape_shape, 0);
DimensionHandle input1_rank_dim = c->Dim(input1_shape_shape, 0);
DimensionHandle output_rank_dim;
if (c->ValueKnown(input0_rank_dim)) {
const int64_t input0_rank = c->Value(input0_rank_dim);
if (input0_rank < 2) {
return errors::InvalidArgument("Input 0, expected rank >= 2, got ",
input0_rank, ".");
}
TF_RETURN_IF_ERROR(
c->WithValue(input1_rank_dim, input0_rank, &input1_rank_dim));
output_rank_dim = input0_rank_dim;
} else if (c->ValueKnown(input1_rank_dim)) {
const int64_t input1_rank = c->Value(input1_rank_dim);
if (input1_rank < 2) {
return errors::InvalidArgument("Input 1, expected rank >= 2, got ",
input1_rank, ".");
}
output_rank_dim = input1_rank_dim;
} else {
output_rank_dim = c->UnknownDim();
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank_dim));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank_dim));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(SetOpsTest, DenseToDenseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("DenseToDenseSetOperation");
op.input_tensors.resize(3);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?");
}
TEST(SetOpsTest, DenseToDenseShape) {
ShapeInferenceTestOp op("DenseToDenseSetOperation");
INFER_OK(op, "?;?", "[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[2]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?];[?,?,?]");
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[?,?,?];[?,?]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[2,1];[2,1,2]");
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[2,1,2];[2,1]");
INFER_OK(op, "[?,?];?", "[?,2];[?];[2]");
INFER_OK(op, "?;[?,?]", "[?,2];[?];[2]");
INFER_OK(op, "[?,?];[?,?]", "[?,2];[?];[2]");
INFER_OK(op, "[?,?,?,?];?", "[?,4];[?];[4]");
INFER_OK(op, "?;[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];?", "[?,4];[?];[4]");
INFER_OK(op, "?;[5,3,2,1]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[5,3,2,1]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];[?,?,?,?]", "[?,4];[?];[4]");
INFER_ERROR("Dimension 0 in both shapes must be equal", op,
"[4,?,2,?];[3,1,?,5]");
INFER_ERROR("Dimension 2 in both shapes must be equal", op,
"[4,3,2,1];[4,3,3,1]");
INFER_OK(op, "[4,5,6,7];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[?,?,?,4]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[4,5,6,7]", "[?,4];[?];[4]");
INFER_OK(op, "[4,?,2,?];[?,1,?,5]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[4,?,6,?]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[4,5,6,4]", "[?,4];[?];[4]");
}
TEST(SetOpsTest, DenseToSparseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("DenseToSparseSetOperation");
op.input_tensors.resize(5);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?;?;?");
}
TEST(SetOpsTest, DenseToSparseShape) {
ShapeInferenceTestOp op("DenseToSparseSetOperation");
INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[?];[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[?];[5,3];[5];[3]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[2];?;?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[2];[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[2];[5,3];[5];[3]");
INFER_OK(op, "[?,?];?;?;?", "[?,2];[?];[2]");
INFER_OK(op, "[?,?];[?,?];[?];[?]", "[?,2];[?];[2]");
INFER_OK(op, "?;[?,2];[?];[2]", "[?,d3_0];[?];[d3_0]");
INFER_OK(op, "?;[5,2];[5];[2]", "[?,d3_0];[?];[d3_0]");
INFER_OK(op, "[?,?];[5,2];[5];[2]", "[?,2];[?];[2]");
INFER_OK(op, "[4,3];[5,2];[5];[2]", "[?,2];[?];[2]");
INFER_ERROR("elements in index (5) and values (6) do not match", op,
"?;[5,3];[6];[3]");
INFER_ERROR("rank (3) and shape rank (4) do not match", op,
"?;[5,3];[5];[4]");
}
TEST(SetOpsTest, SparseToSparseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("SparseToSparseSetOperation");
op.input_tensors.resize(7);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?;?;?;?;?");
}
TEST(SetOpsTest, SparseToSparseShape) {
ShapeInferenceTestOp op("SparseToSparseSetOperation");
INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "[?,?];[?];[?];[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_OK(op, "[?,?];[?];[?];?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "[?,2];[?];[2];?;?;?", "[?,d2_0];[?];[d2_0]");
INFER_OK(op, "?;?;?;[?,2];[?];[2]", "[?,d5_0];[?];[d5_0]");
INFER_OK(op, "[?,2];[?];[2];[?,?];[?];[?]", "[?,d2_0];[?];[d2_0]");
INFER_OK(op, "[?,?];[?];[?];[?,2];[?];[2]", "[?,d5_0];[?];[d5_0]");
INFER_OK(op, "[?,2];[?];[2];[?,2];[?];[2]", "[?,d2_0];[?];[d2_0]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/set_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/set_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df45c772-447a-4b88-b2e8-35a046c634d9 | cpp | tensorflow/tensorflow | functional_grad | tensorflow/cc/gradients/functional_grad.cc | tensorflow/cc/gradients/functional_grad_test.cc | #include <iostream>
#include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/functional_ops.h"
namespace tensorflow {
namespace ops {
namespace {
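// Computes gradients for (Stateful)PartitionedCall by delegating to
// SymbolicGradient: the called function's attrs are forwarded, and the op's
// original inputs are passed followed by the incoming gradients.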
Status PartitionedCallGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
NameAttrList f;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "f", &f));
for (const auto& attr : op.node()->attrs()) {
(*f.mutable_attr())[attr.first] = attr.second;
}
std::vector<Output> func_inputs;
std::vector<DataType> input_dtypes;
const int num_inputs = op.num_inputs();
func_inputs.reserve(num_inputs + grad_inputs.size());
input_dtypes.reserve(num_inputs);
for (int i = 0; i < num_inputs; i++) {
func_inputs.push_back(op.input(i));
input_dtypes.push_back(op.input_type(i));
}
func_inputs.insert(std::end(func_inputs), std::begin(grad_inputs),
std::end(grad_inputs));
auto grad = SymbolicGradient(scope, func_inputs, input_dtypes, f);
for (int i = 0; i < num_inputs; i++) {
grad_outputs->push_back(grad[i]);
}
return scope.status();
}
REGISTER_GRADIENT_OP("PartitionedCall", PartitionedCallGrad);
REGISTER_GRADIENT_OP("StatefulPartitionedCall", PartitionedCallGrad);
}
}
} | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace ops {
namespace {
class FunctionGradTest : public ::testing::Test {
protected:
FunctionGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
    TF_ASSERT_OK((ComputeGradientError<float, float, float>(
        scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(FunctionGradTest, PartitionedCallGrad) {
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
TF_ASSERT_OK(scope_.graph()->AddFunctionLibrary(f_lib_proto));
Output x = Placeholder(scope_, DT_FLOAT);
NameAttrList f;
f.set_name("XTimesTwo");
(*f.mutable_attr())["T"].set_type(DT_FLOAT);
auto results =
PartitionedCall(scope_, std::initializer_list<Input>{x}, {DT_FLOAT}, f);
RunTest(x, {}, results[0], {});
auto stateful_results = StatefulPartitionedCall(
scope_, std::initializer_list<Input>{x}, {DT_FLOAT}, f);
RunTest(x, {}, stateful_results[0], {});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/functional_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/functional_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
543a1e52-6a80-4a21-8635-769d455ec6d8 | cpp | tensorflow/tensorflow | tpu_cross_replica_ops | tensorflow/core/ops/tpu_cross_replica_ops.cc | tensorflow/core/ops/tpu_cross_replica_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("AllToAll")
.Input("input: T")
.Input("group_assignment: int32")
.Output("output: T")
.Attr("T: {numbertype, bool}")
.Attr("concat_dimension: int")
.Attr("split_dimension: int")
.Attr("split_count: int")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
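      // The output shape matches the input except that the concat dimension
      // is multiplied by split_count and the split dimension is divided by it.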
ShapeHandle input = c->input(0);
ShapeHandle group_assignment = c->input(1);
if (!c->RankKnown(input)) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
int64_t rank = c->Rank(input);
int concat_dimension;
int split_dimension;
int split_count;
TF_RETURN_IF_ERROR(c->GetAttr("split_count", &split_count));
if (split_count < 1) {
return errors::InvalidArgument("split_count ", split_count,
" must at least be one.");
}
if (c->RankKnown(group_assignment) && c->Rank(group_assignment) != 2) {
return errors::InvalidArgument("group_assignment must have rank 2.");
}
DimensionHandle num_replicas_per_group = c->Dim(group_assignment, 1);
if (c->ValueKnown(num_replicas_per_group) &&
(c->Value(num_replicas_per_group) != split_count)) {
return errors::InvalidArgument(
"split_count ", split_count,
" must equal the size of the second dimension of group_assignment ",
c->Value(num_replicas_per_group));
}
TF_RETURN_IF_ERROR(c->GetAttr("concat_dimension", &concat_dimension));
if (concat_dimension < 0 || concat_dimension >= rank) {
return errors::InvalidArgument("concat_dimension ", concat_dimension,
" is out of range of input rank ", rank);
}
TF_RETURN_IF_ERROR(c->GetAttr("split_dimension", &split_dimension));
if (split_dimension < 0 || split_dimension >= rank) {
return errors::InvalidArgument("split_dimension ", split_dimension,
" is out of range of input rank ", rank);
}
if (!c->ValueKnown(c->Dim(input, concat_dimension)) ||
!c->ValueKnown(c->Dim(input, split_dimension))) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
}
std::vector<DimensionHandle> dims;
dims.resize(rank);
for (int32_t i = 0; i < rank; ++i) {
dims[i] = c->Dim(input, i);
if (i == concat_dimension) {
dims[i] = c->MakeDim(c->Value(dims[i]) * split_count);
}
if (i == split_dimension) {
if (c->ValueKnown(dims[i]) &&
(c->Value(dims[i]) % split_count != 0)) {
return errors::InvalidArgument(
"input dimension ", c->Value(dims[i]),
" not divisible by split_count ", split_count);
}
dims[i] = c->MakeDim(c->Value(dims[i]) / split_count);
}
}
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
REGISTER_OP("CrossReplicaSum")
.Input("input: T")
.Input("group_assignment: int32")
.Output("output: T")
.Attr("T: {half, bfloat16, float, float64, int32, uint32}")
.SetIsStateful()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("CollectivePermute")
.Input("input: T")
.Input("source_target_pairs: int32")
.Output("output: T")
.Attr("T: numbertype")
.SetIsStateful()
.SetShapeFn(shape_inference::UnchangedShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(AllToAll, UnknownRank) {
ShapeInferenceTestOp op("AllToAll");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
}
TEST(AllToAll, KnownRankUnknownDims) {
ShapeInferenceTestOp op("AllToAll");
op.input_tensors.resize(2);
AddNodeAttr("concat_dimension", 0, &op.node_def);
AddNodeAttr("split_count", 1, &op.node_def);
AddNodeAttr("split_dimension", 1, &op.node_def);
INFER_OK(op, "[?,1];[?,?]", "?");
INFER_OK(op, "[1,?];[?,?]", "?");
INFER_OK(op, "[?,?];[?,?]", "?");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/tpu_cross_replica_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/tpu_cross_replica_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d40f4bec-05dc-4d5e-acb1-12eb713ff1ff | cpp | tensorflow/tensorflow | uniform_quant_ops | tensorflow/core/ops/uniform_quant_ops.cc | tensorflow/core/ops/uniform_quant_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
namespace tensorflow {
namespace {
using shape_inference::DimensionHandle;
using shape_inference::ShapeHandle;
using tensorflow::errors::InvalidArgument;
using tensorflow::errors::Unknown;
absl::StatusOr<TensorShape> ToTensorShape(ShapeHandle shape_handle,
int64_t rank) {
TensorShape shape;
for (int i = 0; i < rank; ++i) {
int64_t dim_size = shape_inference::InferenceContext::Value(
shape_inference::InferenceContext::DimKnownRank(shape_handle, i));
if (dim_size == shape_inference::InferenceContext::kUnknownDim) {
return Unknown("Dim size unknown.");
}
shape.AddDim(dim_size);
}
return shape;
}
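// Validates quantization parameter shapes: rank 0 means per-tensor
// quantization; rank 1 means per-channel, in which case both scales and
// zero_points must match `match_dimension_handle` in size.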
Status ScalesZeroPointsShapeValid(shape_inference::InferenceContext* context,
DimensionHandle match_dimension_handle,
ShapeHandle scales, ShapeHandle zero_points) {
const int32_t scales_rank = shape_inference::InferenceContext::Rank(scales);
const int32_t zero_points_rank =
shape_inference::InferenceContext::Rank(zero_points);
if (scales_rank == shape_inference::InferenceContext::kUnknownRank ||
zero_points_rank == shape_inference::InferenceContext::kUnknownRank) {
return absl::OkStatus();
}
if (scales_rank != zero_points_rank) {
return InvalidArgument("scales and zero_points must have same rank.");
}
if (scales_rank == 0) {
return absl::OkStatus();
}
DimensionHandle scales_size = context->Dim(scales, 0);
DimensionHandle zero_points_size = context->Dim(zero_points, 0);
DimensionHandle merged_scales;
TF_RETURN_IF_ERROR(
context->Merge(scales_size, match_dimension_handle, &merged_scales));
DimensionHandle merged_zero_points;
TF_RETURN_IF_ERROR(context->Merge(zero_points_size, match_dimension_handle,
&merged_zero_points));
return absl::OkStatus();
}
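// Shape function for quantized dot: lhs quantization parameters must be
// per-tensor (rank 0), while rhs and output parameters may be per-channel
// (rank 1) along the output column dimension.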
Status DotShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(1), 2, &rhs));
ShapeHandle lhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 0, &lhs_scales));
ShapeHandle lhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 0, &lhs_zero_points));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(4), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(5), 1, &rhs_zero_points));
ShapeHandle output_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(6), 1, &output_scales));
ShapeHandle output_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(7), 1, &output_zero_points));
DimensionHandle inner_lhs = context->Dim(lhs, 1);
DimensionHandle inner_rhs = context->Dim(rhs, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(context->Merge(inner_lhs, inner_rhs, &merged));
DimensionHandle output_rows = context->Dim(lhs, 0);
DimensionHandle output_cols = context->Dim(rhs, 1);
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_cols,
rhs_scales, rhs_zero_points));
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(
context, output_cols, output_scales, output_zero_points));
context->set_output(0, context->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
Status DotHybridShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRank(context->input(1), 2, &rhs));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 1, &rhs_zero_points));
DimensionHandle inner_lhs = context->Dim(lhs, 1);
DimensionHandle inner_rhs = context->Dim(rhs, 0);
DimensionHandle merged;
TF_RETURN_IF_ERROR(context->Merge(inner_lhs, inner_rhs, &merged));
DimensionHandle output_rows = context->Dim(lhs, 0);
DimensionHandle output_cols = context->Dim(rhs, 1);
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_cols,
rhs_scales, rhs_zero_points));
context->set_output(0, context->Matrix(output_rows, output_cols));
return absl::OkStatus();
}
struct ShapeCommonParams {
ShapeHandle lhs;
ShapeHandle rhs;
ShapeHandle lhs_scales;
ShapeHandle lhs_zero_points;
ShapeHandle rhs_scales;
ShapeHandle rhs_zero_points;
ShapeHandle output_scales;
ShapeHandle output_zero_points;
bool is_output_scales_zero_points_set;
ShapeCommonParams(ShapeHandle lhs, ShapeHandle rhs, ShapeHandle lhs_scales,
ShapeHandle lhs_zero_points, ShapeHandle rhs_scales,
ShapeHandle rhs_zero_points, ShapeHandle output_scales,
ShapeHandle output_zero_points)
: lhs(lhs),
rhs(rhs),
lhs_scales(lhs_scales),
lhs_zero_points(lhs_zero_points),
rhs_scales(rhs_scales),
rhs_zero_points(rhs_zero_points),
output_scales(output_scales),
output_zero_points(output_zero_points),
is_output_scales_zero_points_set(true) {}
ShapeCommonParams(ShapeHandle lhs, ShapeHandle rhs, ShapeHandle rhs_scales,
ShapeHandle rhs_zero_points)
: lhs(lhs),
rhs(rhs),
rhs_scales(rhs_scales),
rhs_zero_points(rhs_zero_points),
is_output_scales_zero_points_set(false) {}
};
Status ConvolutionShapeCommon(shape_inference::InferenceContext* context,
const ShapeCommonParams& params) {
const int32_t lhs_rank = shape_inference::InferenceContext::Rank(params.lhs);
const int32_t rhs_rank = shape_inference::InferenceContext::Rank(params.rhs);
if (lhs_rank == shape_inference::InferenceContext::kUnknownRank &&
rhs_rank == shape_inference::InferenceContext::kUnknownRank) {
context->set_output(0, context->UnknownShape());
return absl::OkStatus();
} else if (lhs_rank == shape_inference::InferenceContext::kUnknownRank ||
rhs_rank == shape_inference::InferenceContext::kUnknownRank) {
context->set_output(
0, context->UnknownShapeOfRank(
lhs_rank == shape_inference::InferenceContext::kUnknownRank
? rhs_rank
: lhs_rank));
return absl::OkStatus();
} else if (lhs_rank != rhs_rank) {
return InvalidArgument("lhs and rhs must have same rank.");
}
auto lhs_shape = ToTensorShape(params.lhs, lhs_rank);
auto rhs_shape = ToTensorShape(params.rhs, rhs_rank);
if (!lhs_shape.ok() || !rhs_shape.ok()) {
context->set_output(0, context->UnknownShapeOfRank(lhs_rank));
return absl::OkStatus();
}
UniformQuantizedConvolutionParams convolution_params;
TF_RETURN_IF_ERROR(convolution_params.LoadFromAttrs(*context));
TF_RETURN_IF_ERROR(convolution_params.ValidateOrFillParamsAndValidateShape(
lhs_shape.value(), rhs_shape.value()));
DimensionHandle output_feature = context->Dim(
params.rhs,
convolution_params.dimension_numbers().kernel_output_feature_dimension());
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(
context, output_feature, params.rhs_scales, params.rhs_zero_points));
if (params.is_output_scales_zero_points_set) {
TF_RETURN_IF_ERROR(ScalesZeroPointsShapeValid(context, output_feature,
params.output_scales,
params.output_zero_points));
if (shape_inference::InferenceContext::Rank(params.output_scales) > 0) {
DimensionHandle scales_merged;
TF_RETURN_IF_ERROR(context->Merge(context->Dim(params.rhs_scales, 0),
context->Dim(params.output_scales, 0),
&scales_merged));
}
}
TF_ASSIGN_OR_RETURN(const auto& out_shape,
convolution_params.CalculateOutputShape(
lhs_shape.value(), rhs_shape.value()));
ShapeHandle out_shape_handle;
TF_RETURN_IF_ERROR(
context->MakeShapeFromTensorShape(out_shape, &out_shape_handle));
context->set_output(0, out_shape_handle);
return absl::OkStatus();
}
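// Shape function for UniformQuantizedConvolution. lhs_scales and
// lhs_zero_points are constrained to rank 0, i.e. the activation side only
// supports per-tensor quantization, while the rhs side may be per-tensor or
// per-output-channel (rank at most 1).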
Status ConvolutionShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(1), 2, &rhs));
ShapeHandle lhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 0, &lhs_scales));
ShapeHandle lhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 0, &lhs_zero_points));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(4), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(5), 1, &rhs_zero_points));
ShapeHandle output_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(6), 1, &output_scales));
ShapeHandle output_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(7), 1, &output_zero_points));
return ConvolutionShapeCommon(
context,
ShapeCommonParams(lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales,
rhs_zero_points, output_scales, output_zero_points));
}
Status ConvolutionHybridShape(shape_inference::InferenceContext* context) {
ShapeHandle lhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(0), 2, &lhs));
ShapeHandle rhs;
TF_RETURN_IF_ERROR(context->WithRankAtLeast(context->input(1), 2, &rhs));
ShapeHandle rhs_scales;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(2), 1, &rhs_scales));
ShapeHandle rhs_zero_points;
TF_RETURN_IF_ERROR(
context->WithRankAtMost(context->input(3), 1, &rhs_zero_points));
return ConvolutionShapeCommon(
context, ShapeCommonParams(lhs, rhs, rhs_scales, rhs_zero_points));
}
}  // namespace
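// Op registrations. UniformQuantize, UniformRequantize, UniformDequantize
// and UniformQuantizedClipByValue are element-wise in their first input, so
// they reuse shape_inference::UnchangedShape; the dot and convolution ops
// use the shape functions defined above.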
REGISTER_OP("UniformQuantize")
.Input("input: Tin")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {float}")
.Attr("Tout: {qint8, qint32}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformRequantize")
.Input("input: Tin")
.Input("input_scales: float")
.Input("input_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8, qint32}")
.Attr("Tout: {qint8, qint32}")
.Attr("input_quantization_axis: int = -1")
.Attr("input_quantization_min_val: int")
.Attr("input_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformDequantize")
.Input("input: Tin")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8, qint32}")
.Attr("Tout: {float}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UniformQuantizedDot")
.Input("lhs: Tin")
.Input("rhs: Tin")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8}")
.Attr("Tout: {qint32}")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(DotShape);
REGISTER_OP("UniformQuantizedDotHybrid")
.Input("lhs: Tlhs")
.Input("rhs: Trhs")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Output("output: Tout")
.Attr("Tlhs: {float}")
.Attr("Trhs: {qint8}")
.Attr("Tout: {float}")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.SetShapeFn(DotHybridShape);
REGISTER_OP("UniformQuantizedConvolution")
.Input("lhs: Tin")
.Input("rhs: Tin")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: Tout")
.Attr("Tin: {qint8}")
.Attr("Tout: {qint32}")
.Attr("window_strides: list(int) = []")
.Attr("padding: string")
.Attr("explicit_padding: list(int) = []")
.Attr("lhs_dilation: list(int) = []")
.Attr("rhs_dilation: list(int) = []")
.Attr("batch_group_count: int = 1")
.Attr("feature_group_count: int = 1")
.Attr("dimension_numbers: string = ''")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.SetShapeFn(ConvolutionShape);
REGISTER_OP("UniformQuantizedConvolutionHybrid")
.Input("lhs: Tlhs")
.Input("rhs: Trhs")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Output("output: Tout")
.Attr("Tlhs: {float}")
.Attr("Trhs: {qint8}")
.Attr("Tout: {float}")
.Attr("window_strides: list(int) = []")
.Attr("padding: string")
.Attr("explicit_padding: list(int) = []")
.Attr("lhs_dilation: list(int) = []")
.Attr("rhs_dilation: list(int) = []")
.Attr("batch_group_count: int = 1")
.Attr("feature_group_count: int = 1")
.Attr("dimension_numbers: string = ''")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.SetShapeFn(ConvolutionHybridShape);
REGISTER_OP("UniformQuantizedAdd")
.Input("lhs: T")
.Input("rhs: T")
.Input("lhs_scales: float")
.Input("lhs_zero_points: int32")
.Input("rhs_scales: float")
.Input("rhs_zero_points: int32")
.Input("output_scales: float")
.Input("output_zero_points: int32")
.Output("output: T")
.Attr("lhs_quantization_axis: int = -1")
.Attr("lhs_quantization_min_val: int")
.Attr("lhs_quantization_max_val: int")
.Attr("rhs_quantization_axis: int = -1")
.Attr("rhs_quantization_min_val: int")
.Attr("rhs_quantization_max_val: int")
.Attr("output_quantization_axis: int = -1")
.Attr("output_quantization_min_val: int")
.Attr("output_quantization_max_val: int")
.Attr("T: {qint32}")
.SetShapeFn(shape_inference::BroadcastBinaryOpShapeFn);
REGISTER_OP("UniformQuantizedClipByValue")
.Input("operand: T")
.Input("min: T")
.Input("max: T")
.Input("scales: float")
.Input("zero_points: int32")
.Output("output: T")
.Attr("T: {qint32}")
.Attr("quantization_axis: int = -1")
.Attr("quantization_min_val: int")
.Attr("quantization_max_val: int")
.SetShapeFn(shape_inference::UnchangedShape);
} | #include <limits>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
namespace {
constexpr int32_t kInt8Min = std::numeric_limits<int8_t>::min();
constexpr int32_t kInt8Max = std::numeric_limits<int8_t>::max();
constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min();
constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
}  // namespace
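// A note on the INFER_OK / INFER_ERROR shape strings used throughout these
// tests: input shapes are bracketed and separated by ';', '?' marks an
// unknown dimension (or a wholly unknown shape), "d<i>_<j>" in the expected
// output names dimension j of input i, '|' means "either handle", and
// "in<i>" asserts the output is input i's shape handle. INFER_ERROR with an
// empty expected substring merely asserts that inference fails.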
TEST(UniformQuantizedOpsTest, UniformQuantizedDotShapeInference) {
ShapeInferenceTestOp op("UniformQuantizedDot");
INFER_OK(op, "[4,2];[2,3];[];[];[];[];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[];[];[3];[3];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[];[];[3];[3];[3];[3]", "[d0_0,d1_1]");
INFER_ERROR("", op, "[4,2];[6,3];[];[];[];[];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[4];[4];[];[];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[4,2];[2,3];[];[];[3];[];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[];[];[6];[6];[];[]");
INFER_ERROR("", op, "[4,2];[2,3];[];[];[];[];[6];[6]");
}
TEST(UniformQuantizedOpsTest, UniformQuantizedDotHybridShapeInference) {
ShapeInferenceTestOp op("UniformQuantizedDotHybrid");
INFER_OK(op, "[4,2];[2,3];[];[]", "[d0_0,d1_1]");
INFER_OK(op, "[4,2];[2,3];[3];[3]", "[d0_0,d1_1]");
INFER_ERROR("", op, "[4,2];[6,3];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[4,2];[2,3];[3];[]");
INFER_ERROR("", op, "[4,2];[2,3];[6];[6]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerTensor) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[];[];[];[]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,9,4,5];[];[];[];[];[];[]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[2];[2];[];[];[];[]");
INFER_ERROR("scales and zero_points must have same rank.", op,
"[2,3,40,50];[6,3,4,5];[];[];[6];[];[];[]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[];[];[];[];[12];[12]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerChannelRhs) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[6];[6];[];[]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[];[];[12];[12];[];[]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionShapeInferencePerChannelRhsAndOutput) {
ShapeInferenceTestOp op("UniformQuantizedConvolution");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolution")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt8Min)
.Attr("lhs_quantization_max_val", kInt8Max)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[];[];[6];[6];[6];[6]", "[2,6,37,46]");
}
TEST(UniformQuantizedOpsTest,
UniformQuantizedConvolutionHybridShapeInferencePerChannel) {
ShapeInferenceTestOp op("UniformQuantizedConvolutionHybrid");
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedConvolutionHybrid")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tlhs", DT_QINT8)
.Attr("Trhs", DT_QINT8)
.Attr("Tout", DT_QINT32)
.Attr("rhs_quantization_axis", 0)
.Attr("rhs_quantization_min_val", kInt8Min)
.Attr("rhs_quantization_max_val", kInt8Max)
.Attr("padding", "VALID")
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,40,50];[6,3,4,5];[6];[6]", "[2,6,37,46]");
INFER_ERROR("", op, "[2,3,40,50];[6,3,4,5];[12];[12]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/uniform_quant_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/uniform_quant_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
afc1e4f6-41e2-420b-8809-b2b778f8d91b | cpp | tensorflow/tensorflow | linalg_ops | tensorflow/core/ops/linalg_ops.cc | tensorflow/core/ops/linalg_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
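// Validates that `input` is a (possibly batched) square matrix, i.e. shaped
// [..., M, M], merging the two trailing dims, and returns the result in
// `out`. E.g. an input of shape [5, ?, 3] comes back as [5, 3, 3]. Most of
// the shape functions below build on this helper.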
Status MakeBatchSquareMatrix(InferenceContext* c, ShapeHandle input,
ShapeHandle* out) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, 2, &s));
DimensionHandle d;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(s, -2), c->Dim(s, -1), &d));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(s, 0, -2, &batch_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(d, d), out));
return absl::OkStatus();
}
Status BatchUnchangedSquareShapeFn(InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &out));
c->set_output(0, out);
return absl::OkStatus();
}
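// Shape function for BandedTriangularSolve. The matrix is given in banded
// storage as [..., num_bands, M] with 0 < num_bands <= M, the rhs is
// [..., M, N], batch dimensions broadcast, and the output has the rhs's
// shape: e.g. bands [2, 5] with rhs [5, 1] -> [5, 1].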
Status BandedTriangularSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
DimensionHandle num_bands = c->Dim(lhs, -2);
DimensionHandle m = c->Dim(lhs, -1);
if (c->ValueKnown(num_bands) && c->Value(num_bands) <= 0) {
return errors::InvalidArgument("Number of bands must be positive, but is ",
c->Value(num_bands));
}
if (c->ValueKnown(num_bands) && c->ValueKnown(m) &&
c->Value(num_bands) > c->Value(m)) {
return errors::InvalidArgument("Number of bands ", c->Value(num_bands),
" cannot exceed the size of the matrix ",
c->Value(m));
}
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(m, c->Dim(rhs, -2), &m));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
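// Shared shape function for MatrixSolve (square == true) and MatrixSolveLs
// (square == false). Unlike the triangular solves, batch dimensions must
// merge exactly rather than broadcast; for matrix [..., M, N] and rhs
// [..., M, K] the output is [..., N, K], with M == N enforced in the square
// case.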
Status MatrixSolveShapeFn(InferenceContext* c, bool square) {
ShapeHandle lhs;
ShapeHandle rhs;
if (square) {
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs));
} else {
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
}
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape));
DimensionHandle m;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -2), c->Dim(rhs, -2), &m));
DimensionHandle n = c->Dim(lhs, -1);
if (square) {
TF_RETURN_IF_ERROR(c->Merge(m, n, &n));
}
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(lhs_batch_shape, c->Vector(n), &out));
TF_RETURN_IF_ERROR(c->Concatenate(out, c->Vector(c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
Status MatrixTriangularSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
ShapeHandle output_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
c, lhs_batch_shape, rhs_batch_shape, true, &output_batch_shape));
DimensionHandle m;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(lhs, -1), c->Dim(rhs, -2), &m));
ShapeHandle out;
TF_RETURN_IF_ERROR(
c->Concatenate(output_batch_shape, c->Matrix(m, c->Dim(rhs, -1)), &out));
c->set_output(0, out);
return absl::OkStatus();
}
Status SelfAdjointEigV2ShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input));
DimensionHandle n;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle e_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &e_shape));
c->set_output(0, e_shape);
bool compute_v;
TF_RETURN_IF_ERROR(c->GetAttr("compute_v", &compute_v));
if (compute_v) {
ShapeHandle v_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape));
c->set_output(1, v_shape);
} else {
c->set_output(1, c->Vector(0ll));
}
return absl::OkStatus();
}
Status LuShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle n;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -2), c->Dim(input, -1), &n));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle lu_shape;
ShapeHandle p_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(n, n), &lu_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(n), &p_shape));
c->set_output(0, lu_shape);
c->set_output(1, p_shape);
return absl::OkStatus();
}
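// Shape function for Qr. For input [..., M, N] with P = min(M, N), the
// outputs are Q = [..., M, M] and R = [..., M, N] when full_matrices is
// true, or the reduced Q = [..., M, P] and R = [..., P, N] otherwise.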
Status QrShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle m = c->Dim(input, -2);
DimensionHandle n = c->Dim(input, -1);
DimensionHandle p;
TF_RETURN_IF_ERROR(c->Min(m, n, &p));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle q_shape;
ShapeHandle r_shape;
bool full_matrices;
TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices));
if (full_matrices) {
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, m), &q_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, n), &r_shape));
} else {
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(m, p), &q_shape));
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Matrix(p, n), &r_shape));
}
c->set_output(0, q_shape);
c->set_output(1, r_shape);
return absl::OkStatus();
}
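// Shape function for Svd. For input [..., M, N] with P = min(M, N), the
// singular values s are [..., P]. With compute_uv, U is [..., M, M] and V
// is [..., N, N] under full_matrices, else U is [..., M, P] and V is
// [..., N, P]; without compute_uv, U and V are emitted as empty vectors.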
Status SvdShapeFn(InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle m = c->Dim(input, -2);
DimensionHandle n = c->Dim(input, -1);
DimensionHandle p;
TF_RETURN_IF_ERROR(c->Min(m, n, &p));
ShapeHandle batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &batch_shape));
ShapeHandle e_shape;
TF_RETURN_IF_ERROR(c->Concatenate(batch_shape, c->Vector(p), &e_shape));
c->set_output(0, e_shape);
bool compute_uv;
TF_RETURN_IF_ERROR(c->GetAttr("compute_uv", &compute_uv));
if (compute_uv) {
ShapeHandle u_shape;
ShapeHandle v_shape;
bool full_matrices;
TF_RETURN_IF_ERROR(c->GetAttr("full_matrices", &full_matrices));
if (full_matrices) {
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(m, m), &u_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(n, n), &v_shape));
} else {
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(m, p), &u_shape));
TF_RETURN_IF_ERROR(
c->Concatenate(batch_shape, c->Matrix(n, p), &v_shape));
}
c->set_output(1, u_shape);
c->set_output(2, v_shape);
} else {
c->set_output(1, c->Vector(0ll));
c->set_output(2, c->Vector(0ll));
}
return absl::OkStatus();
}
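// Shape function for TridiagonalMatMul. Each diagonal is supplied as
// [..., 1, M] (hence the WithValue(..., 1, ...) check below), the rhs is
// [..., M, N], and the output matches the rhs.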
Status TridiagonalMatMulShapeFn(InferenceContext* c) {
ShapeHandle superdiag;
ShapeHandle maindiag;
ShapeHandle subdiag;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &superdiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &maindiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 2, &subdiag));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(3), 2, &rhs));
ShapeHandle superdiag_batch_shape;
ShapeHandle maindiag_batch_shape;
ShapeHandle subdiag_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(superdiag, 0, -2, &superdiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(maindiag, 0, -2, &maindiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(subdiag, 0, -2, &subdiag_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &superdiag));
TF_RETURN_IF_ERROR(
c->Merge(maindiag_batch_shape, rhs_batch_shape, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(subdiag_batch_shape, rhs_batch_shape, &rhs_batch_shape));
TF_RETURN_IF_ERROR(c->Merge(superdiag, maindiag, &maindiag));
TF_RETURN_IF_ERROR(c->Merge(subdiag, maindiag, &maindiag));
DimensionHandle m_lhs = c->Dim(maindiag, -1);
DimensionHandle m_rhs = c->Dim(rhs, -2);
TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(maindiag, -2), 1, &unused));
c->set_output(0, rhs);
return absl::OkStatus();
}
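// Shape function for TridiagonalSolve. The three diagonals are packed into
// one tensor of shape [..., 3, M] (hence the WithValue(..., 3, ...) check),
// the rhs is [..., M, K], and the output matches the rhs.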
Status TridiagonalSolveShapeFn(InferenceContext* c) {
ShapeHandle lhs;
ShapeHandle rhs;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &lhs));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &rhs));
ShapeHandle lhs_batch_shape;
ShapeHandle rhs_batch_shape;
TF_RETURN_IF_ERROR(c->Subshape(lhs, 0, -2, &lhs_batch_shape));
TF_RETURN_IF_ERROR(c->Subshape(rhs, 0, -2, &rhs_batch_shape));
TF_RETURN_IF_ERROR(
c->Merge(lhs_batch_shape, rhs_batch_shape, &lhs_batch_shape));
DimensionHandle m_lhs = c->Dim(lhs, -1);
DimensionHandle m_rhs = c->Dim(rhs, -2);
TF_RETURN_IF_ERROR(c->Merge(m_lhs, m_rhs, &m_lhs));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(lhs, -2), 3, &m_lhs));
c->set_output(0, rhs);
return absl::OkStatus();
}
}  // namespace
REGISTER_OP("MatrixDeterminant")
.Input("input: T")
.Output("output: T")
.Attr("T: {half, float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("LogMatrixDeterminant")
.Input("input: T")
.Output("sign: T")
.Output("log_abs_determinant: T")
.Attr("T: {half, float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(input, -1), c->Dim(input, -2), &unused));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s));
c->set_output(0, s);
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &out));
c->set_output(1, out);
return absl::OkStatus();
});
REGISTER_OP("MatrixInverse")
.Input("input: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("MatrixExponential")
.Deprecated(
27, "Use Python implementation tf.linalg.matrix_exponential instead.")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("MatrixLogarithm")
.Input("input: T")
.Output("output: T")
.Attr("T: {complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("Cholesky")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("CholeskyGrad")
.Input("l: T")
.Input("grad: T")
.Output("output: T")
.Attr("T: {half, float, double}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("SelfAdjointEig")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half}")
.Deprecated(11, "Use SelfAdjointEigV2 instead.")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input;
TF_RETURN_IF_ERROR(MakeBatchSquareMatrix(c, c->input(0), &input));
DimensionHandle d = c->Dim(input, -1);
DimensionHandle d_plus_1;
TF_RETURN_IF_ERROR(c->Add(d, 1, &d_plus_1));
ShapeHandle s;
TF_RETURN_IF_ERROR(c->Subshape(input, 0, -2, &s));
TF_RETURN_IF_ERROR(c->Concatenate(s, c->Matrix(d_plus_1, d), &s));
c->set_output(0, s);
return absl::OkStatus();
});
REGISTER_OP("Eig")
.Input("input: T")
.Output("e: Tout")
.Output("v: Tout")
.Attr("compute_v: bool = True")
.Attr("T: {float, double, complex64, complex128}")
.Attr("Tout: {complex64, complex128}")
.SetShapeFn(SelfAdjointEigV2ShapeFn);
REGISTER_OP("SelfAdjointEigV2")
.Input("input: T")
.Output("e: T")
.Output("v: T")
.Attr("compute_v: bool = True")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(SelfAdjointEigV2ShapeFn);
REGISTER_OP("Lu")
.Input("input: T")
.Output("lu: T")
.Output("p: output_idx_type")
.Attr("T: {double, float, half, complex64, complex128}")
.Attr("output_idx_type: {int32, int64} = DT_INT32")
.SetShapeFn(LuShapeFn);
REGISTER_OP("MatrixSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
        return MatrixSolveShapeFn(c, /*square=*/true);
});
REGISTER_OP("BandedTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
return BandedTriangularSolveShapeFn(c);
});
REGISTER_OP("MatrixTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {bfloat16, double, float, half, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
return MatrixTriangularSolveShapeFn(c);
});
REGISTER_OP("MatrixSolveLs")
.Input("matrix: T")
.Input("rhs: T")
.Input("l2_regularizer: double")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.Attr("fast: bool = True")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle l2_regularizer;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &l2_regularizer));
        return MatrixSolveShapeFn(c, /*square=*/false);
});
REGISTER_OP("MatrixSquareRoot")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(BatchUnchangedSquareShapeFn);
REGISTER_OP("Qr")
.Input("input: T")
.Output("q: T")
.Output("r: T")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(QrShapeFn);
REGISTER_OP("Svd")
.Input("input: T")
.Output("s: T")
.Output("u: T")
.Output("v: T")
.Attr("compute_uv: bool = True")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, half, complex64, complex128}")
.SetShapeFn(SvdShapeFn);
REGISTER_OP("TridiagonalMatMul")
.Input("superdiag: T")
.Input("maindiag: T")
.Input("subdiag: T")
.Input("rhs: T")
.Output("output: T")
.Attr("T: {double, float, complex64, complex128}")
.SetShapeFn(TridiagonalMatMulShapeFn);
REGISTER_OP("TridiagonalSolve")
.Input("diagonals: T")
.Input("rhs: T")
.Output("output: T")
.Attr("partial_pivoting: bool = True")
.Attr("perturb_singular: bool = False")
.Attr("T: {double, float, complex64, complex128}")
.SetShapeFn(TridiagonalSolveShapeFn);
REGISTER_OP("Einsum")
.Input("inputs: N * T")
.Output("output: T")
.Attr("equation: string")
.Attr("N: int >= 1")
.Attr("T: type")
.SetShapeFn(shape_inference::EinsumShape);
REGISTER_OP("BatchSelfAdjointEig")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float}")
.Deprecated(11, "Use SelfAdjointEigV2 instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixDeterminant")
.Input("input: T")
.Output("output: T")
.Attr("T: {float, double, complex64, complex128}")
.Deprecated(13, "Use MatrixDeterminant instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixInverse")
.Input("input: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixInverse instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchCholesky")
.Input("input: T")
.Output("output: T")
.Attr("T: {double, float}")
.Deprecated(13, "Use Cholesky instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchCholeskyGrad")
.Input("l: T")
.Input("grad: T")
.Output("output: T")
.Attr("T: {float, double}")
.Deprecated(13, "Use CholeskyGrad instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchSelfAdjointEigV2")
.Input("input: T")
.Output("e: T")
.Output("v: T")
.Attr("compute_v: bool = True")
.Attr("T: {double, float}")
.Deprecated(13, "Use SelfAdjointEigV2 instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixSolve instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixTriangularSolve")
.Input("matrix: T")
.Input("rhs: T")
.Output("output: T")
.Attr("lower: bool = True")
.Attr("adjoint: bool = False")
.Attr("T: {double, float}")
.Deprecated(13, "Use MatrixTriangularSolve instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchMatrixSolveLs")
.Input("matrix: T")
.Input("rhs: T")
.Input("l2_regularizer: double")
.Output("output: T")
.Attr("T: {double, float}")
.Attr("fast: bool = True")
.Deprecated(13, "Use MatrixSolveLs instead.")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("BatchSvd")
.Input("input: T")
.Output("s: T")
.Output("u: T")
.Output("v: T")
.Attr("compute_uv: bool = True")
.Attr("full_matrices: bool = False")
.Attr("T: {double, float, complex64, complex128}")
.Deprecated(13, "Use Svd instead.")
.SetShapeFn(shape_inference::UnknownShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(LinalgOpsTest, MatrixDeterminant_ShapeFn) {
ShapeInferenceTestOp op("MatrixDeterminant");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "[1,?,3,4,1,2]");
INFER_OK(op, "[?,?]", "[]");
INFER_OK(op, "[1,?]", "[]");
INFER_OK(op, "[?,1]", "[]");
INFER_OK(op, "[1,?,3,4,?,?]", "[d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,4,1,?]", "[d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,4,?,1]", "[d0_0,d0_1,d0_2,d0_3]");
}
TEST(LinalgOpsTest, UnchangedSquare_ShapeFn) {
for (const char* op_name : {"Cholesky", "CholeskyGrad", "MatrixInverse"}) {
ShapeInferenceTestOp op(op_name);
const string extra_shape = (op.name == "CholeskyGrad" ? ";?" : "");
INFER_OK(op, "?" + extra_shape, "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[1]" + extra_shape);
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op,
"[1,2]" + extra_shape);
INFER_OK(op, "[?,?]" + extra_shape, "[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[1,?]" + extra_shape, "[d0_0,d0_0]");
INFER_OK(op, "[?,1]" + extra_shape, "[d0_1,d0_1]");
INFER_OK(op, "[5,?,7,?,?]" + extra_shape,
"[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]" + extra_shape, "[d0_0,d0_1,d0_2,d0_3,d0_3]");
INFER_OK(op, "[5,?,7,?,1]" + extra_shape, "[d0_0,d0_1,d0_2,d0_4,d0_4]");
}
}
TEST(LinalgOpsTest, SelfAdjointEig_ShapeFn) {
ShapeInferenceTestOp op("SelfAdjointEig");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]");
INFER_OK(op, "[?,?]", "[?,d0_0|d0_1]");
INFER_OK(op, "[1,?]", "[2,d0_0]");
INFER_OK(op, "[?,1]", "[2,d0_1]");
INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,?,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,2,d0_3]");
INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,2,d0_4]");
}
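// Note: the op type handed to NodeDefBuilder below is "Pack", which looks
// like a leftover, but it is harmless here: the INFER_* machinery resolves
// the op via ShapeInferenceTestOp's name ("SelfAdjointEigV2") and only reads
// the attrs off op.node_def, so any registered op name will do.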
TEST(LinalgOpsTest, SelfAdjointEigV2_ShapeFn) {
ShapeInferenceTestOp op("SelfAdjointEigV2");
auto set_compute_v = [&op](bool compute_v) {
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input({{"input", 0, DT_FLOAT}})
.Attr("compute_v", compute_v)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input({{"input", 0, DT_HALF}})
.Attr("compute_v", compute_v)
.Finalize(&op.node_def));
};
set_compute_v(false);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[3,1,2]");
INFER_OK(op, "?", "?;[0]");
INFER_OK(op, "[?,?]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[1,?]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[?,1]", "[d0_0|d0_1];[0]");
INFER_OK(op, "[5,?,7,?,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
INFER_OK(op, "[5,?,7,1,?]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
INFER_OK(op, "[5,?,7,?,1]", "[d0_0,d0_1,d0_2,d0_3|d0_4];[0]");
set_compute_v(true);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[1,?]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[?,1]", "[d0_0|d0_1];[d0_0|d0_1,d0_0|d0_1]");
INFER_OK(op, "[5,?,7,?,?]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,1,?]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
INFER_OK(op, "[5,?,7,?,1]",
"[d0_0,d0_1,d0_2,d0_3|d0_4];[d0_0,d0_1,d0_2,d0_3|d0_4,d0_3|d0_4]");
}
TEST(LinalgOpsTest, MatrixSolve_ShapeFn) {
ShapeInferenceTestOp op("MatrixSolve");
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]");
INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op,
"[5,?];[6,?,?]");
INFER_OK(op, "[?,?];?", "[d0_0|d0_1,?]");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]");
INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]");
INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]");
INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]");
INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]");
INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]");
INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]");
}
TEST(LinalgOpsTest, MatrixTriangularSolve_ShapeFn) {
ShapeInferenceTestOp op("MatrixTriangularSolve");
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[5,?,?];[6]");
INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[?,?];[1,?]", "[d1_0,d1_1]");
INFER_OK(op, "[1,?];[1,?]", "[d0_0|d1_0,d1_1]");
INFER_OK(op, "[?,1];[1,?]", "[d0_1|d1_0,d1_1]");
INFER_OK(op, "[1,1];[?,?]", "[d0_0,d1_1]");
INFER_OK(op, "[1,1];[1,?]", "[d0_0|d0_1|d1_0,d1_1]");
INFER_OK(op, "[10,?,?,?];[?,20,1,?]", "[d0_0,d1_1,d1_2,d1_3]");
INFER_OK(op, "[10,?,1,?];[?,20,1,?]", "[d0_0,d1_1,d0_2|d1_2,d1_3]");
INFER_OK(op, "[10,?,?,1];[?,20,1,?]", "[d0_0,d1_1,d0_3|d1_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,?,?]", "[d0_0,d1_1,d0_2,d1_3]");
INFER_OK(op, "[10,?,1,1];[?,20,1,?]", "[d0_0,d1_1,d0_2|d0_3|d1_2,d1_3]");
}
TEST(LinalgOpsTest, MatrixSolveLs_ShapeFn) {
ShapeInferenceTestOp op("MatrixSolveLs");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "?;?;[]", "?");
INFER_OK(op, "[1,?];[1,?];?", "[d0_1,d1_1]");
INFER_OK(op, "[1,2];[1,3];?", "[d0_1,d1_1]");
INFER_ERROR("Dimensions must be equal, but are 5 and 6", op, "[5,?];[6,?];?");
INFER_OK(op, "[10,?,1,?];[?,20,1,?];?", "[d0_0,d1_1,d0_3,d1_3]");
INFER_OK(op, "[10,20,1,2];[10,20,1,3];?", "[d0_0|d1_0,d0_1|d1_1,d0_3,d1_3]");
INFER_ERROR("Dimensions must be equal, but are 5 and 6", op,
"[10,?,5,?];[?,20,6,?];?");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 10 and 11", op,
"[10,?,5,?];[11,?,5,?];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[?];?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]");
}
TEST(LinalgOpsTest, Qr_ShapeFn) {
ShapeInferenceTestOp op("Qr");
auto set_attrs = [&op](bool full_matrices) {
TF_ASSERT_OK(NodeDefBuilder("test", "Qr")
.Input({"input", 0, DT_FLOAT})
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Qr")
.Input({"input", 0, DT_HALF})
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
};
set_attrs(false);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,?];[d0_0,?,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1,d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
}
TEST(LinalgOpsTest, Svd_ShapeFn) {
ShapeInferenceTestOp op("Svd");
auto set_attrs = [&op](bool compute_uv, bool full_matrices) {
TF_ASSERT_OK(NodeDefBuilder("test", "Svd")
.Input({"input", 0, DT_FLOAT})
.Attr("compute_uv", compute_uv)
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "Svd")
.Input({"input", 0, DT_HALF})
.Attr("compute_uv", compute_uv)
.Attr("full_matrices", full_matrices)
.Finalize(&op.node_def));
};
set_attrs(false, false);
INFER_OK(op, "?", "?;[0];[0]");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[0];[0]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[0];[0]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[0];[0]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[0];[0]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[0];[0]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true, false);
INFER_OK(op, "?", "?;?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[d0_0,d0_1,?];[d0_0,d0_2,?]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_2];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
set_attrs(true, true);
INFER_OK(op, "?", "?;?;?");
INFER_OK(op, "[?,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,?,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,?]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,?,2]", "[d0_0,?];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,2]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,3,2]", "[d0_0,d0_2];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[?,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_OK(op, "[4,2,3]", "[d0_0,d0_1];[d0_0,d0_1,d0_1];[d0_0,d0_2,d0_2]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
}
TEST(LinalgOpsTest, Lu_ShapeFn) {
ShapeInferenceTestOp op("Lu");
INFER_OK(op, "?", "?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?,3,4,1,2]");
INFER_OK(op, "[?,?]", "[d0_0,d0_0];[d0_0]");
INFER_OK(op, "[1,?]", "[d0_0,d0_0];[d0_0]");
INFER_OK(op, "[?,1]", "[d0_1,d0_1];[d0_1]");
INFER_OK(op, "[1,?,3,4,?,?]",
"[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]");
INFER_OK(op, "[1,?,3,4,1,?]",
"[d0_0,d0_1,d0_2,d0_3,d0_4,d0_4];[d0_0,d0_1,d0_2,d0_3,d0_4]");
INFER_OK(op, "[1,?,3,4,?,1]",
"[d0_0,d0_1,d0_2,d0_3,d0_5,d0_5];[d0_0,d0_1,d0_2,d0_3,d0_5]");
}
TEST(LinalgOpsTest, TridiagonalMatMul_ShapeFn) {
ShapeInferenceTestOp op("TridiagonalMatMul");
INFER_OK(op, "?;?;?;?", "in3");
INFER_OK(op, "[1,5];[1,5];[1,5];[?,1]", "in3");
INFER_OK(op, "[1,5];[1,5];[1,5];[5,1]", "in3");
INFER_OK(op, "[?,1,?];[?,1,?];[?,1,?];[?,?,?]", "in3");
INFER_OK(op, "[?,1,5];[?,1,5];[?,1,5];[7,5,2]", "in3");
INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[?,5,2]", "in3");
INFER_OK(op, "[7,1,5];[7,1,5];[7,1,5];[7,5,2]", "in3");
INFER_OK(op, "[7,?,1,5];[7,?,1,5];[7,?,1,5];[7,8,5,2]", "in3");
INFER_OK(op, "[7,8,1,5];[7,8,1,5];[7,8,1,5];[7,8,5,2]", "in3");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[3];[3];[3];[5,1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[3,5];[3,5];[3,5];[5]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [6,4] and [6,8].",
op, "[6,4,3,5];[6,4,3,5];[6,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [?,4] and [6,8].",
op, "[?,4,3,5];[?,4,3,5];[?,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 5 and 6. "
"Shapes are [1,5] and [1,6]",
op, "[1,5];[1,6];[1,5];[6,2]");
INFER_ERROR("Dimension must be 1 but is 3", op, "[3,5];[3,5];[3,5];[5,2]");
}
TEST(LinalgOpsTest, TridiagonalSolve_ShapeFn) {
ShapeInferenceTestOp op("TridiagonalSolve");
INFER_OK(op, "?;?", "in1");
INFER_OK(op, "[3,5];[?,1]", "in1");
INFER_OK(op, "[?,5];[5,1]", "in1");
INFER_OK(op, "[?,5];[?,?]", "in1");
INFER_OK(op, "[?,?];[?,?]", "in1");
INFER_OK(op, "[3,5];[5,1]", "in1");
INFER_OK(op, "[3,5];[5,2]", "in1");
INFER_OK(op, "[?,?,?];[?,?,?]", "in1");
INFER_OK(op, "[?,3,5];[7,5,2]", "in1");
INFER_OK(op, "[7,3,5];[?,5,2]", "in1");
INFER_OK(op, "[7,?,5];[?,5,?]", "in1");
INFER_OK(op, "[7,3,5];[7,5,2]", "in1");
INFER_OK(op, "[7,?,3,5];[7,8,5,2]", "in1");
INFER_OK(op, "[7,8,3,5];[7,8,5,2]", "in1");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3];[5,1]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[3,5];[5]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [6,4] and [6,8].",
op, "[6,4,3,5];[6,8,5,2]");
INFER_ERROR(
"Dimension 1 in both shapes must be equal, but are 4 and 8. "
"Shapes are [?,4] and [6,8].",
op, "[?,4,3,5];[6,8,5,2]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[4,5];[5,2]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[6,4,5];[6,5,2]");
INFER_ERROR("Dimensions must be equal, but are 9 and 5", op, "[3,9];[5,2]");
INFER_ERROR("Dimensions must be equal, but are 9 and 5", op,
"[6,3,9];[6,5,2]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/linalg_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/linalg_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8cea8c52-41ca-4a4d-ba13-9aee3808da0f | cpp | tensorflow/tensorflow | sparse_csr_matrix_ops | tensorflow/core/ops/sparse_csr_matrix_ops.cc | tensorflow/core/ops/sparse_csr_matrix_ops_test.cc | #include <tuple>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
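// CSR sparse matrices travel between these ops as scalar DT_VARIANT tensors;
// the logical dense shape and dtype ride along as handle shape/type data.
// This helper pulls that (shape, dtype) pair off variant input `index`.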
Status GetVariantInput(InferenceContext* c, int index,
ShapeAndType* shape_and_type) {
ShapeHandle variant;
TF_RETURN_IF_ERROR(c->WithRank(c->input(index), 0, &variant));
auto* shapes_and_types = c->input_handle_shapes_and_types(index);
if (shapes_and_types == nullptr || shapes_and_types->size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Unable to access shape and type info from variant input ", index));
}
*shape_and_type = shapes_and_types->at(0);
return absl::OkStatus();
}
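// Checks that `matrix_shape` is a rank-2 or rank-3 (batched) square matrix
// of known rank, merging the two trailing dims into `matrix_dimension`.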
Status ValidateSquareMatrixShape(InferenceContext* c,
const ShapeHandle& matrix_shape,
DimensionHandle* matrix_dimension) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(matrix_shape, 2, &out));
TF_RETURN_IF_ERROR(c->WithRankAtMost(matrix_shape, 3, &out));
if (!c->RankKnown(matrix_shape)) {
return absl::InvalidArgumentError("Sparse matrix has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(c->Dim(matrix_shape, -2),
c->Dim(matrix_shape, -1), matrix_dimension));
return absl::OkStatus();
}
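// Conversions between COO SparseTensors / dense tensors and the CSR variant
// form. In each, the logical dense shape must have a known rank of 2 (one
// matrix) or 3 (a batch), and the variant output itself is a scalar whose
// handle data carries that dense shape.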
REGISTER_OP("SparseTensorToCSRSparseMatrix")
.Input("indices: int64")
.Input("values: T")
.Input("dense_shape: int64")
.Attr("T: {float, double, complex64, complex128}")
.Output("sparse_matrix: variant")
.SetShapeFn([](InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(0), c->input(1), c->input(2)));
auto rank = c->Value(c->Dim(c->input(0), 1));
ShapeHandle dense_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &dense_shape));
TF_RETURN_IF_ERROR(c->WithRank(dense_shape, rank, &dense_shape));
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("T", &dtype));
c->set_output(0, c->Scalar());
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixToSparseTensor")
.Input("sparse_matrix: variant")
.Output("indices: int64")
.Output("values: type")
.Output("dense_shape: int64")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
int rank = c->Rank(sparse_matrix);
ShapeHandle indices = c->Matrix(c->UnknownDim(), rank);
ShapeHandle values = c->Vector(c->UnknownDim());
ShapeHandle dense_shape = c->Vector(rank);
c->set_output(0, indices);
c->set_output(1, values);
c->set_output(2, dense_shape);
return absl::OkStatus();
});
REGISTER_OP("DenseToCSRSparseMatrix")
.Input("dense_input: T")
.Input("indices: int64")
.Attr("T: {float, double, complex64, complex128}")
.Output("sparse_output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle dense_shape = c->input(0);
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank of dense: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
auto rank = c->Rank(dense_shape);
ShapeHandle indices = c->input(1);
if (!c->RankKnown(indices) || c->Rank(indices) != 2) {
return absl::InvalidArgumentError(
absl::StrCat("indices must be a matrix; but its rank is not 2: ",
c->Rank(indices)));
}
auto indices_col = c->Dim(indices, 1);
if (!c->ValueKnown(indices_col) || c->Value(indices_col) != rank) {
return absl::InvalidArgumentError(
absl::StrCat("indices.shape[1] must match rank of dense; saw: ",
c->Value(indices_col), " vs. ", rank));
}
ShapeHandle fake_values_vec = c->Vector(c->Dim(indices, 0));
ShapeHandle fake_shape_shape = c->Vector(rank);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
          c, indices /*indices_shape*/, fake_values_vec /*values_shape*/,
          fake_shape_shape /*shape_shape*/));
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("T", &dtype));
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixToDense")
.Input("sparse_input: variant")
.Output("dense_output: type")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
c->set_output(0, sparse_matrix);
return absl::OkStatus();
});
REGISTER_OP("CSRSparseMatrixComponents")
.Input("csr_sparse_matrix: variant")
.Input("index: int32")
.Output("row_ptrs: int32")
.Output("col_inds: int32")
.Output("values: type")
.Attr("type: {float, double, complex64, complex128}")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle csr_sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(
c->WithRankAtLeast(csr_sparse_matrix, 2, &csr_sparse_matrix));
TF_RETURN_IF_ERROR(
c->WithRankAtMost(csr_sparse_matrix, 3, &csr_sparse_matrix));
ShapeHandle index;
if (c->Rank(c->input(1)) != 0) {
return absl::InvalidArgumentError("index must be a scalar.");
}
if (!c->RankKnown(csr_sparse_matrix)) {
return absl::InvalidArgumentError(
"csr_sparse_matrix has an unknown rank.");
}
auto row_ptrs_dh = c->Dim(csr_sparse_matrix, -2);
TF_RETURN_IF_ERROR(c->Add(row_ptrs_dh, 1, &row_ptrs_dh));
ShapeHandle row_ptrs = c->Vector(row_ptrs_dh);
c->set_output(0, row_ptrs);
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(c->UnknownDim()));
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixNNZ")
.Input("sparse_matrix: variant")
.Output("nnz: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle sparse_matrix = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(sparse_matrix, 2, &sparse_matrix));
TF_RETURN_IF_ERROR(c->WithRankAtMost(sparse_matrix, 3, &sparse_matrix));
if (!c->RankKnown(sparse_matrix)) {
return absl::InvalidArgumentError("sparse_matrix has an unknown rank.");
}
ShapeHandle out;
if (c->Rank(sparse_matrix) == 3) {
out = c->Vector(c->Dim(sparse_matrix, 0));
} else {
out = c->Scalar();
}
c->set_output(0, out);
return absl::OkStatus();
});
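// Shape function for SparseMatrixMatMul (sparse a times dense b).
// transpose_* and adjoint_* are mutually exclusive per operand; after
// resolving them the usual matmul rule applies to the trailing two dims,
// batch dims must merge, and transpose_output swaps the result's rows and
// columns. E.g. a = [3, 5, 7], b = [3, 7, 2] -> output [3, 5, 2].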
REGISTER_OP("SparseMatrixMatMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Attr("transpose_output: bool = false")
.Attr("conjugate_output: bool = false")
.Output("output: T")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
bool transpose_a = false;
bool transpose_b = false;
bool transpose_output = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_output", &transpose_output));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
}
if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
if (transpose_output) {
std::tie(output_rows, output_cols) =
std::make_tuple(output_cols, output_rows);
}
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
return absl::OkStatus();
});
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
REGISTER_OP("_MklNativeSparseMatrixMatMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Attr("transpose_output: bool = false")
.Attr("conjugate_output: bool = false")
.Output("output: T")
.SetShapeFn([](InferenceContext* c) {
VLOG(1) << "_MklNativeSparseMatrixMatMul shape function";
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRank(a_shape, 2, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &b_shape));
      VLOG(1) << "_MklNativeSparseMatrixMatMul shape function: ranks validated";
bool transpose_a = false;
bool transpose_b = false;
bool transpose_output = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_output", &transpose_output));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
}
if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
if (transpose_output) {
std::tie(output_rows, output_cols) =
std::make_tuple(output_cols, output_rows);
}
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
TF_RETURN_IF_ERROR(c->Merge(a_batch_dims, b_batch_dims, &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output(0, out);
      return absl::OkStatus();
});
#endif
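// SparseMatrixMul: element-wise scaling of a CSR matrix by a dense tensor,
// presumably output = a .* b. The checks below require `b` to be a scalar or,
// for a rank-3 batched `a`, shaped [batch_size, 1, 1]. As with the other ops
// returning a CSR variant, the logical dense shape travels through the output
// handle's shapes-and-types while output 0 itself is a scalar variant tensor.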
REGISTER_OP("SparseMatrixMul")
.Input("a: variant")
.Input("b: T")
.Attr("T: type")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
ShapeHandle b_shape;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
ShapeHandle out;
if (c->Rank(b_shape) == 0) {
out = a_shape;
} else if (c->Rank(b_shape) == 3) {
if (c->Rank(a_shape) != 3) {
return absl::UnimplementedError(
"rank of b is 3 but rank of a is not.");
}
if (!(c->Value(c->Dim(b_shape, 1)) == 1 &&
c->Value(c->Dim(b_shape, 2)) == 1)) {
return absl::UnimplementedError(
"b must be a scalar or shaped [batch_size, 1, 1]");
}
DimensionHandle batch_size = c->Dim(a_shape, 0);
TF_RETURN_IF_ERROR(
c->Merge(batch_size, c->Dim(b_shape, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(b_shape, 0, batch_size, &b_shape));
TF_RETURN_IF_ERROR(c->ReplaceDim(a_shape, 0, batch_size, &a_shape));
out = a_shape;
} else {
return absl::UnimplementedError(
"b must be a scalar or shaped [batch_size, 1, 1]");
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
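// SparseMatrixAdd: presumably computes c = alpha * a + beta * b for two CSR
// matrices. alpha and beta must be scalars, and the dense shapes of a and b
// must merge exactly.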
REGISTER_OP("SparseMatrixAdd")
.Input("a: variant")
.Input("b: variant")
.Input("alpha: T")
.Input("beta: T")
.Attr("T: {float, double, complex64, complex128}")
.Output("c: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused_scalar_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_scalar_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused_scalar_shape));
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle b_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(b_shape, 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Merge(a_shape, b_shape, &out));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
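// SparseMatrixSparseMatMul: CSR * CSR -> CSR. Note that in contrast to
// SparseMatrixAdd above, the batch dimensions here are combined with
// broadcasting semantics (BroadcastBinaryOpOutputShapeFnHelper) rather than a
// strict Merge.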
REGISTER_OP("SparseMatrixSparseMatMul")
.Input("a: variant")
.Input("b: variant")
.Attr("type: {float, double, complex64, complex128}")
.Attr("transpose_a: bool = false")
.Attr("transpose_b: bool = false")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.Output("c: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle a_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(a_shape, 3, &a_shape));
if (!c->RankKnown(a_shape)) {
return absl::InvalidArgumentError("a has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle b_shape = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(b_shape, 2, &b_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(b_shape, 3, &b_shape));
if (!c->RankKnown(b_shape)) {
return absl::InvalidArgumentError("b has an unknown rank.");
}
bool transpose_a = false;
bool transpose_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(c->GetAttr("transpose_b", &transpose_b));
bool adjoint_a = false;
bool adjoint_b = false;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
if (adjoint_a && transpose_a) {
return absl::InvalidArgumentError(
"Only one of adjoint_a and transpose_a may be true.");
} else if (adjoint_b && transpose_b) {
return absl::InvalidArgumentError(
"Only one of adjoint_b and transpose_b may be true.");
}
transpose_a = transpose_a || adjoint_a;
transpose_b = transpose_b || adjoint_b;
auto output_rows = c->Dim(a_shape, transpose_a ? -1 : -2);
auto output_cols = c->Dim(b_shape, transpose_b ? -2 : -1);
ShapeHandle a_batch_dims;
ShapeHandle b_batch_dims;
ShapeHandle batch_dims;
TF_RETURN_IF_ERROR(c->Subshape(a_shape, 0, -2, &a_batch_dims));
TF_RETURN_IF_ERROR(c->Subshape(b_shape, 0, -2, &b_batch_dims));
      TF_RETURN_IF_ERROR(BroadcastBinaryOpOutputShapeFnHelper(
          c, a_batch_dims, b_batch_dims, /*incompatible_shape_error=*/true,
          &batch_dims));
shape_inference::DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(a_shape, transpose_a ? -2 : -1),
c->Dim(b_shape, transpose_b ? -1 : -2),
&unused));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
batch_dims, c->Matrix(output_rows, output_cols), &out));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
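// SparseMatrixZeros: builds an all-zero CSR matrix whose dense shape is given
// by the `dense_shape` input, which must describe a rank-2 or rank-3 shape.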
REGISTER_OP("SparseMatrixZeros")
.Input("dense_shape: int64")
.Attr("type: {float, double, complex64, complex128}")
.Output("sparse_matrix: variant")
.SetShapeFn([](InferenceContext* c) {
auto rank = c->NumElements(c->input(0));
ShapeHandle dense_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &dense_shape));
TF_RETURN_IF_ERROR(
c->WithRank(dense_shape, c->Value(rank), &dense_shape));
if (!c->RankKnown(dense_shape) || c->Rank(dense_shape) < 2 ||
c->Rank(dense_shape) > 3) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid rank: ", c->Rank(dense_shape),
". Expected a known rank of either 2 or 3."));
}
DataType dtype;
TF_RETURN_IF_ERROR(c->GetAttr("type", &dtype));
c->set_output_handle_shapes_and_types(0,
{ShapeAndType{dense_shape, dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixTranspose")
.Input("input: variant")
.Attr("conjugate: bool = false")
.Attr("type: {float, double, complex64, complex128}")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle input = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, 2, &input));
TF_RETURN_IF_ERROR(c->WithRankAtMost(input, 3, &input));
if (!c->RankKnown(input)) {
return absl::InvalidArgumentError("input has an unknown rank.");
}
ShapeHandle output;
if (c->Rank(input) == 2) {
output = c->Matrix(c->Dim(input, 1), c->Dim(input, 0));
} else {
output = c->MakeShape(
{c->Dim(input, 0), c->Dim(input, 2), c->Dim(input, 1)});
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{output, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSoftmax")
.Input("logits: variant")
.Attr("type: {float, double}")
.Output("softmax: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle logits = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(logits, 2, &logits));
TF_RETURN_IF_ERROR(c->WithRankAtMost(logits, 3, &logits));
if (!c->RankKnown(logits)) {
return absl::InvalidArgumentError("logits has an unknown rank.");
}
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{logits, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixSoftmaxGrad")
.Input("softmax: variant")
.Input("grad_softmax: variant")
.Attr("type: {float, double}")
.Output("gradient: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle softmax = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(softmax, 2, &softmax));
TF_RETURN_IF_ERROR(c->WithRankAtMost(softmax, 3, &softmax));
if (!c->RankKnown(softmax)) {
return absl::InvalidArgumentError("softmax has an unknown rank.");
}
TF_RETURN_IF_ERROR(GetVariantInput(c, 1, &sparse_matrix_shape_and_type));
ShapeHandle grad_softmax = sparse_matrix_shape_and_type.shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(grad_softmax, 2, &grad_softmax));
TF_RETURN_IF_ERROR(c->WithRankAtMost(grad_softmax, 3, &grad_softmax));
if (!c->RankKnown(grad_softmax)) {
return absl::InvalidArgumentError("grad_softmax has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(softmax, grad_softmax, &softmax));
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{softmax, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("SparseMatrixOrderingAMD")
.Input("input: variant")
.Output("output: int32")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle matrix_shape = sparse_matrix_shape_and_type.shape;
DimensionHandle n;
TF_RETURN_IF_ERROR(ValidateSquareMatrixShape(c, matrix_shape, &n));
ShapeHandle output;
if (c->Rank(matrix_shape) == 2) {
output = c->Vector(c->Dim(matrix_shape, 0));
} else {
output = c->Matrix(c->Dim(matrix_shape, 0), c->Dim(matrix_shape, 1));
}
c->set_output(0, output);
return absl::OkStatus();
});
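// SparseMatrixSparseCholesky: sparse Cholesky factorization of the input
// under the given fill-in reducing `permutation` (such as one produced by
// SparseMatrixOrderingAMD above). The matrix must be square per batch, and
// the last dimension of `permutation` must match the matrix dimension n.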
REGISTER_OP("SparseMatrixSparseCholesky")
.Input("input: variant")
.Input("permutation: int32")
.Attr("type: {float, double, complex64, complex128}")
.Output("output: variant")
.SetShapeFn([](InferenceContext* c) {
ShapeAndType sparse_matrix_shape_and_type;
TF_RETURN_IF_ERROR(GetVariantInput(c, 0, &sparse_matrix_shape_and_type));
ShapeHandle matrix_shape = sparse_matrix_shape_and_type.shape;
DimensionHandle n;
TF_RETURN_IF_ERROR(ValidateSquareMatrixShape(c, matrix_shape, &n));
ShapeHandle perm_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &perm_shape));
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 2, &perm_shape));
if (!c->RankKnown(perm_shape)) {
return absl::InvalidArgumentError("permutation has an unknown rank.");
}
TF_RETURN_IF_ERROR(c->Merge(n, c->Dim(perm_shape, -1), &n));
ShapeHandle matrix_batch_shape;
ShapeHandle perm_batch_shape;
      // Merge the batch prefixes of the matrix ([..., n, n]) and the
      // permutation ([..., n]). The permutation's batch shape must land in
      // perm_batch_shape, not overwrite perm_shape.
      TF_RETURN_IF_ERROR(c->Subshape(matrix_shape, 0, -2, &matrix_batch_shape));
      TF_RETURN_IF_ERROR(c->Subshape(perm_shape, 0, -1, &perm_batch_shape));
      TF_RETURN_IF_ERROR(
          c->Merge(matrix_batch_shape, perm_batch_shape, &matrix_batch_shape));
ShapeHandle out = matrix_shape;
c->set_output_handle_shapes_and_types(
0, {ShapeAndType{out, sparse_matrix_shape_and_type.dtype}});
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(SparseMatrixOpsTest, SparseTensorToCSRSparseMatrix_ShapeFn) {
ShapeInferenceTestOp op("SparseTensorToCSRSparseMatrix");
(*op.node_def.mutable_attr())["T"].set_type(DT_FLOAT);
op.input_tensors.resize(3);
INFER_ERROR("Expected a known rank", op, "?;?;?");
INFER_ERROR("either 2 or 3", op, "[?,4];?;?");
INFER_OK(op, "[?,2];?;?", "[]");
INFER_OK(op, "[?,3];?;?", "[]");
Tensor dense_shape_t = test::AsTensor<int64_t>({5, 6});
op.input_tensors[2] = &dense_shape_t;
INFER_ERROR("Shape must be rank 3 but is rank 2 for", op, "[?,3];?;?");
INFER_OK(op, "[?,2];?;?", "[]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixToSparseTensor_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixToSparseTensor");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[4,5]";
INFER_OK(op, "[]", "[?,2];[?];[2]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[?,2];[?];[2]");
shapes_and_types[0].first = "[4,5,6]";
INFER_OK(op, "[]", "[?,3];[?];[3]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[?,3];[?];[3]");
}
TEST(SparseMatrixOpsTest, DenseToCSRSparseMatrix_ShapeFn) {
ShapeInferenceTestOp op("DenseToCSRSparseMatrix");
(*op.node_def.mutable_attr())["T"].set_type(DT_FLOAT);
INFER_ERROR("Expected a known rank", op, "?;?");
INFER_ERROR("either 2 or 3", op, "[?];?");
INFER_OK(op, "[?,?];[?,2]", "[]");
INFER_OK(op, "[?,?,?];[?,3]", "[]");
INFER_ERROR("indices.shape[1] must match rank of dense; saw: 2 vs. 3", op,
"[?,?,?];[?,2]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixToDense_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixToDense");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[?,?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[?,?,?]");
}
TEST(SparseMatrixOpsTest, CSRSparseMatrixComponents_ShapeFn) {
ShapeInferenceTestOp op("CSRSparseMatrixComponents");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types[0].first = "[4,5]";
INFER_OK(op, "[];[]", "[5];[?];[?]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[];[]", "[?];[?];[?]");
shapes_and_types[0].first = "[19,34,55]";
INFER_OK(op, "[];[]", "[35];[?];[?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[]", "[?];[?];[?]");
shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("index must be a scalar", op, "[];?");
}
TEST(SparseMatrixOpsTest, SparseMatrixMatMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixMatMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
auto set_options = [&op](bool transpose_a, bool transpose_b, bool adjoint_a,
bool adjoint_b, bool transpose_output) {
TF_ASSERT_OK(NodeDefBuilder("test", "SparseMatrixMatMul")
.Input("a", 0, DT_VARIANT)
.Input("b", 1, DT_FLOAT)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Attr("transpose_output", transpose_output)
.Finalize(&op.node_def));
};
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "?";
INFER_ERROR("a has an unknown rank", op, "[];?");
a_shapes_and_types[0].first = "[?]";
INFER_ERROR("must be at least rank 2 but is rank 1", op, "[];?");
a_shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[];?", "[?,?]");
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];?", "[?,?,?]");
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,?]", "[?,3,d1_2]");
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,4]", "[?,3,d1_2]");
a_shapes_and_types[0].first = "[?,?,6]";
INFER_OK(op, "[];[?,6,?]", "[?,?,d1_2]");
a_shapes_and_types[0].first = "[?,?,5]";
INFER_ERROR("must be equal, but are 5 and 6 for", op, "[];[?,6,?]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/true);
a_shapes_and_types[0].first = "[?,3,?]";
INFER_OK(op, "[];[?,?,4]", "[?,d1_2,3]");
a_shapes_and_types[0].first = "[3,?]";
INFER_OK(op, "[];[?,4]", "[d1_1,3]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,?,d1_1]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,?,d1_1]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false,
              /*transpose_output=*/true);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[];[?,?,?]", "[?,d1_1,?]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("Only one of adjoint_a and transpose_a", op, "[];[?,?,?]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/true,
              /*adjoint_a=*/true, /*adjoint_b=*/true,
              /*transpose_output=*/false);
a_shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("Only one of adjoint_b and transpose_b", op, "[];[?,?,?]");
}
TEST(SparseMatrixOpsTest, SparseMatrixAdd_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixAdd");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[3,4]", "[3,4]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[3,4,5]", "[3,4,5]");
INFER_OK(op, "[];[];?;?", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[];[];[]", "[]");
set_shapes("[?,?]", "[?,?]");
INFER_ERROR("must be rank 0 but is rank 1", op, "[];[];?;[?]");
set_shapes("[?,?,?]", "?");
INFER_ERROR("b has an unknown rank", op, "[];[];?;?");
set_shapes("[?,?,?]", "[?,?]");
INFER_ERROR("must be equal", op, "[];[];?;?");
}
TEST(SparseMatrixOpsTest, SparseMatrixSparseMatMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSparseMatMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
auto set_options = [&op](bool transpose_a, bool transpose_b, bool adjoint_a,
bool adjoint_b) {
TF_ASSERT_OK(NodeDefBuilder("test", "SparseMatrixMatMul")
.Input("a", 0, DT_VARIANT)
.Input("b", 1, DT_FLOAT)
.Attr("transpose_a", transpose_a)
.Attr("transpose_b", transpose_b)
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Finalize(&op.node_def));
};
set_options(false, false, false, false);
set_shapes("?", "?");
INFER_ERROR("has an unknown rank", op, "[];[]");
set_shapes("[?]", "[?,?]");
INFER_ERROR("must be at least rank 2 but is rank 1", op, "[];[]");
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,3,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,3,?]", "[?,?,4]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,6]", "[?,6,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?,5]", "[?,6,?]");
INFER_ERROR("must be equal, but are 5 and 6 for", op, "[];[]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/true,
              /*adjoint_a=*/false, /*adjoint_b=*/false);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
  set_options(/*transpose_a=*/true, /*transpose_b=*/false,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_ERROR("Only one of adjoint_a and transpose_a", op, "[];[]");
  set_options(/*transpose_a=*/false, /*transpose_b=*/true,
              /*adjoint_a=*/true, /*adjoint_b=*/true);
set_shapes("[?,?,?]", "[?,?,?]");
INFER_ERROR("Only one of adjoint_b and transpose_b", op, "[];[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixTranspose_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixTranspose");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[3,4,5]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "[3,4]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "?";
INFER_ERROR("input has an unknown rank", op, "[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixSoftmax_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSoftmax");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
shapes_and_types[0].first = "[?,?,?]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "[?,?]";
INFER_OK(op, "[]", "[]");
shapes_and_types[0].first = "?";
INFER_ERROR("logits has an unknown rank", op, "[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixSoftmaxGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixSoftmaxGrad");
std::vector<ShapeInferenceTestOp::ShapeAndType> a_shapes_and_types(1);
std::vector<ShapeInferenceTestOp::ShapeAndType> b_shapes_and_types(1);
a_shapes_and_types[0].second = DT_FLOAT;
b_shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&a_shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(&b_shapes_and_types);
auto set_shapes = [&a_shapes_and_types, &b_shapes_and_types](
const string& a_shape, const string& b_shape) {
a_shapes_and_types[0].first = a_shape;
b_shapes_and_types[0].first = b_shape;
};
set_shapes("[?,?,?]", "[?,?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[?,?]", "[?,?]");
INFER_OK(op, "[];[]", "[]");
set_shapes("[3,4]", "[5,6]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 3 and 5", op,
"[];[]");
set_shapes("?", "[?,?]");
INFER_ERROR("softmax has an unknown rank", op, "[];[]");
set_shapes("[?,?,?]", "?");
INFER_ERROR("grad_softmax has an unknown rank", op, "[];[]");
}
TEST(SparseMatrixOpsTest, SparseMatrixMul_ShapeFn) {
ShapeInferenceTestOp op("SparseMatrixMul");
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types(1);
shapes_and_types[0].second = DT_FLOAT;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types[0].first = "[3,4]";
INFER_OK(op, "[];[]", "[]");
shapes_and_types[0].first = "[5,3,4]";
INFER_OK(op, "[];[?,1,1]", "[]");
shapes_and_types[0].first = "[?,?,?]";
INFER_ERROR("b must be a scalar or shaped [batch_size, 1, 1]", op,
"[];[3,4]");
shapes_and_types[0].first = "[3,4]";
INFER_ERROR("b must be a scalar or shaped", op, "[];[3,4]");
shapes_and_types[0].first = "[3,4,5]";
INFER_ERROR("b must be a scalar or shaped", op, "[];[3,4,5]");
shapes_and_types[0].first = "[3,4,5]";
INFER_ERROR("must be equal, but are 3 and 4", op, "[];[4,1,1]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_csr_matrix_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_csr_matrix_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5de08bc7-3a08-43ab-ba5d-304198e5e664 | cpp | tensorflow/tensorflow | rnn_ops | tensorflow/core/ops/rnn_ops.cc | tensorflow/core/ops/rnn_ops_test.cc | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
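// GRUBlockCell computes one time step of a GRU cell. For reference, a sketch
// of the standard GRU formulation that these outputs correspond to:
//   [r_bar, u_bar] = [x, h_prev] * w_ru + b_ru
//   r = sigmoid(r_bar),  u = sigmoid(u_bar)
//   c = tanh([x, h_prev .* r] * w_c + b_c)
//   h = (1 - u) .* c + u .* h_prev
// The shape function only needs batch_size (from x) and cell_size (from
// h_prev) to type all four [batch_size, cell_size] outputs.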
REGISTER_OP("GRUBlockCell")
.Attr("T: {float}")
.Input("x: T")
.Input("h_prev: T")
.Input("w_ru: T")
.Input("w_c: T")
.Input("b_ru: T")
.Input("b_c: T")
.Output("r: T")
.Output("u: T")
.Output("c: T")
.Output("h: T")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, h_prev;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &h_prev));
DimensionHandle batch_size = c->Dim(x, 0);
DimensionHandle cell_size = c->Dim(h_prev, 1);
ShapeHandle output = c->Matrix(batch_size, cell_size);
for (int i = 0; i < 4; ++i) {
c->set_output(i, output);
}
return absl::OkStatus();
});
REGISTER_OP("GRUBlockCellGrad")
.Attr("T: {float}")
.Input("x: T")
.Input("h_prev: T")
.Input("w_ru: T")
.Input("w_c: T")
.Input("b_ru: T")
.Input("b_c: T")
.Input("r: T")
.Input("u: T")
.Input("c: T")
.Input("d_h: T")
.Output("d_x: T")
.Output("d_h_prev: T")
.Output("d_c_bar: T")
.Output("d_r_bar_u_bar: T")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, h_prev, w_ru;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &h_prev));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &w_ru));
DimensionHandle batch_size = c->Dim(x, 0);
DimensionHandle cell_size = c->Dim(h_prev, 1);
DimensionHandle twice_cell_size = c->Dim(w_ru, 1);
ShapeHandle batch_cell_shape = c->Matrix(batch_size, cell_size);
c->set_output(0, x);
c->set_output(1, batch_cell_shape);
c->set_output(2, batch_cell_shape);
c->set_output(3, c->Matrix(batch_size, twice_cell_size));
return absl::OkStatus();
});
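// LSTMBlockCell computes one time step of an LSTM cell, optionally with
// peephole connections. A sketch of the standard fused formulation (gate
// blocks in ICFO order, matching the "dicfo" gradient output below):
//   [i, ci, f, o] = [x, h_prev] * w + b
//   cs = sigmoid(i) .* tanh(ci) + sigmoid(f + forget_bias) .* cs_prev
//   h  = sigmoid(o) .* tanh(cs)
// with the peephole weights wci/wcf/wco folded into the corresponding gates
// when use_peephole is set, and cs clipped to cell_clip. All seven outputs
// are [batch_size, cell_size].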
REGISTER_OP("LSTMBlockCell")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Output("i: T")
.Output("cs: T")
.Output("f: T")
.Output("o: T")
.Output("ci: T")
.Output("co: T")
.Output("h: T")
.Attr("forget_bias: float = 1.0")
.Attr("cell_clip: float = 3.0")
.Attr("use_peephole: bool = false")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, cs_prev;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &cs_prev));
DimensionHandle batch_size = c->Dim(x, 0);
DimensionHandle cell_size = c->Dim(cs_prev, 1);
ShapeHandle output = c->Matrix(batch_size, cell_size);
for (int i = 0; i < 7; ++i) {
c->set_output(i, output);
}
return absl::OkStatus();
});
REGISTER_OP("LSTMBlockCellGrad")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Input("i: T")
.Input("cs: T")
.Input("f: T")
.Input("o: T")
.Input("ci: T")
.Input("co: T")
.Input("cs_grad: T")
.Input("h_grad: T")
.Output("cs_prev_grad: T")
.Output("dicfo: T")
.Output("wci_grad: T")
.Output("wcf_grad: T")
.Output("wco_grad: T")
.Attr("use_peephole: bool")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, cs_prev;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &cs_prev));
DimensionHandle batch_size = c->Dim(x, 0);
DimensionHandle cell_size = c->Dim(cs_prev, 1);
DimensionHandle cell_size_times_4;
TF_RETURN_IF_ERROR(c->Multiply(cell_size, 4, &cell_size_times_4));
ShapeHandle cell_size_vec = c->Vector(cell_size);
c->set_output(0, c->Matrix(batch_size, cell_size));
c->set_output(1, c->Matrix(batch_size, cell_size_times_4));
c->set_output(2, cell_size_vec);
c->set_output(3, cell_size_vec);
c->set_output(4, cell_size_vec);
return absl::OkStatus();
});
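// BlockLSTM applies LSTMBlockCell across a whole time-major sequence: x is
// [time_len, batch_size, input_size]. cell_size is recovered from the bias
// vector, whose length is 4 * cell_size (one block per ICFO gate), so all
// seven outputs are [time_len, batch_size, cell_size].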
REGISTER_OP("BlockLSTM")
.Input("seq_len_max: int64")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Output("i: T")
.Output("cs: T")
.Output("f: T")
.Output("o: T")
.Output("ci: T")
.Output("co: T")
.Output("h: T")
.Attr("forget_bias: float = 1.0")
.Attr("cell_clip: float = 3.0")
.Attr("use_peephole: bool = false")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 1), 1, &b));
DimensionHandle timelen = c->Dim(x, 0);
DimensionHandle batch_size = c->Dim(x, 1);
DimensionHandle cell_size;
      TF_RETURN_IF_ERROR(c->Divide(c->Dim(b, 0), 4,
                                   /*evenly_divisible=*/true, &cell_size));
DCHECK_EQ(7, c->num_outputs());
ShapeHandle output = c->MakeShape({timelen, batch_size, cell_size});
for (int i = 0; i < 7; ++i) {
c->set_output(i, output);
}
return absl::OkStatus();
});
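// BlockLSTMV2 has the same shape behavior as BlockLSTM; it differs only in
// its attributes: there is no forget_bias, and cell_clip defaults to 0.0
// rather than 3.0.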
REGISTER_OP("BlockLSTMV2")
.Input("seq_len_max: int64")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Output("i: T")
.Output("cs: T")
.Output("f: T")
.Output("o: T")
.Output("ci: T")
.Output("co: T")
.Output("h: T")
.Attr("cell_clip: float = 0.0")
.Attr("use_peephole: bool = false")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(c->num_inputs() - 1), 1, &b));
DimensionHandle timelen = c->Dim(x, 0);
DimensionHandle batch_size = c->Dim(x, 1);
DimensionHandle cell_size;
      TF_RETURN_IF_ERROR(c->Divide(c->Dim(b, 0), 4,
                                   /*evenly_divisible=*/true, &cell_size));
DCHECK_EQ(7, c->num_outputs());
ShapeHandle output = c->MakeShape({timelen, batch_size, cell_size});
for (int i = 0; i < 7; ++i) {
c->set_output(i, output);
}
return absl::OkStatus();
});
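// BlockLSTMGrad: gradients for BlockLSTM. Each gradient output simply takes
// the shape of the corresponding forward input, which is all the shape
// function needs to express.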
REGISTER_OP("BlockLSTMGrad")
.Input("seq_len_max: int64")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Input("i: T")
.Input("cs: T")
.Input("f: T")
.Input("o: T")
.Input("ci: T")
.Input("co: T")
.Input("h: T")
.Input("cs_grad: T")
.Input("h_grad: T")
.Output("x_grad: T")
.Output("cs_prev_grad: T")
.Output("h_prev_grad: T")
.Output("w_grad: T")
.Output("wci_grad: T")
.Output("wcf_grad: T")
.Output("wco_grad: T")
.Output("b_grad: T")
.Attr("use_peephole: bool")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, cs_prev, h_prev, w, wci, wco, wcf, b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &cs_prev));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &h_prev));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 2, &w));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 1, &wci));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(6), 1, &wcf));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(7), 1, &wco));
TF_RETURN_IF_ERROR(c->WithRank(c->input(8), 1, &b));
c->set_output(0, x);
c->set_output(1, cs_prev);
c->set_output(2, h_prev);
c->set_output(3, w);
c->set_output(4, wci);
      c->set_output(5, wcf);
      c->set_output(6, wco);
c->set_output(7, b);
return absl::OkStatus();
});
REGISTER_OP("BlockLSTMGradV2")
.Input("seq_len_max: int64")
.Input("x: T")
.Input("cs_prev: T")
.Input("h_prev: T")
.Input("w: T")
.Input("wci: T")
.Input("wcf: T")
.Input("wco: T")
.Input("b: T")
.Input("i: T")
.Input("cs: T")
.Input("f: T")
.Input("o: T")
.Input("ci: T")
.Input("co: T")
.Input("h: T")
.Input("cs_grad: T")
.Input("h_grad: T")
.Output("x_grad: T")
.Output("cs_prev_grad: T")
.Output("h_prev_grad: T")
.Output("w_grad: T")
.Output("wci_grad: T")
.Output("wcf_grad: T")
.Output("wco_grad: T")
.Output("b_grad: T")
.Attr("use_peephole: bool")
.Attr("T: {half, float}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle x, cs_prev, h_prev, w, wci, wco, wcf, b;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &x));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &cs_prev));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &h_prev));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 2, &w));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 1, &wci));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(6), 1, &wcf));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(7), 1, &wco));
TF_RETURN_IF_ERROR(c->WithRank(c->input(8), 1, &b));
c->set_output(0, x);
c->set_output(1, cs_prev);
c->set_output(2, h_prev);
c->set_output(3, w);
c->set_output(4, wci);
      c->set_output(5, wcf);
      c->set_output(6, wco);
c->set_output(7, b);
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
static string JoinedCopies(const string& s, int copies) {
string res;
for (int i = 0; i < copies; ++i) {
strings::StrAppend(&res, i > 0 ? ";" : "", s);
}
return res;
}
TEST(RnnOpsTest, GRUBlockCell_ShapeFn) {
ShapeInferenceTestOp op("GRUBlockCell");
INFER_ERROR("must be rank 2", op, "[?];?;?;?;?;?");
INFER_ERROR("must be rank 2", op, "?;[?];?;?;?;?");
INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?,?];[?,?];[?,?]");
INFER_OK(op, "[?,?];[?,?];?;?;?;?",
"[d0_0,d1_1];[d0_0,d1_1];[d0_0,d1_1];[d0_0,d1_1]");
}
TEST(RnnOpsTest, GRUBlockCellGrad_ShapeFn) {
ShapeInferenceTestOp op("GRUBlockCellGrad");
INFER_ERROR("must be rank 2", op, "[?];?;?;?;?;?;?;?;?;?");
INFER_ERROR("must be rank 2", op, "?;[?];?;?;?;?;?;?;?;?");
INFER_ERROR("must be rank 2", op, "?;?;[?];?;?;?;?;?;?;?");
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?", "[?,?];[?,?];[?,?];[?,?]");
INFER_OK(op, "[?,?];[?,?];[?,?];?;?;?;?;?;?;?",
"in0;[d0_0,d1_1];[d0_0,d1_1];[d0_0,d2_1]");
}
TEST(RnnOpsTest, LSTMBlockCell_ShapeFn) {
ShapeInferenceTestOp op("LSTMBlockCell");
string input_suffix = strings::StrCat(";", JoinedCopies("?", 6));
INFER_ERROR("must be rank 2", op, "[?];?" + input_suffix);
INFER_ERROR("must be rank 2", op, "?;[?]" + input_suffix);
INFER_OK(op, "?;?" + input_suffix, JoinedCopies("[?,?]", 7));
INFER_OK(op, "[?,?];[?,?]" + input_suffix, JoinedCopies("[d0_0,d1_1]", 7));
}
TEST(RnnOpsTest, LSTMBlockCellGrad_ShapeFn) {
ShapeInferenceTestOp op("LSTMBlockCellGrad");
string input_suffix = strings::StrCat(";", JoinedCopies("?", 14));
INFER_ERROR("must be rank 2", op, "[?];?" + input_suffix);
INFER_ERROR("must be rank 2", op, "?;[?]" + input_suffix);
INFER_OK(op, "?;?" + input_suffix, "[?,?];[?,?];[?];[?];[?]");
INFER_OK(op, "[?,?];[?,?]" + input_suffix,
"[d0_0,d1_1];[d0_0,?];[d1_1];[d1_1];[d1_1]");
INFER_OK(op, "[1,2];[3,4]" + input_suffix,
"[d0_0,d1_1];[d0_0,16];[d1_1];[d1_1];[d1_1]");
}
TEST(RnnOpsTest, BlockLSTM_ShapeFn) {
ShapeInferenceTestOp op("BlockLSTM");
TF_ASSERT_OK(NodeDefBuilder("test", "BlockLSTM")
.Input({"seq_len_max", 0, DT_INT64})
.Input({"x", 0, DT_FLOAT})
.Input({"cs_prev", 0, DT_FLOAT})
.Input({"h_prev", 0, DT_FLOAT})
.Input({"w", 0, DT_FLOAT})
.Input({"wci", 0, DT_FLOAT})
.Input({"wcf", 0, DT_FLOAT})
.Input({"wco", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Finalize(&op.node_def));
string infix = ";" + JoinedCopies("?", 6) + ";";
INFER_ERROR("must be rank 3", op, "?;[?]" + infix + "?");
INFER_ERROR("must be rank 1", op, "?;?" + infix + "[?,?]");
INFER_OK(op, "?;?" + infix + "?", JoinedCopies("[?,?,?]", 7));
INFER_OK(op, "?;[?,?,?]" + infix + "?", JoinedCopies("[d1_0,d1_1,?]", 7));
INFER_OK(op, "?;[?,?,?]" + infix + "[?]", JoinedCopies("[d1_0,d1_1,?]", 7));
INFER_OK(op, "?;[?,?,?]" + infix + "[20]", JoinedCopies("[d1_0,d1_1,5]", 7));
INFER_ERROR("must be evenly divisible", op, "?;?" + infix + "[11]");
}
TEST(RnnOpsTest, BlockLSTMGrad_ShapeFn) {
ShapeInferenceTestOp op("BlockLSTMGrad");
TF_ASSERT_OK(NodeDefBuilder("test", "BlockLSTMGrad")
.Input({"seq_len_max", 0, DT_INT64})
.Input({"x", 0, DT_FLOAT})
.Input({"cs_prev", 0, DT_FLOAT})
.Input({"h_prev", 0, DT_FLOAT})
.Input({"w", 0, DT_FLOAT})
.Input({"wci", 0, DT_FLOAT})
.Input({"wcf", 0, DT_FLOAT})
.Input({"wco", 0, DT_FLOAT})
.Input({"b", 0, DT_FLOAT})
.Input({"i", 0, DT_FLOAT})
.Input({"cs", 0, DT_FLOAT})
.Input({"f", 0, DT_FLOAT})
.Input({"o", 0, DT_FLOAT})
.Input({"ci", 0, DT_FLOAT})
.Input({"co", 0, DT_FLOAT})
.Input({"h", 0, DT_FLOAT})
.Input({"cs_grad", 0, DT_FLOAT})
.Input({"h_grad", 0, DT_FLOAT})
.Finalize(&op.node_def));
string suffix = ";" + JoinedCopies("?", 9);
INFER_ERROR("must be rank 3", op, "?;[?];?;?;?;?;?;?;?" + suffix);
INFER_ERROR("must be rank 2", op, "?;?;[1];?;?;?;?;?;?" + suffix);
INFER_ERROR("must be rank 2", op, "?;?;?;[1];?;?;?;?;?" + suffix);
INFER_ERROR("must be rank 2", op, "?;?;?;?;[1];?;?;?;?" + suffix);
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;[1,?];?;?;?" + suffix);
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;?;[1,?];?;?" + suffix);
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;?;?;[1,?];?" + suffix);
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;?;?;?;[1,?]" + suffix);
INFER_OK(
op, JoinedCopies("?", 18),
"[?,?,?];" + JoinedCopies("[?,?]", 3) + ";" + JoinedCopies("[?]", 4));
string input = strings::StrCat("?;[?,?,?];", JoinedCopies("[?,?]", 3), ";",
JoinedCopies("[?]", 4), suffix);
string expected = "in1";
for (int i = 1; i < 8; ++i) {
strings::StrAppend(&expected, ";in", (i + 1));
}
INFER_OK(op, input, expected);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/rnn_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/rnn_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
669e6ad1-db68-4efd-9487-266bfd0a1db6 | cpp | tensorflow/tensorflow | state_ops | tensorflow/core/ops/state_ops.cc | tensorflow/core/ops/state_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("VariableV2")
.Output("ref: Ref(dtype)")
.Attr("shape: shape")
.Attr("dtype: type")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ExplicitShape);
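// Unlike VariableV2 above, the legacy Variable op reports an unknown shape
// whenever the shape attr has no dimensions (shape.dims() <= 0), presumably
// for backward compatibility with graphs where an empty shape meant
// "unspecified". As a result, even an explicitly scalar shape comes back as
// unknown here, while VariableV2 reports [].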
REGISTER_OP("Variable")
.Output("ref: Ref(dtype)")
.Attr("shape: shape")
.Attr("dtype: type")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
if (shape.dims() <= 0) {
return shape_inference::UnknownShape(c);
}
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("IsVariableInitialized")
.Input("ref: Ref(dtype)")
.Output("is_initialized: bool")
.Attr("dtype: type")
.SetAllowsUninitializedInput()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TemporaryVariable")
.Output("ref: Ref(dtype)")
.Attr("shape: shape")
.Attr("dtype: type")
.Attr("var_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ExplicitShape);
REGISTER_OP("DestroyTemporaryVariable")
.Input("ref: Ref(T)")
.Output("value: T")
.Attr("T: type")
.Attr("var_name: string")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Assign")
.Input("ref: Ref(T)")
.Input("value: T")
.Output("output_ref: Ref(T)")
.Attr("T: type")
.Attr("validate_shape: bool = true")
.Attr("use_locking: bool = true")
.SetAllowsUninitializedInput()
.SetShapeFn([](InferenceContext* c) {
bool validate_shape;
TF_RETURN_IF_ERROR(c->GetAttr("validate_shape", &validate_shape));
if (validate_shape) {
return shape_inference::MergeBothInputsShapeFn(c);
}
c->set_output(0, c->input(1));
return absl::OkStatus();
});
REGISTER_OP("AssignAdd")
.Input("ref: Ref(T)")
.Input("value: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(shape_inference::MergeBothInputsShapeFn);
REGISTER_OP("AssignSub")
.Input("ref: Ref(T)")
.Input("value: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("use_locking: bool = false")
.SetShapeFn(shape_inference::MergeBothInputsShapeFn);
namespace {
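// Shape function shared by the ref-typed Scatter* ops below. The output
// always has the shape of `ref`; unless `updates` is a scalar, its shape must
// merge with indices.shape + ref.shape[1:]. For example, ref: [P, D] with
// indices: [N] requires updates: [N, D].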
Status ScatterUpdateShape(InferenceContext* c) {
ShapeHandle var_shape = c->input(0);
ShapeHandle indices_shape = c->input(1);
ShapeHandle unused_updates_shape;
ShapeHandle concat;
ShapeHandle var_subshape;
TF_RETURN_IF_ERROR(c->Subshape(var_shape, 1, &var_subshape));
TF_RETURN_IF_ERROR(c->Concatenate(indices_shape, var_subshape, &concat));
TF_RETURN_IF_ERROR(
InferenceContext::Rank(c->input(2)) == 0
? absl::OkStatus()
: c->Merge(c->input(2), concat, &unused_updates_shape));
c->set_output(0, var_shape);
return absl::OkStatus();
}
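// Shape function shared by the (Resource)ScatterNd* ops below. Both `indices`
// and `updates` must have rank >= 1; the detailed check that the trailing
// dimension of `indices` indexes a prefix of the input shape is delegated to
// shape_inference::ScatterNdShapeHelper. For resource variables, the handle's
// recorded shape is preferred over the raw input shape when available.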
Status ScatterNdUpdateShape(InferenceContext* c) {
ShapeHandle input_shape = c->input(0);
if (c->input_handle_shapes_and_types(0) != nullptr) {
const auto& shape_and_type = *(c->input_handle_shapes_and_types(0));
if (!shape_and_type.empty()) {
input_shape = shape_and_type[0].shape;
}
}
ShapeHandle indices_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices_shape));
ShapeHandle updates_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &updates_shape));
return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape,
input_shape);
}
}
REGISTER_OP("ScatterUpdate")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterAdd")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterSub")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterMul")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterDiv")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterMin")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: {half, bfloat16, float, double, int32, int64}")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterMax")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: {half, bfloat16, float, double, int32, int64}")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.SetShapeFn(ScatterUpdateShape);
REGISTER_OP("ScatterNdUpdate")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ResourceScatterNdUpdate")
.Input("ref: resource")
.Input("indices: Tindices")
.Input("updates: T")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ResourceScatterNdAdd")
.Input("ref: resource")
.Input("indices: Tindices")
.Input("updates: T")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ResourceScatterNdSub")
.Input("ref: resource")
.Input("indices: Tindices")
.Input("updates: T")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ResourceScatterNdMin")
.Input("ref: resource")
.Input("indices: Tindices")
.Input("updates: T")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ResourceScatterNdMax")
.Input("ref: resource")
.Input("indices: Tindices")
.Input("updates: T")
.Attr("T: type")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = true")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ScatterNdAdd")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ScatterNdSub")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ScatterNdMax")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("ScatterNdMin")
.Input("ref: Ref(T)")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output_ref: Ref(T)")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.Attr("use_locking: bool = false")
.Attr("bad_indices_policy: string = ''")
.SetShapeFn(ScatterNdUpdateShape);
REGISTER_OP("CountUpTo")
.Input("ref: Ref(T)")
.Output("output: T")
.Attr("limit: int")
.Attr("T: {int32, int64}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle output;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &output));
c->set_output(0, output);
return absl::OkStatus();
});
REGISTER_OP("ResourceCountUpTo")
.Input("resource: resource")
.Output("output: T")
.Attr("limit: int")
.Attr("T: {int32, int64}")
.SetShapeFn([](InferenceContext* c) {
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data == nullptr || handle_data->empty()) {
return errors::InvalidArgument("Handle has no shape/type information.");
}
shape_inference::ShapeAndType shape_and_type = (*handle_data)[0];
DataType value_dtype;
TF_RETURN_IF_ERROR(c->GetAttr("T", &value_dtype));
if (value_dtype != shape_and_type.dtype) {
return errors::InvalidArgument(
"Data types do not match: ", DataTypeString(value_dtype), " and ",
DataTypeString(shape_and_type.dtype));
}
ShapeHandle output;
TF_RETURN_IF_ERROR(c->WithRank(shape_and_type.shape, 0, &output));
c->set_output(0, output);
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(StateOpsTest, Assign_ShapeFn) {
ShapeInferenceTestOp op("Assign");
TF_ASSERT_OK(NodeDefBuilder("test", "Assign")
.Input("ref", 0, DT_FLOAT_REF)
.Input("value", 1, DT_FLOAT)
.Attr("validate_shape", true)
.Finalize(&op.node_def));
INFER_OK(op, "[1,2];[1,2]", "in0");
INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]");
INFER_ERROR("Dimension 0 in both shapes must be equal, but are 1 and 3", op,
"[1,?];[3,2]");
TF_ASSERT_OK(NodeDefBuilder("test", "Assign")
.Input("ref", 0, DT_FLOAT_REF)
.Input("value", 1, DT_FLOAT)
.Attr("validate_shape", false)
.Finalize(&op.node_def));
INFER_OK(op, "[1,2];[1,2,3,4]", "in1");
}
TEST(StateOpsTest, ScatterUpdate_ShapeFn) {
ShapeInferenceTestOp op("ScatterUpdate");
TF_ASSERT_OK(NodeDefBuilder("test", "ScatterUpdate")
.Input("ref", 0, DT_FLOAT_REF)
.Input("indices", 0, DT_INT32)
.Input("updates", 1, DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "[1,2];[3];[3,2]", "in0");
INFER_OK(op, "[1,2];[3];[?,2]", "in0");
INFER_OK(op, "[1,2];[3];?", "in0");
INFER_OK(op, "[1,2];[];[2]", "in0");
INFER_ERROR("Shapes must be equal rank, but are 1 and 0", op, "[2];[];[2]");
}
TEST(StateOpsTest, ResourceScatterNdUpdate_ShapeFn) {
ShapeInferenceTestOp op("ResourceScatterNdUpdate");
TF_ASSERT_OK(NodeDefBuilder("test", "ResourceScatterNdUpdate")
.Input("ref", 0, DT_RESOURCE)
.Input("indices", 0, DT_INT32)
.Input("updates", 1, DT_FLOAT)
.Finalize(&op.node_def));
std::vector<ShapeInferenceTestOp::ShapeAndType> shapes_and_types;
op.input_resource_handle_shapes_and_types.push_back(&shapes_and_types);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
op.input_resource_handle_shapes_and_types.push_back(nullptr);
shapes_and_types.emplace_back("[?,?]", DT_FLOAT);
INFER_OK(op, "[?];[?,2];[?]", "");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[?];[?,2];[]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[?];[8,2];[9]");
}
TEST(StateOpsTest, TemporaryVariable_ShapeFn) {
ShapeInferenceTestOp op("TemporaryVariable");
TensorShape shape({1, 2, 3});
TF_ASSERT_OK(NodeDefBuilder("test", "TemporaryVariable")
.Attr("shape", shape)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
}
TEST(StateOpsTest, Variable_ShapeFn) {
ShapeInferenceTestOp op("Variable");
TF_ASSERT_OK(NodeDefBuilder("test", "Variable")
.Attr("shape", PartialTensorShape())
.Finalize(&op.node_def));
INFER_OK(op, "", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "Variable")
.Attr("shape", TensorShape({}))
.Finalize(&op.node_def));
INFER_OK(op, "", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "Variable")
.Attr("shape", TensorShape({1, 2, 3}))
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
}
TEST(StateOpsTest, VariableV2_ShapeFn) {
ShapeInferenceTestOp op("VariableV2");
TF_ASSERT_OK(NodeDefBuilder("test", "VariableV2")
.Attr("shape", PartialTensorShape())
.Finalize(&op.node_def));
INFER_OK(op, "", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "VariableV2")
.Attr("shape", TensorShape({}))
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
TF_ASSERT_OK(NodeDefBuilder("test", "VariableV2")
.Attr("shape", TensorShape({1, 2, 3}))
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/state_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/state_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c181e2d3-689a-486d-aee3-8f9ceee8eb9e | cpp | tensorflow/tensorflow | parsing_ops | tensorflow/core/ops/parsing_ops.cc | tensorflow/core/ops/parsing_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/example_proto_helper.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
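// Helpers shared by the ParseExample family of shape functions below. Each
// one appends the output shapes for a single feature group and advances
// *output_idx, so callers can lay the groups out in the op's declared output
// order.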
template <typename TensorShapeType>
Status AddDenseOutputShapes(const std::vector<TensorShapeType>& dense_shapes,
const ShapeHandle& prefix, InferenceContext* c,
int* output_idx) {
for (const auto& dense_shape : dense_shapes) {
ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(dense_shape, &s));
TF_RETURN_IF_ERROR(c->Concatenate(prefix, s, &s));
c->set_output((*output_idx)++, s);
}
return absl::OkStatus();
}
void AddSparseOutputShapes(int num_sparse, const ShapeHandle input_shape,
int64_t rank_delta, InferenceContext* c,
int* output_idx) {
shape_inference::DimensionOrConstant rank(c->UnknownDim());
if (c->RankKnown(input_shape)) {
rank = c->Rank(input_shape) + rank_delta;
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Matrix(c->UnknownDim(), rank));
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
for (int i = 0; i < num_sparse; ++i) {
c->set_output((*output_idx)++, c->Vector(rank));
}
}
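// Ragged features produce parallel output lists: one values vector of unknown
// length per feature, followed by one row_splits vector of length
// num_examples + 1. When ragged_rank_2 is set (ParseExampleV2 below passes
// false; the flag presumably serves the sequence-example variants), a second
// list of inner splits vectors is appended.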
Status AddRaggedOutputShapes(int num_ragged, bool ragged_rank_2,
const DimensionHandle& num_examples,
InferenceContext* c, int* output_idx) {
DimensionHandle num_splits;
TF_RETURN_IF_ERROR(c->Add(num_examples, 1, &num_splits));
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(num_splits));
}
if (ragged_rank_2) {
for (int i = 0; i < num_ragged; ++i) {
c->set_output((*output_idx)++, c->Vector(c->UnknownDim()));
}
}
return absl::OkStatus();
}
void AddDenseLengthsShapes(int num_dense, const ShapeHandle& shape,
InferenceContext* c, int* output_idx) {
for (int i = 0; i < num_dense; ++i) {
c->set_output((*output_idx)++, shape);
}
}
}
REGISTER_OP("DecodeRaw")
.Input("bytes: string")
.Output("output: out_type")
.Attr(
"out_type: "
"{half,float,double,int32,uint16,uint8,int16,int8,int64,complex64,"
"complex128,bool,bfloat16}")
.Attr("little_endian: bool = true")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(
c->input(0), c->Vector(InferenceContext::kUnknownDim), &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("DecodePaddedRaw")
.Input("input_bytes: string")
.Input("fixed_length: int32")
.Output("output: out_type")
.Attr(
"out_type: {half,float,double,int32,uint16,uint8,int16,int8,int64,"
"bfloat16}")
.Attr("little_endian: bool = true")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle fixed_length;
TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(1, &fixed_length));
DataType out_type;
TF_RETURN_IF_ERROR(c->GetAttr("out_type", &out_type));
int32_t data_type_size = DataTypeSize(out_type);
DimensionHandle width;
    TF_RETURN_IF_ERROR(c->Divide(fixed_length, data_type_size,
                                 /*evenly_divisible=*/true, &width));
ShapeHandle out;
TF_RETURN_IF_ERROR(c->Concatenate(c->input(0), c->Vector(width), &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("DecodeCompressed")
.Input("bytes: string")
.Output("output: string")
.Attr("compression_type: string = ''")
.SetShapeFn(shape_inference::UnchangedShape);
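// ParseExample (v1): parses a batch of serialized Example protos. Outputs are
// grouped as sparse_indices, sparse_values, sparse_shapes, then dense_values;
// each dense output is the rank-1 batch prefix concatenated with the
// corresponding dense_shapes attr entry.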
REGISTER_OP("ParseExample")
.Input("serialized: string")
.Input("names: string")
.Input("sparse_keys: Nsparse * string")
.Input("dense_keys: Ndense * string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: Nsparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: Nsparse * int64")
.Output("dense_values: Tdense")
.Attr("Nsparse: int >= 0")
.Attr("Ndense: int >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 1));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &names));
int output_idx = 0;
AddSparseOutputShapes(attrs.num_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
return absl::OkStatus();
});
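// Layout sketch (added, illustrative): with num_sparse = 2, dense_shapes =
// {[1], [1, 2]} and serialized of shape [batch], the outputs come out as
//   sparse_indices: 2 x [?, 2]   sparse_values: 2 x [?]
//   sparse_shapes:  2 x [2]      dense_values:  [batch, 1], [batch, 1, 2]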
REGISTER_OP("ParseExampleV2")
.Input("serialized: string")
.Input("names: string")
.Input("sparse_keys: string")
.Input("dense_keys: string")
.Input("ragged_keys: string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: num_sparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: num_sparse * int64")
.Output("dense_values: Tdense")
.Output("ragged_values: ragged_value_types")
.Output("ragged_row_splits: ragged_split_types")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("num_sparse: int >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("ragged_value_types: list({float,int64,string}) >= 0")
.Attr("ragged_split_types: list({int32,int64}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 2));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &names));
DimensionHandle num_examples = c->UnknownDim();
if (c->RankKnown(input) && c->Rank(input) == 1) {
num_examples = c->Dim(input, 0);
}
int output_idx = 0;
AddSparseOutputShapes(attrs.num_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(attrs.num_ragged, false,
num_examples, c, &output_idx));
return absl::OkStatus();
});
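// Note (added): unlike ParseExample, the serialized input here may be a
// scalar (one example) or a vector (a batch); ragged outputs follow the
// dense values, with row-splits vectors of length batch + 1 when the batch
// size is statically known.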
REGISTER_OP("ParseSingleExample")
.Input("serialized: string")
.Input("dense_defaults: Tdense")
.Output("sparse_indices: num_sparse * int64")
.Output("sparse_values: sparse_types")
.Output("sparse_shapes: num_sparse * int64")
.Output("dense_values: Tdense")
.Attr("num_sparse: int >= 0")
.Attr("sparse_keys: list(string) >= 0")
.Attr("dense_keys: list(string) >= 0")
.Attr("sparse_types: list({float,int64,string}) >= 0")
.Attr("Tdense: list({float,int64,string}) >= 0")
.Attr("dense_shapes: list(shape) >= 0")
.SetShapeFn([](InferenceContext* c) {
ParseSingleExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &input));
int output_idx = 0;
AddSparseOutputShapes(attrs.sparse_keys.size(), input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(
AddDenseOutputShapes(attrs.dense_shapes, input, c, &output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseSequenceExample")
.Input("serialized: string")
.Input("debug_name: string")
.Input("context_dense_defaults: Tcontext_dense")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Output("feature_list_dense_lengths: Nfeature_list_dense * int64")
.Attr("feature_list_dense_missing_assumed_empty: list(string) >= 0")
.Attr("context_sparse_keys: list(string) >= 0")
.Attr("context_dense_keys: list(string) >= 0")
.Attr("feature_list_sparse_keys: list(string) >= 0")
.Attr("feature_list_dense_keys: list(string) >= 0")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Ncontext_dense: int >= 0 = 0")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ParseSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &names));
DimensionHandle num_examples = c->Dim(input, 0);
ShapeHandle feature_list_dense_prefix =
c->Matrix(num_examples, c->UnknownDim());
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
feature_list_dense_prefix, c,
&output_idx));
AddDenseLengthsShapes(attrs.num_feature_list_dense, input, c,
&output_idx);
return absl::OkStatus();
});
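// Output ordering sketch (added): context sparse triples (rank_delta 1),
// context dense values prefixed by [batch], feature-list sparse triples
// (rank_delta 2, for the added batch and time dims), feature-list dense
// values prefixed by [batch, ?] (unknown time dim), then one [batch]
// lengths vector per feature-list dense feature.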
REGISTER_OP("ParseSequenceExampleV2")
.Input("serialized: string")
.Input("debug_name: string")
.Input("context_sparse_keys: string")
.Input("context_dense_keys: string")
.Input("context_ragged_keys: string")
.Input("feature_list_sparse_keys: string")
.Input("feature_list_dense_keys: string")
.Input("feature_list_ragged_keys: string")
.Input("feature_list_dense_missing_assumed_empty: bool")
.Input("context_dense_defaults: Tcontext_dense")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("context_ragged_values: context_ragged_value_types")
.Output("context_ragged_row_splits: context_ragged_split_types")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Output("feature_list_dense_lengths: Nfeature_list_dense * int64")
.Output("feature_list_ragged_values: feature_list_ragged_value_types")
.Output("feature_list_ragged_outer_splits: feature_list_ragged_split_types")
.Output("feature_list_ragged_inner_splits: feature_list_ragged_split_types")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("context_ragged_value_types: list({float,int64,string}) >= 0 = []")
.Attr("context_ragged_split_types: list({int32,int64}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr(
"feature_list_ragged_value_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_ragged_split_types: list({int32,int64}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ParseSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c, 2));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &input));
ShapeHandle names;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(1), 1, &names));
ShapeHandle feature_list_dense_prefix;
TF_RETURN_IF_ERROR(c->Concatenate(input, c->UnknownShapeOfRank(1),
&feature_list_dense_prefix));
DimensionHandle num_examples = c->UnknownDim();
if (c->RankKnown(input) && c->Rank(input) == 1) {
num_examples = c->Dim(input, 0);
}
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(attrs.num_context_ragged, false,
num_examples, c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
feature_list_dense_prefix, c,
&output_idx));
AddDenseLengthsShapes(attrs.num_feature_list_dense, input, c,
&output_idx);
TF_RETURN_IF_ERROR(AddRaggedOutputShapes(
attrs.num_feature_list_ragged, true, num_examples, c, &output_idx));
return absl::OkStatus();
});
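// Note (added): as in ParseExampleV2, serialized may be rank 0 or rank 1;
// feature-list ragged features additionally emit inner splits, hence the
// ragged_rank_2=true in the AddRaggedOutputShapes call above.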
REGISTER_OP("ParseSingleSequenceExample")
.Input("serialized: string")
.Input("feature_list_dense_missing_assumed_empty: string")
.Input("context_sparse_keys: Ncontext_sparse * string")
.Input("context_dense_keys: Ncontext_dense * string")
.Input("feature_list_sparse_keys: Nfeature_list_sparse * string")
.Input("feature_list_dense_keys: Nfeature_list_dense * string")
.Input("context_dense_defaults: Tcontext_dense")
.Input("debug_name: string")
.Output("context_sparse_indices: Ncontext_sparse * int64")
.Output("context_sparse_values: context_sparse_types")
.Output("context_sparse_shapes: Ncontext_sparse * int64")
.Output("context_dense_values: Tcontext_dense")
.Output("feature_list_sparse_indices: Nfeature_list_sparse * int64")
.Output("feature_list_sparse_values: feature_list_sparse_types")
.Output("feature_list_sparse_shapes: Nfeature_list_sparse * int64")
.Output("feature_list_dense_values: feature_list_dense_types")
.Attr("Ncontext_sparse: int >= 0 = 0")
.Attr("Ncontext_dense: int >= 0 = 0")
.Attr("Nfeature_list_sparse: int >= 0 = 0")
.Attr("Nfeature_list_dense: int >= 0 = 0")
.Attr("context_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("Tcontext_dense: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_types: list({float,int64,string}) >= 0 = []")
.Attr("context_dense_shapes: list(shape) >= 0 = []")
.Attr("feature_list_sparse_types: list({float,int64,string}) >= 0 = []")
.Attr("feature_list_dense_shapes: list(shape) >= 0 = []")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ParseSingleSequenceExampleAttrs attrs;
TF_RETURN_IF_ERROR(attrs.Init(c));
ShapeHandle input;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &input));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
int output_idx = 0;
AddSparseOutputShapes(attrs.num_context_sparse, input, 1, c, &output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.context_dense_shapes, input,
c, &output_idx));
AddSparseOutputShapes(attrs.num_feature_list_sparse, input, 2, c,
&output_idx);
TF_RETURN_IF_ERROR(AddDenseOutputShapes(attrs.feature_list_dense_shapes,
c->UnknownShapeOfRank(1), c,
&output_idx));
return absl::OkStatus();
});
REGISTER_OP("ParseTensor")
.Input("serialized: string")
.Output("output: out_type")
.Attr("out_type: type")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("SerializeTensor")
.Input("tensor: T")
.Output("serialized: string")
.Attr("T: type")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("DecodeJSONExample")
.Input("json_examples: string")
.Output("binary_examples: string")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DecodeCSV")
.Input("records: string")
.Input("record_defaults: OUT_TYPE")
.Output("output: OUT_TYPE")
.Attr("OUT_TYPE: list({float,double,int32,int64,string})")
.Attr("field_delim: string = ','")
.Attr("use_quote_delim: bool = true")
.Attr("na_value: string = ''")
.Attr("select_cols: list(int) = []")
.SetShapeFn([](InferenceContext* c) {
for (int i = 1; i < c->num_inputs(); ++i) {
ShapeHandle v;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(i), 1, &v));
if (c->Rank(c->input(i)) == 1 && c->Value(c->Dim(v, 0)) > 1) {
return errors::InvalidArgument(
"Shape of a default must be a length-0 or length-1 vector, or a "
"scalar.");
}
}
for (int i = 0; i < c->num_outputs(); ++i) c->set_output(i, c->input(0));
return absl::OkStatus();
});
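// Usage note (added): each record default must be a scalar or a vector of
// length 0 or 1, and every one of the N outputs simply takes the shape of
// the records input (records of shape [batch] give N outputs of shape
// [batch]).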
REGISTER_OP("StringToNumber")
.Input("string_tensor: string")
.Output("output: out_type")
.Attr("out_type: {float, double, int32, int64, uint32, uint64} = DT_FLOAT")
.SetShapeFn(shape_inference::UnchangedShape);
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ParsingOpsTest, DecodeRaw_ShapeFn) {
ShapeInferenceTestOp op("DecodeRaw");
INFER_OK(op, "?", "?");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1,d0_2,?]");
}
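// Reading guide (added) for the INFER_OK / INFER_ERROR shape strings used in
// this file (see shape_inference_testutil.h): input shapes and expected
// output shapes are ';'-separated, '?' is an unknown shape or dim,
// "d<i>_<j>" asserts the output reuses dim j of input i, and "in<i>" asserts
// the output is exactly input i's shape. So "[d0_0,d0_1,d0_2,?]" above means
// "keep all three input dims, append one unknown dim".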
TEST(ParsingOpsTest, DecodeCSV_ShapeFn) {
ShapeInferenceTestOp op("DecodeCSV");
auto set_n_outputs = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
std::vector<DataType> out_types;
for (int i = 0; i < n; ++i) {
src_list.emplace_back("b", 0, DT_FLOAT);
out_types.push_back(DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeCSV")
.Input("a", 0, DT_STRING)
.Input(src_list)
.Attr("OUT_TYPE", out_types)
.Finalize(&op.node_def));
};
set_n_outputs(2);
INFER_OK(op, "?;?;?", "in0;in0");
INFER_OK(op, "[1,2,?,4];?;?", "in0;in0");
INFER_OK(op, "[1,2,?,4];[?];[?]", "in0;in0");
INFER_OK(op, "?;?;[]", "in0;in0");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "?;?;[1,2]");
INFER_ERROR("must be at most rank 1 but is rank 2", op, "?;[3,4];?");
INFER_ERROR("Shape of a default must be", op, "?;?;[2]");
INFER_ERROR("Shape of a default must be", op, "?;[2];?");
}
static std::vector<PartialTensorShape> MakeDenseShapes(int size,
bool add_extra_shape,
int unknown_outer_dims) {
std::vector<PartialTensorShape> shapes(size);
for (int i = 0; i < size; ++i) {
if (i == 0) {
shapes[i].Clear();
for (int d = 0; d < unknown_outer_dims; ++d) {
shapes[i].AddDim(-1);
}
} else {
shapes[i] = shapes[i - 1];
}
shapes[i].AddDim(i + 1);
}
if (add_extra_shape) shapes.push_back(PartialTensorShape({}));
return shapes;
}
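// Illustrative (added): MakeDenseShapes(3, false, 0) returns
// {[1], [1,2], [1,2,3]} -- each shape extends the previous one by a trailing
// dim. unknown_outer_dims = k prepends k unknown (-1) dims to every shape,
// and add_extra_shape appends one extra scalar shape, used below to provoke
// "len(dense_keys) != len(dense_shapes)"-style errors.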
TEST(ParsingOpsTest, ParseExample_ShapeFn) {
ShapeInferenceTestOp op("ParseExample");
auto set_outputs = [&op](int num_sparse, int num_dense,
bool add_extra_shape = false,
int unknown_outer_dims = 0) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseExample")
.Input("serialized", 0, DT_STRING)
.Input("names", 0, DT_STRING)
.Input(NodeOutList(num_sparse, string_in))
.Input(NodeOutList(num_dense, string_in))
.Input(NodeOutList(num_dense, string_in))
.Attr("sparse_types", DataTypeList(num_sparse, DT_FLOAT))
.Attr("dense_shapes", MakeDenseShapes(num_dense, add_extra_shape,
unknown_outer_dims))
.Finalize(&op.node_def));
};
  set_outputs(/*num_sparse=*/0, /*num_dense=*/0);
INFER_OK(op, "?;?", "");
INFER_OK(op, "[10];[20]", "");
INFER_ERROR("must be rank 1", op, "[1,2];?");
INFER_ERROR("must be rank 1", op, "?;[2,3]");
  set_outputs(/*num_sparse=*/2, /*num_dense=*/3);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3]"));
  set_outputs(2, 3, /*add_extra_shape=*/true);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op,
"?;?;?;?;?;?;?;?;?;?");
  set_outputs(2, 3, /*add_extra_shape=*/false, /*unknown_outer_dims=*/1);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,?,1];[?,?,1,2];[?,?,1,2,3]"));
INFER_OK(op, "[?];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
  set_outputs(2, 3, /*add_extra_shape=*/true, /*unknown_outer_dims=*/1);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op,
"?;?;?;?;?;?;?;?;?;?");
  set_outputs(2, 3, /*add_extra_shape=*/false, /*unknown_outer_dims=*/2);
INFER_ERROR("shapes[0] has unknown rank or unknown inner dimensions", op,
"?;?;?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseSequenceExample_ShapeFn) {
ShapeInferenceTestOp op("ParseSequenceExample");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_feature_list_sparse,
int num_feature_list_dense,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
string string_in("test");
NodeDefBuilder::NodeOut node_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSequenceExample")
.Input("serialized", 0, DT_STRING)
.Input("debug_name", 0, DT_STRING)
.Input(NodeOutList(num_context_dense, node_in))
.Attr("Ncontext_sparse", num_context_sparse)
.Attr("Ncontext_dense", num_context_dense)
.Attr("Nfeature_list_sparse", num_feature_list_sparse)
.Attr("Nfeature_list_dense", num_feature_list_dense)
.Attr("feature_list_dense_missing_assumed_empty",
std::vector<string>(num_feature_list_dense, string_in))
.Attr("context_sparse_keys",
std::vector<string>(num_context_sparse, string_in))
.Attr("context_dense_keys",
std::vector<string>(num_context_dense, string_in))
.Attr("feature_list_sparse_keys",
std::vector<string>(num_feature_list_sparse, string_in))
.Attr("feature_list_dense_keys",
std::vector<string>(num_feature_list_dense, string_in))
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0);
INFER_OK(op, "[?];[?]", "");
INFER_OK(op, "[8];[8]", "");
INFER_ERROR("must be rank 1", op, "[];[?]");
INFER_ERROR("must be rank 1", op, "[?];[]");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3, 0, 0);
INFER_OK(op, "[?];[?];?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3]"));
  set_outputs(0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3);
INFER_OK(op, "[?];[?]",
("[?,3];[?,3];[?];[?];[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3];"
"in0;in0;in0"));
set_outputs(2, 3, 2, 3);
INFER_OK(op, "[7];[7];?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?,3];[?,3];[?];[?];[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3];"
"in0;in0;in0"));
  set_outputs(1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR(
"num_context_dense (1) must match the size of "
"context_dense_types (1) and context_dense_shapes (2)",
op, "[?];[?];?");
}
TEST(ParsingOpsTest, ParseSingleSequenceExample_ShapeFn) {
ShapeInferenceTestOp op("ParseSingleSequenceExample");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_feature_list_sparse,
int num_feature_list_dense,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSingleSequenceExample")
.Input("serialized", 0, DT_STRING)
.Input("feature_list_dense_missing_assumed_empty", 0, DT_STRING)
.Input(NodeOutList(num_context_sparse, string_in))
.Input(NodeOutList(num_context_dense, string_in))
.Input(NodeOutList(num_feature_list_sparse, string_in))
.Input(NodeOutList(num_feature_list_dense, string_in))
.Input(NodeOutList(num_context_dense, string_in))
.Input("debug_name", 0, DT_STRING)
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0);
INFER_OK(op, "?;?;?", "");
INFER_OK(op, "[];[20];?", "");
INFER_ERROR("must be rank 0", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[2,3];?");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3, 0, 0);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];[?];[?];[1];[1];"
"[1];[1,2];[1,2,3]"));
  set_outputs(0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3);
INFER_OK(op, "?;?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
set_outputs(2, 3, 2, 3);
INFER_OK(op, "?;?;?;?;?;?;?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];[?];[?];[1];[1];"
"[1];[1,2];[1,2,3];"
"[?,2];[?,2];[?];[?];[2];[2];"
"[?,1];[?,1,2];[?,1,2,3]"));
  set_outputs(1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR("len(context_dense_keys) != len(context_dense_shapes)", op,
"?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseExampleV2_ShapeFn) {
ShapeInferenceTestOp op("ParseExampleV2");
auto set_outputs = [&op](int num_sparse, int num_dense, int num_ragged,
bool add_extra_shape = false,
int unknown_outer_dims = 0) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
NodeDefBuilder::NodeOut string_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseExampleV2")
.Input("serialized", 0, DT_STRING)
.Input("names", 0, DT_STRING)
.Input("sparse_keys", 0, DT_STRING)
.Input("dense_keys", 0, DT_STRING)
.Input("ragged_keys", 0, DT_STRING)
.Input(NodeOutList(num_dense, string_in))
.Attr("num_sparse", num_sparse)
.Attr("sparse_types", DataTypeList(num_sparse, DT_FLOAT))
.Attr("ragged_value_types", DataTypeList(num_ragged, DT_FLOAT))
.Attr("ragged_split_types", DataTypeList(num_ragged, DT_INT32))
.Attr("dense_shapes", MakeDenseShapes(num_dense, add_extra_shape,
unknown_outer_dims))
.Finalize(&op.node_def));
};
  set_outputs(/*num_sparse=*/0, /*num_dense=*/0, /*num_ragged=*/0);
INFER_OK(op, "?;?;[0];[0];[0]", "");
INFER_OK(op, "[10];[10];[0];[0];[0]", "");
INFER_OK(op, "[];[];[0];[0];[0]", "");
INFER_ERROR("must be at most rank 1", op, "[1,2];?;?;?;?");
INFER_ERROR("must be at most rank 1", op, "?;[2,3];?;?;?");
  set_outputs(/*num_sparse=*/2, /*num_dense=*/3, /*num_ragged=*/4);
INFER_OK(op, "[?];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[11];[11];[11];[11]"));
INFER_OK(op, "[];?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(2, 3, 0, /*add_extra_shape=*/true);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op, "?;?;?;?;?;?;?;?");
  set_outputs(2, 3, 0, /*add_extra_shape=*/true, /*unknown_outer_dims=*/1);
INFER_ERROR("len(dense_keys) != len(dense_shapes)", op, "?;?;?;?;?;?;?;?");
  set_outputs(2, 3, 0, /*add_extra_shape=*/false, /*unknown_outer_dims=*/1);
INFER_OK(op, "[?];?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
INFER_OK(op, "[10];?;?;?;?;?;?;?",
("[?,2];[?,2];[?];[?];[2];[2];"
"[d0_0,?,1];[d0_0,?,1,2];[d0_0,?,1,2,3]"));
  set_outputs(2, 3, 0, /*add_extra_shape=*/false, /*unknown_outer_dims=*/2);
INFER_ERROR("shapes[0] has unknown rank or unknown inner dimensions", op,
"?;?;?;?;?;?;?;?");
}
TEST(ParsingOpsTest, ParseSequenceExampleV2_ShapeFn) {
ShapeInferenceTestOp op("ParseSequenceExampleV2");
auto set_outputs = [&op](int num_context_sparse, int num_context_dense,
int num_context_ragged, int num_feature_list_sparse,
int num_feature_list_dense,
int num_feature_list_ragged,
bool add_extra_shape = false) {
using NodeOutList = std::vector<NodeDefBuilder::NodeOut>;
using DataTypeList = std::vector<DataType>;
string string_in("test");
NodeDefBuilder::NodeOut node_in{"a", 0, DT_STRING};
TF_ASSERT_OK(
NodeDefBuilder("test", "ParseSequenceExampleV2")
.Input("serialized", 0, DT_STRING)
.Input("debug_name", 0, DT_STRING)
.Input("context_sparse_keys", 0, DT_STRING)
.Input("context_dense_keys", 0, DT_STRING)
.Input("context_ragged_keys", 0, DT_STRING)
.Input("feature_list_sparse_keys", 0, DT_STRING)
.Input("feature_list_dense_keys", 0, DT_STRING)
.Input("feature_list_ragged_keys", 0, DT_STRING)
.Input("feature_list_dense_missing_assumed_empty", 0, DT_BOOL)
.Input(NodeOutList(num_context_dense, node_in))
.Attr("Ncontext_sparse", num_context_sparse)
.Attr("Nfeature_list_sparse", num_feature_list_sparse)
.Attr("Nfeature_list_dense", num_feature_list_dense)
.Attr("context_sparse_types",
DataTypeList(num_context_sparse, DT_FLOAT))
.Attr("context_dense_types",
DataTypeList(num_context_dense, DT_FLOAT))
.Attr("context_dense_shapes",
MakeDenseShapes(num_context_dense, add_extra_shape, 0))
.Attr("feature_list_sparse_types",
DataTypeList(num_feature_list_sparse, DT_FLOAT))
.Attr("feature_list_dense_types",
DataTypeList(num_feature_list_dense, DT_FLOAT))
.Attr("feature_list_dense_shapes",
MakeDenseShapes(num_feature_list_dense, add_extra_shape, 0))
.Attr("context_ragged_value_types",
DataTypeList(num_context_ragged, DT_FLOAT))
.Attr("context_ragged_split_types",
DataTypeList(num_context_ragged, DT_INT32))
.Attr("feature_list_ragged_value_types",
DataTypeList(num_feature_list_ragged, DT_FLOAT))
.Attr("feature_list_ragged_split_types",
DataTypeList(num_feature_list_ragged, DT_INT32))
.Finalize(&op.node_def));
};
set_outputs(0, 0, 0, 0, 0, 0);
INFER_OK(op, "?;[?];?;?;?;?;?;?;?", "");
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?", "");
INFER_OK(op, "[8];[8];?;?;?;?;?;?;?", "");
INFER_OK(op, "[];[];?;?;?;?;?;?;?", "");
INFER_ERROR("must be at most rank 1", op, "[1,2];?;?;?;?;?;?;?;?");
INFER_ERROR("must be at most rank 1", op, "?;[2,3];?;?;?;?;?;?;?");
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3,
              /*num_context_ragged=*/4, 0, 0, 0);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[6];[6];[6];[6]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(0, 0, 0, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3, /*num_feature_list_ragged=*/4);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?",
("[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?",
("[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[?,1];[?,1,2];[?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(/*num_context_sparse=*/2, /*num_context_dense=*/3,
              /*num_context_ragged=*/4, /*num_feature_list_sparse=*/2,
              /*num_feature_list_dense=*/3, /*num_feature_list_ragged=*/4);
INFER_OK(op, "[?];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[5];[?];?;?;?;?;?;?;?;?;?;?",
("[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[d0_0,1];[d0_0,1,2];[d0_0,1,2,3];"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?,3];[?,3];"
"[?];[?];"
"[3];[3];"
"[d0_0,?,1];[d0_0,?,1,2];"
"[d0_0,?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[6];[6];[6];[6];"
"[?];[?];[?];[?]"));
INFER_OK(op, "[];[?];?;?;?;?;?;?;?;?;?;?",
("[?,1];[?,1];"
"[?];[?];"
"[1];[1];"
"[1];[1,2];[1,2,3];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,2];[?,2];"
"[?];[?];"
"[2];[2];"
"[?,1];[?,1,2];[?,1,2,3];"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
INFER_OK(op, "?;[?];?;?;?;?;?;?;?;?;?;?",
("[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?,?];[?,?];"
"[?];[?];"
"[?];[?];"
"?;?;?;"
"in0;in0;in0;"
"[?];[?];[?];[?];"
"[?];[?];[?];[?];"
"[?];[?];[?];[?]"));
  set_outputs(1, 1, 1, 1, 1, 1, /*add_extra_shape=*/true);
INFER_ERROR(
"num_context_dense (1) must match the size of "
"context_dense_types (1) and context_dense_shapes (2)",
op, "[?];[?];?;?;?;?;?;?;?;?");
}
}  // namespace tensorflow
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/parsing_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/parsing_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34bddcca-fda3-446f-8e5e-0bd32648a7c4 | cpp | tensorflow/tensorflow | math_grad | tensorflow/c/experimental/gradients/math_grad.cc | tensorflow/c/experimental/gradients/math_grad_test.cc | #include "tensorflow/c/experimental/gradients/math_grad.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/gradients.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/experimental/ops/nn_ops.h"
using std::vector;
using tensorflow::ops::AddV2;
using tensorflow::ops::Div;
using tensorflow::ops::DivNoNan;
using tensorflow::ops::MatMul;
using tensorflow::ops::Mul;
using tensorflow::ops::Neg;
using tensorflow::ops::OnesLike;
using tensorflow::ops::SqrtGrad;
namespace tensorflow {
namespace gradients {
namespace {
static Status SafeConj(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name) {
auto dtype = input->DataType();
if (DataTypeIsFloating(BaseType(dtype)) ||
DataTypeIsInteger(BaseType(dtype))) {
return tensorflow::ops::Identity(ctx, input, output, name);
} else if (!DataTypeIsComplex(BaseType(dtype)) &&
BaseType(dtype) != DT_VARIANT) {
return errors::InvalidArgument(
"Expected numeric or variant tensor, got dtype ", dtype);
}
return tensorflow::ops::Conj(ctx, input, output, name);
}
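// Note (added): for real (floating or integer) dtypes conjugation is the
// identity, so SafeConj emits a cheap Identity instead of a Conj kernel;
// only complex inputs get an actual Conj. This mirrors the conj() helper
// used by the Python gradients.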
class AddGradientFunction : public GradientFunction {
public:
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
DCHECK(grad_outputs[0]);
grad_inputs[0] = grad_outputs[0];
grad_inputs[1] = grad_outputs[0];
grad_inputs[0]->Ref();
grad_inputs[1]->Ref();
return absl::OkStatus();
}
~AddGradientFunction() override {}
};
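// Gradient note (added): d(x + y)/dx = d(x + y)/dy = 1, so both input
// gradients alias the upstream gradient unchanged; the Ref() calls give each
// alias its own ownership share.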
class ExpGradientFunction : public GradientFunction {
public:
explicit ExpGradientFunction(AbstractTensorHandle* exp) : exp_(exp) {
exp->Ref();
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* conj_output;
std::string name = "Conj_Exp_Grad";
TF_RETURN_IF_ERROR(SafeConj(ctx, exp_.get(), &conj_output, name.c_str()));
AbstractTensorHandlePtr conj_output_releaser(conj_output);
name = "Mul_Exp_Grad";
TF_RETURN_IF_ERROR(
Mul(ctx, conj_output, grad_outputs[0], &grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~ExpGradientFunction() override {}
private:
AbstractTensorHandlePtr exp_;
};
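// Gradient sketch (added): with y = exp(x), dy/dx = exp(x) = y, so the
// backward pass reuses the cached forward output: grad_x = conj(y) *
// upstream. Holding a ref on exp_ keeps that forward output alive for the
// backward pass.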
class SqrtGradientFunction : public GradientFunction {
public:
explicit SqrtGradientFunction(AbstractTensorHandle* sqrt) : sqrt_(sqrt) {
sqrt->Ref();
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
std::string name = "Sqrt_Grad";
TF_RETURN_IF_ERROR(SqrtGrad(ctx, sqrt_.get(), grad_outputs[0],
&grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~SqrtGradientFunction() override {}
private:
AbstractTensorHandlePtr sqrt_;
};
class MatMulGradientFunction : public GradientFunction {
public:
explicit MatMulGradientFunction(vector<AbstractTensorHandle*> f_inputs,
AttrBuilder f_attrs)
: forward_inputs_(f_inputs), forward_attrs_(f_attrs) {
for (auto input : forward_inputs_) {
if (input) {
input->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
bool t_a;
TF_RETURN_IF_ERROR(forward_attrs_.Get("transpose_a", &t_a));
bool t_b;
TF_RETURN_IF_ERROR(forward_attrs_.Get("transpose_b", &t_b));
AbstractTensorHandle* conj_output;
std::string name = "Conj_A_MatMul_Grad";
TF_RETURN_IF_ERROR(
SafeConj(ctx, forward_inputs_[0], &conj_output, name.c_str()));
AbstractTensorHandlePtr A(conj_output);
name = "Conj_B_MatMul_Grad";
TF_RETURN_IF_ERROR(
SafeConj(ctx, forward_inputs_[1], &conj_output, name.c_str()));
AbstractTensorHandlePtr B(conj_output);
AbstractTensorHandle* matmul_A_output;
AbstractTensorHandle* matmul_B_output;
std::string name_grad_A = "MatMul_Grad_A";
std::string name_grad_B = "MatMul_Grad_B";
    if (!t_a && !t_b) {
      TF_RETURN_IF_ERROR(MatMul(ctx, upstream_grad, B.get(), &matmul_A_output,
                                /*transpose_a=*/false,
                                /*transpose_b=*/true, name_grad_A.c_str()));
      TF_RETURN_IF_ERROR(MatMul(ctx, A.get(), upstream_grad, &matmul_B_output,
                                /*transpose_a=*/true,
                                /*transpose_b=*/false, name_grad_B.c_str()));
    } else if (!t_a && t_b) {
      TF_RETURN_IF_ERROR(MatMul(ctx, upstream_grad, B.get(), &matmul_A_output,
                                /*transpose_a=*/false,
                                /*transpose_b=*/false, name_grad_A.c_str()));
      TF_RETURN_IF_ERROR(MatMul(ctx, upstream_grad, A.get(), &matmul_B_output,
                                /*transpose_a=*/true,
                                /*transpose_b=*/false, name_grad_B.c_str()));
    } else if (t_a && !t_b) {
      TF_RETURN_IF_ERROR(MatMul(ctx, B.get(), upstream_grad, &matmul_A_output,
                                /*transpose_a=*/false,
                                /*transpose_b=*/true, name_grad_A.c_str()));
      TF_RETURN_IF_ERROR(MatMul(ctx, A.get(), upstream_grad, &matmul_B_output,
                                /*transpose_a=*/false,
                                /*transpose_b=*/false, name_grad_B.c_str()));
    } else {
      TF_RETURN_IF_ERROR(MatMul(ctx, B.get(), upstream_grad, &matmul_A_output,
                                /*transpose_a=*/true,
                                /*transpose_b=*/true, name_grad_A.c_str()));
      TF_RETURN_IF_ERROR(MatMul(ctx, upstream_grad, A.get(), &matmul_B_output,
                                /*transpose_a=*/true,
                                /*transpose_b=*/true, name_grad_B.c_str()));
    }
grad_inputs[0] = matmul_A_output;
grad_inputs[1] = matmul_B_output;
return absl::OkStatus();
}
~MatMulGradientFunction() override {
for (auto input : forward_inputs_) {
if (input) {
input->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_inputs_;
AttrBuilder forward_attrs_;
};
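// Reference formulas (added) behind the four branches above, with U the
// upstream gradient:
//   Z = A . B      ->  dA = U . B^T    dB = A^T . U
//   Z = A . B^T    ->  dA = U . B      dB = U^T . A
//   Z = A^T . B    ->  dA = B . U^T    dB = A . U
//   Z = A^T . B^T  ->  dA = B^T . U^T  dB = U^T . A^T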
class NegGradientFunction : public GradientFunction {
public:
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
std::string name = "Neg_Grad";
TF_RETURN_IF_ERROR(
ops::Neg(ctx, grad_outputs[0], &grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~NegGradientFunction() override {}
};
class SubGradientFunction : public GradientFunction {
public:
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
DCHECK(grad_outputs[0]);
grad_inputs[0] = grad_outputs[0];
grad_inputs[0]->Ref();
std::string name = "Neg_Sub_Grad_B";
TF_RETURN_IF_ERROR(
ops::Neg(ctx, grad_outputs[0], &grad_inputs[1], name.c_str()));
return absl::OkStatus();
}
~SubGradientFunction() override {}
};
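// Gradient note (added): for z = x - y, dz/dx = u (aliased with a Ref) and
// dz/dy = -u (computed via Neg).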
class MulGradientFunction : public GradientFunction {
public:
explicit MulGradientFunction(vector<AbstractTensorHandle*> f_inputs)
: forward_inputs_(f_inputs) {
for (auto input : forward_inputs_) {
if (input) {
input->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
std::string name = "Mul_Grad_A";
TF_RETURN_IF_ERROR(Mul(ctx, upstream_grad, forward_inputs_[1],
&grad_inputs[0], name.c_str()));
name = "Mul_Grad_B";
TF_RETURN_IF_ERROR(Mul(ctx, forward_inputs_[0], upstream_grad,
&grad_inputs[1], name.c_str()));
return absl::OkStatus();
}
~MulGradientFunction() override {
for (auto input : forward_inputs_) {
if (input) {
input->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_inputs_;
};
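// Gradient note (added): product rule -- for z = x * y, dz/dx = u * y and
// dz/dy = x * u, i.e. each gradient multiplies the upstream grad by the
// *other* forward input. No broadcast reduction is applied here, so this
// assumes same-shaped operands (the tests use scalars).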
class Log1pGradientFunction : public GradientFunction {
public:
explicit Log1pGradientFunction(vector<AbstractTensorHandle*> f_inputs)
: forward_inputs_(f_inputs) {
for (auto input : forward_inputs_) {
if (input) {
input->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
AbstractTensorHandle* X = forward_inputs_[0];
AbstractTensorHandle* temp_output;
std::string name = "Conj_Log1p_Grad_X";
TF_RETURN_IF_ERROR(SafeConj(ctx, X, &temp_output, name.c_str()));
AbstractTensorHandlePtr Conj_X(temp_output);
name = "OnesLike_Log1p_Grad_X";
TF_RETURN_IF_ERROR(OnesLike(ctx, Conj_X.get(), &temp_output, name.c_str()));
AbstractTensorHandlePtr Ones_X(temp_output);
name = "Add_Log1p_Grad_X";
TF_RETURN_IF_ERROR(
AddV2(ctx, Ones_X.get(), Conj_X.get(), &temp_output, name.c_str()));
AbstractTensorHandlePtr Conj_XP1(temp_output);
name = "Div_Log1p_Grad_X";
TF_RETURN_IF_ERROR(
Div(ctx, upstream_grad, Conj_XP1.get(), &grad_inputs[0], name.c_str()));
return absl::OkStatus();
}
~Log1pGradientFunction() override {
for (auto input : forward_inputs_) {
if (input) {
input->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_inputs_;
};
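// Gradient sketch (added): d log(1 + x)/dx = 1 / (1 + x), computed here as
// upstream / (1 + conj(x)); the "1 +" is built from OnesLike + AddV2 so it
// matches x's dtype and shape.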
class DivNoNanGradientFunction : public GradientFunction {
public:
explicit DivNoNanGradientFunction(vector<AbstractTensorHandle*> f_inputs,
vector<AbstractTensorHandle*> f_outputs)
: forward_inputs_(f_inputs), forward_outputs_(f_outputs) {
for (auto input : forward_inputs_) {
if (input) {
input->Ref();
}
}
for (auto output : forward_outputs_) {
if (output) {
output->Ref();
}
}
}
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
AbstractTensorHandle* upstream_grad = grad_outputs[0];
AbstractTensorHandle* Y = forward_inputs_[1];
AbstractTensorHandle* Z = forward_outputs_[0];
std::string name = "Div_Grad_X";
TF_RETURN_IF_ERROR(
DivNoNan(ctx, upstream_grad, Y, &grad_inputs[0], name.c_str()));
AbstractTensorHandle* temp_output;
name = "Neg_Div_Grad_Y";
TF_RETURN_IF_ERROR(Neg(ctx, upstream_grad, &temp_output,
name.c_str()));
AbstractTensorHandlePtr MinusU(temp_output);
name = "Mul_Div_Grad_Y";
TF_RETURN_IF_ERROR(Mul(ctx, MinusU.get(), Z, &temp_output,
name.c_str()));
AbstractTensorHandlePtr UZ(temp_output);
name = "Div_Grad_Y";
TF_RETURN_IF_ERROR(DivNoNan(ctx, UZ.get(), Y, &grad_inputs[1],
name.c_str()));
return absl::OkStatus();
}
~DivNoNanGradientFunction() override {
for (auto input : forward_inputs_) {
if (input) {
input->Unref();
}
}
for (auto output : forward_outputs_) {
if (output) {
output->Unref();
}
}
}
private:
vector<AbstractTensorHandle*> forward_inputs_;
vector<AbstractTensorHandle*> forward_outputs_;
};
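// Gradient sketch (added): for z = div_no_nan(x, y), dz/dx = u / y and
// dz/dy = -u * x / y^2 = (-u * z) / y. Both divisions use DivNoNan, so
// positions where y == 0 get a 0 gradient rather than inf/nan -- exactly
// what the unit test exercises with y = 0.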
}  // namespace
GradientFunction* AddRegisterer(const ForwardOperation& op) {
return new AddGradientFunction;
}
GradientFunction* ExpRegisterer(const ForwardOperation& op) {
return new ExpGradientFunction(op.outputs[0]);
}
GradientFunction* MatMulRegisterer(const ForwardOperation& op) {
return new MatMulGradientFunction(op.inputs, op.attrs);
}
GradientFunction* SqrtRegisterer(const ForwardOperation& op) {
return new SqrtGradientFunction(op.outputs[0]);
}
GradientFunction* NegRegisterer(const ForwardOperation& op) {
return new NegGradientFunction;
}
GradientFunction* SubRegisterer(const ForwardOperation& op) {
return new SubGradientFunction;
}
GradientFunction* MulRegisterer(const ForwardOperation& op) {
return new MulGradientFunction(op.inputs);
}
GradientFunction* Log1pRegisterer(const ForwardOperation& op) {
return new Log1pGradientFunction(op.inputs);
}
GradientFunction* DivNoNanRegisterer(const ForwardOperation& op) {
return new DivNoNanGradientFunction(op.inputs, op.outputs);
}
}  // namespace gradients
} | #include "tensorflow/c/experimental/gradients/math_grad.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/grad_test_helper.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
Status AddModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::AddV2(ctx, inputs[0], inputs[1], &outputs[0], "Add");
}
Status ExpModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Exp(ctx, inputs[0], &outputs[0], "Exp");
}
Status SqrtModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Sqrt(ctx, inputs[0], &outputs[0], "Sqrt");
}
Status NegModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Neg(ctx, inputs[0], &outputs[0], "Neg");
}
Status SubModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Sub(ctx, inputs[0], inputs[1], &outputs[0], "Sub");
}
Status MulModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Mul(ctx, inputs[0], inputs[1], &outputs[0], "Mul");
}
Status Log1pModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::Log1p(ctx, inputs[0], &outputs[0], "Log1p");
}
Status DivNoNanModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
return ops::DivNoNan(ctx, inputs[0], inputs[1], &outputs[0], "DivNoNan");
}
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
enable_tensor_float_32_execution(false);
}
AbstractContextPtr immediate_execution_ctx_;
GradientRegistry registry_;
Status status_;
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
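// Note (added): the test parameter tuple is (tracing impl, use_tfrt,
// use_function) -- std::get<0> picks "graphdef" or "mlir" tracing,
// std::get<1> is forwarded to BuildImmediateExecutionContext, and
// std::get<2> decides whether the model is traced into a function or run
// eagerly (UseFunction()).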
TEST_P(CppGradients, TestAddGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
AbstractTensorHandlePtr y;
{
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
status_ = registry_.Register("AddV2", AddRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
AddModel, BuildGradModel(AddModel, registry_),
immediate_execution_ctx_.get(), {x.get(), y.get()}, UseFunction()));
}
TEST_P(CppGradients, TestExpGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Exp", ExpRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
ExpModel, BuildGradModel(ExpModel, registry_),
immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
}
TEST_P(CppGradients, TestMatMulGrad) {
GTEST_SKIP();
float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
int64_t A_dims[] = {3, 3};
AbstractTensorHandlePtr A;
{
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
float B_vals[] = {9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
int64_t B_dims[] = {3, 3};
AbstractTensorHandlePtr B;
{
AbstractTensorHandle* B_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), B_vals, B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
B.reset(B_raw);
}
status_ = registry_.Register("MatMul", MatMulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
for (bool transpose_a : {false, true}) {
for (bool transpose_b : {false, true}) {
Model MatMulModel =
[transpose_a, transpose_b](
AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) -> Status {
return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0], transpose_a,
transpose_b, "MatMul");
};
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
MatMulModel, BuildGradModel(MatMulModel, registry_),
immediate_execution_ctx_.get(), {A.get(), B.get()}, UseFunction()));
}
}
}
TEST_P(CppGradients, TestMatMulGradManual) {
float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f};
int64_t A_dims[] = {3, 3};
AbstractTensorHandlePtr A;
{
AbstractTensorHandle* A_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), A_vals, A_dims, 2, &A_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
A.reset(A_raw);
}
float B_vals[] = {9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
int64_t B_dims[] = {3, 3};
AbstractTensorHandlePtr B;
{
AbstractTensorHandle* B_raw;
status_ = TestTensorHandleWithDims<float, TF_FLOAT>(
immediate_execution_ctx_.get(), B_vals, B_dims, 2, &B_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
B.reset(B_raw);
}
status_ = registry_.Register("MatMul", MatMulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
bool transpose_a_vals[] = {false, false, true, true};
bool transpose_b_vals[] = {false, true, false, true};
float dA_vals[4][9] = {{24, 15, 6, 24, 15, 6, 24, 15, 6},
{18, 15, 12, 18, 15, 12, 18, 15, 12},
{24, 24, 24, 15, 15, 15, 6, 6, 6},
{18, 18, 18, 15, 15, 15, 12, 12, 12}};
float dB_vals[4][9] = {{12, 12, 12, 15, 15, 15, 18, 18, 18},
{12, 15, 18, 12, 15, 18, 12, 15, 18},
{6, 6, 6, 15, 15, 15, 24, 24, 24},
{6, 15, 24, 6, 15, 24, 6, 15, 24}};
for (int i{}; i < 4; ++i) {
bool transpose_a = transpose_a_vals[i];
bool transpose_b = transpose_b_vals[i];
Model MatMulModel =
[transpose_a, transpose_b](
AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) -> Status {
return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0], transpose_a,
transpose_b, "MatMul");
};
Model MatMulGradModel = BuildGradModel(MatMulModel, registry_);
std::vector<AbstractTensorHandle*> outputs(2);
status_ =
RunModel(MatMulGradModel, immediate_execution_ctx_.get(),
{A.get(), B.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
    ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], dA_vals[i],
                                             /*dims=*/{3, 3},
                                             /*abs_error=*/0));
    ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[1], dB_vals[i],
                                             /*dims=*/{3, 3},
                                             /*abs_error=*/0));
outputs[0]->Unref();
outputs[1]->Unref();
}
}
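// Sanity check (added) for the expected tables above, assuming the grad
// model seeds the upstream gradient U with ones(3, 3) as the tape does by
// default: in the no-transpose case dA = U . B^T, whose (i, j) entry is the
// sum of B's row j. B's rows sum to 24, 15 and 6, so every row of dA is
// (24, 15, 6) -- matching dA_vals[0].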
TEST_P(CppGradients, TestSqrtGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Sqrt", SqrtRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SqrtModel, BuildGradModel(SqrtModel, registry_),
immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
}
TEST_P(CppGradients, TestNegGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Neg", NegRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
NegModel, BuildGradModel(NegModel, registry_),
immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
}
TEST_P(CppGradients, TestSubGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
AbstractTensorHandlePtr y;
{
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
status_ = registry_.Register("Sub", SubRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
SubModel, BuildGradModel(SubModel, registry_),
immediate_execution_ctx_.get(), {x.get(), y.get()}, UseFunction()));
}
TEST_P(CppGradients, TestMulGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
AbstractTensorHandlePtr y;
{
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
status_ = registry_.Register("Mul", MulRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
MulModel, BuildGradModel(MulModel, registry_),
immediate_execution_ctx_.get(), {x.get(), y.get()}, UseFunction()));
}
TEST_P(CppGradients, TestLog1pGrad) {
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
status_ = registry_.Register("Log1p", Log1pRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
Log1pModel, BuildGradModel(Log1pModel, registry_),
immediate_execution_ctx_.get(), {x.get()}, UseFunction()));
}
TEST_P(CppGradients, TestDivNoNanGrad) {
status_ = registry_.Register("DivNoNan", DivNoNanRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto DivNoNanGradModel = BuildGradModel(DivNoNanModel, registry_);
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x.reset(x_raw);
}
AbstractTensorHandlePtr y;
{
AbstractTensorHandle* y_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 2.0f, &y_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
y.reset(y_raw);
}
ASSERT_NO_FATAL_FAILURE(CompareNumericalAndAutodiffGradients(
DivNoNanModel, DivNoNanGradModel, immediate_execution_ctx_.get(),
{x.get(), y.get()}, UseFunction()));
AbstractTensorHandlePtr z;
{
AbstractTensorHandle* z_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 0.0f, &z_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
z.reset(z_raw);
}
std::vector<AbstractTensorHandle*> outputs(2);
status_ =
RunModel(DivNoNanGradModel, immediate_execution_ctx_.get(),
{x.get(), z.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
  ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[0], {0.0f}, /*dims=*/{},
                                           /*abs_error=*/0));
  ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[1], {0.0f}, /*dims=*/{},
                                           /*abs_error=*/0));
outputs[0]->Unref();
outputs[1]->Unref();
}
#ifdef PLATFORM_GOOGLE
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#else
INSTANTIATE_TEST_SUITE_P(
UnifiedCAPI, CppGradients,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
#endif
}  // namespace
}  // namespace internal
}  // namespace gradients
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/math_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/math_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f4ce7de-febe-4637-8295-d004dcf2b9b9 | cpp | tensorflow/tensorflow | sparse_ops | tensorflow/core/ops/sparse_ops.cc | tensorflow/core/ops/sparse_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
Status SparseSparseMinOrMaxShapeFn(InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 1, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
}
}  // namespace
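// Convention note (added): a SparseTensor crosses op boundaries as the triple
// (indices [nnz, rank] int64, values [nnz], dense_shape [rank] int64); the
// six inputs validated above are two such triples, and the min/max ops emit
// only result indices and values (the dense shape matches the operands').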
REGISTER_OP("SparseAddGrad")
.Input("backprop_val_grad: T")
.Input("a_indices: int64")
.Input("b_indices: int64")
.Input("sum_indices: int64")
.Output("a_val_grad: T")
.Output("b_val_grad: T")
.Attr("T: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle a_indices;
ShapeHandle b_indices;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &a_indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &b_indices));
c->set_output(0, c->Vector(c->Dim(a_indices, 0)));
c->set_output(1, c->Vector(c->Dim(b_indices, 0)));
return absl::OkStatus();
});
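// Shape logic (added): the values-gradient of each operand has one entry per
// stored nonzero of that operand, hence vectors of length a_indices.dim(0)
// and b_indices.dim(0) respectively.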
REGISTER_OP("SparseAdd")
.Input("a_indices: int64")
.Input("a_values: T")
.Input("a_shape: int64")
.Input("b_indices: int64")
.Input("b_values: T")
.Input("b_shape: int64")
.Input("thresh: Treal")
.Output("sum_indices: int64")
.Output("sum_values: T")
.Output("sum_shape: int64")
.Attr("T: numbertype")
.Attr("Treal: realnumbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle a_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &a_shape));
c->set_output(
0, c->Matrix(InferenceContext::kUnknownDim, c->Dim(a_shape, 0)));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, a_shape);
return absl::OkStatus();
});
REGISTER_OP("SparseTensorDenseMatMul")
.Input("a_indices: Tindices")
.Input("a_values: T")
.Input("a_shape: int64")
.Input("b: T")
.Output("product: T")
.Attr("T: type")
.Attr("Tindices: {int32,int64} = DT_INT64")
.Attr("adjoint_a: bool = false")
.Attr("adjoint_b: bool = false")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle unused_dim;
ShapeHandle unused;
ShapeHandle b;
ShapeHandle a_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRank(a_shape, 2, &a_shape));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &b));
bool adjoint_a;
bool adjoint_b;
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
DimensionHandle output_right = c->Dim(b, adjoint_b ? 0 : 1);
DimensionHandle output_left = c->Dim(a_shape, adjoint_a ? 1 : 0);
DimensionHandle inner_left = c->Dim(a_shape, adjoint_a ? 0 : 1);
DimensionHandle inner_right = c->Dim(b, adjoint_b ? 1 : 0);
TF_RETURN_IF_ERROR(c->Merge(inner_left, inner_right, &unused_dim));
c->set_output(0, c->Matrix(output_left, output_right));
return absl::OkStatus();
});
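// Shape walk-through (added, illustrative): with a_shape = [m, k] (read via
// MakeShapeFromShapeTensor, so fully known only when that input is a
// constant), b = [k, n] and no adjoints, the inner k dims are merged and the
// product is [m, n]; adjoint_a / adjoint_b simply swap which dims of a_shape
// / b play the inner and outer roles.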
REGISTER_OP("SerializeSparse")
.Input("sparse_indices: int64")
.Input("sparse_values: T")
.Input("sparse_shape: int64")
.Attr("T: type")
.Output("serialized_sparse: out_type")
.Attr("out_type: {string, variant} = DT_STRING")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, c->Vector(3));
return absl::OkStatus();
});
REGISTER_OP("SerializeManySparse")
.Input("sparse_indices: int64")
.Input("sparse_values: T")
.Input("sparse_shape: int64")
.Attr("T: type")
.Output("serialized_sparse: out_type")
.Attr("out_type: {string, variant} = DT_STRING")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 3));
return absl::OkStatus();
});
REGISTER_OP("DeserializeSparse")
.Input("serialized_sparse: Tserialized")
.Output("sparse_indices: int64")
.Output("sparse_values: dtype")
.Output("sparse_shape: int64")
.Attr("dtype: type")
.Attr("Tserialized: {string, variant} = DT_STRING")
.SetShapeFn([](InferenceContext* c) {
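// The serialized input may be of any rank >= 1, as long as its last
// dimension holds the 3 serialized components (indices, values, shape).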
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused_shape));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), -1), 3, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
REGISTER_OP("DeserializeManySparse")
.Input("serialized_sparse: string")
.Output("sparse_indices: int64")
.Output("sparse_values: dtype")
.Output("sparse_shape: int64")
.Attr("dtype: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle serialized_sparse;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &serialized_sparse));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(serialized_sparse, 1), 3, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
REGISTER_OP("SparseToDense")
.Input("sparse_indices: Tindices")
.Input("output_shape: Tindices")
.Input("sparse_values: T")
.Input("default_value: T")
.Attr("validate_indices: bool = true")
.Attr("T: type")
.Output("dense: T")
.Attr("Tindices: {int32, int64}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle out;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out));
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("SparseConcat")
.Input("indices: N * int64")
.Input("values: N * T")
.Input("shapes: N * int64")
.Output("output_indices: int64")
.Output("output_values: T")
.Output("output_shape: int64")
.Attr("concat_dim: int")
.Attr("N: int >= 2")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
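// Inputs arrive as N index matrices, then N value vectors, then N rank-1
// shape vectors; sum the nonzero counts across inputs while merging the
// index widths and the shape vectors, which must all agree.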
DimensionHandle output_row_count = c->MakeDim(0ll);
DimensionHandle output_ind_cols = c->UnknownDim();
ShapeHandle output_shape = c->UnknownShape();
const int n = c->num_inputs() / 3;
for (int i = 0; i < n; i++) {
ShapeHandle ind;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 2, &ind));
ShapeHandle val;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i + n), 1, &val));
ShapeHandle shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(i + 2 * n), 1, &shape));
DimensionHandle num_dim;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(ind, 0), c->Dim(val, 0), &num_dim));
TF_RETURN_IF_ERROR(
c->Add(output_row_count, num_dim, &output_row_count));
TF_RETURN_IF_ERROR(
c->Merge(output_ind_cols, c->Dim(ind, 1), &output_ind_cols));
TF_RETURN_IF_ERROR(c->Merge(output_shape, shape, &output_shape));
}
c->set_output(0, c->Matrix(output_row_count, output_ind_cols));
c->set_output(1, c->Vector(output_row_count));
c->set_output(2, output_shape);
return absl::OkStatus();
});
REGISTER_OP("SparseCross")
.Input("indices: N * int64")
.Input("values: sparse_types")
.Input("shapes: N * int64")
.Input("dense_inputs: dense_types")
.Output("output_indices: int64")
.Output("output_values: out_type")
.Output("output_shape: int64")
.Attr("N: int >= 0")
.Attr("hashed_output: bool")
.Attr("num_buckets: int >= 0")
.Attr("hash_key: int")
.Attr("sparse_types: list({int64, string}) >= 0")
.Attr("dense_types: list({int64, string}) >= 0")
.Attr("out_type: {int64, string}")
.Attr("internal_type: {int64, string}")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Matrix(c->UnknownDim(), 2));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("SparseCrossV2")
.Input("indices: N * int64")
.Input("values: sparse_types")
.Input("shapes: N * int64")
.Input("dense_inputs: dense_types")
.Input("sep: string")
.Output("output_indices: int64")
.Output("output_values: string")
.Output("output_shape: int64")
.Attr("N: int >= 0")
.Attr("sparse_types: list({int64, string}) >= 0")
.Attr("dense_types: list({int64, string}) >= 0")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Matrix(c->UnknownDim(), 2));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("SparseCrossHashed")
.Input("indices: N * int64")
.Input("values: sparse_types")
.Input("shapes: N * int64")
.Input("dense_inputs: dense_types")
.Input("num_buckets: int64")
.Input("strong_hash: bool")
.Input("salt: int64")
.Output("output_indices: int64")
.Output("output_values: int64")
.Output("output_shape: int64")
.Attr("N: int >= 0")
.Attr("sparse_types: list({int64, string}) >= 0")
.Attr("dense_types: list({int64, string}) >= 0")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Matrix(c->UnknownDim(), 2));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("SparseSplit")
.Input("split_dim: int64")
.Input("indices: int64")
.Input("values: T")
.Input("shape: int64")
.Output("output_indices: num_split * int64")
.Output("output_values: num_split * T")
.Output("output_shape: num_split * int64")
.Attr("num_split: int >= 1")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input_shape = c->input(3);
ShapeHandle output_indices =
c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape));
ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim);
ShapeHandle output_shape = input_shape;
int num_splits = c->num_outputs() / 3;
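// Outputs are laid out as num_split index matrices, then num_split value
// vectors, then num_split shape vectors.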
int out_idx = 0;
for (int i = 0; i < num_splits; ++i)
c->set_output(out_idx++, output_indices);
for (int i = 0; i < num_splits; ++i)
c->set_output(out_idx++, output_values);
for (int i = 0; i < num_splits; ++i)
c->set_output(out_idx++, output_shape);
return absl::OkStatus();
});
REGISTER_OP("SparseSliceGrad")
.Input("backprop_val_grad: T")
.Input("input_indices: int64")
.Input("input_start: int64")
.Input("output_indices: int64")
.Output("val_grad: T")
.Attr("T: numbertype")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &indices));
c->set_output(0, c->Vector(c->Dim(indices, 0)));
return absl::OkStatus();
});
REGISTER_OP("SparseSlice")
.Input("indices: int64")
.Input("values: T")
.Input("shape: int64")
.Input("start: int64")
.Input("size: int64")
.Output("output_indices: int64")
.Output("output_values: T")
.Output("output_shape: int64")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input_shape = c->input(2);
ShapeHandle output_indices =
c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape));
ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim);
ShapeHandle output_shape = input_shape;
c->set_output(0, output_indices);
c->set_output(1, output_values);
c->set_output(2, output_shape);
return absl::OkStatus();
});
REGISTER_OP("SparseReorder")
.Input("input_indices: int64")
.Input("input_values: T")
.Input("input_shape: int64")
.Output("output_indices: int64")
.Output("output_values: T")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle values;
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, indices);
c->set_output(1, values);
return absl::OkStatus();
});
REGISTER_OP("SparseReshape")
.Input("input_indices: int64")
.Input("input_shape: int64")
.Input("new_shape: int64")
.Output("output_indices: int64")
.Output("output_shape: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle indices;
ShapeHandle unused;
ShapeHandle new_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &new_shape));
c->set_output(0, c->Matrix(c->Dim(indices, 0), c->Dim(new_shape, 0)));
c->set_output(1, new_shape);
return absl::OkStatus();
});
REGISTER_OP("SparseTensorDenseAdd")
.Input("a_indices: Tindices")
.Input("a_values: T")
.Input("a_shape: Tindices")
.Input("b: T")
.Output("output: T")
.Attr("T: numbertype")
.Attr("Tindices: {int32, int64}")
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->input(3));
return absl::OkStatus();
});
REGISTER_OP("SparseReduceMax")
.Input("input_indices: int64")
.Input("input_values: T")
.Input("input_shape: int64")
.Input("reduction_axes: int32")
.Attr("keep_dims: bool = False")
.Output("output: T")
.Attr("T: realnumbertype")
.SetShapeFn(shape_inference::SparseReduceShapeFn);
REGISTER_OP("SparseReduceMaxSparse")
.Input("input_indices: int64")
.Input("input_values: T")
.Input("input_shape: int64")
.Input("reduction_axes: int32")
.Attr("keep_dims: bool = False")
.Output("output_indices: int64")
.Output("output_values: T")
.Output("output_shape: int64")
.Attr("T: realnumbertype")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("SparseReduceSum")
.Input("input_indices: int64")
.Input("input_values: T")
.Input("input_shape: int64")
.Input("reduction_axes: int32")
.Attr("keep_dims: bool = False")
.Output("output: T")
.Attr("T: numbertype")
.SetShapeFn(shape_inference::SparseReduceShapeFn);
REGISTER_OP("SparseReduceSumSparse")
.Input("input_indices: int64")
.Input("input_values: T")
.Input("input_shape: int64")
.Input("reduction_axes: int32")
.Attr("keep_dims: bool = False")
.Output("output_indices: int64")
.Output("output_values: T")
.Output("output_shape: int64")
.Attr("T: numbertype")
.SetShapeFn(shape_inference::UnknownShape);
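// The three sparse-dense cwise ops share one signature: the output is a
// vector of values, one per nonzero entry of the sparse operand.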
#define SPARSE_DENSE_CWISE_SIGNATURE() \
Input("sp_indices: int64") \
.Input("sp_values: T") \
.Input("sp_shape: int64") \
.Input("dense: T") \
.Output("output: T") \
.Attr("T: numbertype") \
.SetShapeFn([](InferenceContext* c) { \
ShapeHandle input; \
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input)); \
c->set_output(0, c->Vector(c->Dim(input, 0))); \
return absl::OkStatus(); \
})
REGISTER_OP("SparseDenseCwiseMul").SPARSE_DENSE_CWISE_SIGNATURE();
REGISTER_OP("SparseDenseCwiseDiv").SPARSE_DENSE_CWISE_SIGNATURE();
REGISTER_OP("SparseDenseCwiseAdd").SPARSE_DENSE_CWISE_SIGNATURE();
#undef SPARSE_DENSE_CWISE_SIGNATURE
REGISTER_OP("SparseSoftmax")
.Input("sp_indices: int64")
.Input("sp_values: T")
.Input("sp_shape: int64")
.Output("output: T")
.Attr("T: {half, float, double}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle values;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, values);
return absl::OkStatus();
});
REGISTER_OP("SparseSparseMaximum")
.Input("a_indices: int64")
.Input("a_values: T")
.Input("a_shape: int64")
.Input("b_indices: int64")
.Input("b_values: T")
.Input("b_shape: int64")
.Output("output_indices: int64")
.Output("output_values: T")
.Attr("T: realnumbertype")
.SetShapeFn(SparseSparseMinOrMaxShapeFn);
REGISTER_OP("SparseSparseMinimum")
.Input("a_indices: int64")
.Input("a_values: T")
.Input("a_shape: int64")
.Input("b_indices: int64")
.Input("b_values: T")
.Input("b_shape: int64")
.Output("output_indices: int64")
.Output("output_values: T")
.Attr("T: numbertype")
.SetShapeFn(SparseSparseMinOrMaxShapeFn);
REGISTER_OP("AddSparseToTensorsMap")
.Input("sparse_indices: int64")
.Input("sparse_values: T")
.Input("sparse_shape: int64")
.Output("sparse_handle: int64")
.Attr("T: type")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
REGISTER_OP("AddManySparseToTensorsMap")
.Input("sparse_indices: int64")
.Input("sparse_values: T")
.Input("sparse_shape: int64")
.Output("sparse_handles: int64")
.Attr("T: type")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
c->set_output(0, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
REGISTER_OP("TakeManySparseFromTensorsMap")
.Input("sparse_handles: int64")
.Output("sparse_indices: int64")
.Output("sparse_values: dtype")
.Output("sparse_shape: int64")
.Attr("dtype: type")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle sparse_handles;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &sparse_handles));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
InferenceContext::kUnknownDim));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
REGISTER_OP("SparseFillEmptyRows")
.Input("indices: int64")
.Input("values: T")
.Input("dense_shape: int64")
.Input("default_value: T")
.Output("output_indices: int64")
.Output("output_values: T")
.Output("empty_row_indicator: bool")
.Output("reverse_index_map: int64")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input_indices = c->input(0);
TF_RETURN_IF_ERROR(c->WithRank(input_indices, 2, &input_indices));
ShapeHandle input_values = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(input_values, 1, &input_values));
ShapeHandle input_shape = c->input(2);
TF_RETURN_IF_ERROR(c->WithRank(input_shape, 1, &input_shape));
ShapeHandle default_value = c->input(3);
TF_RETURN_IF_ERROR(c->WithRank(default_value, 0, &default_value));
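// N is the number of nonzero entries: indices and values must agree on it,
// and the index width must match the rank recorded in dense_shape.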
DimensionHandle N = c->Dim(input_indices, 0);
TF_RETURN_IF_ERROR(c->Merge(N, c->Dim(input_values, 0), &N));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(input_indices, 1),
c->Dim(input_shape, 0), &unused_dim));
if (c->Value(c->NumElements(input_shape)) == 0)
return errors::InvalidArgument("dense_shape must not be empty");
ShapeHandle output_indices =
c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape));
ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim);
ShapeHandle constant_input_shape;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &constant_input_shape));
ShapeHandle empty_row_indicator =
c->Vector(c->Dim(constant_input_shape, 0));
ShapeHandle reverse_index_map = c->Vector(N);
c->set_output(0, output_indices);
c->set_output(1, output_values);
c->set_output(2, empty_row_indicator);
c->set_output(3, reverse_index_map);
return absl::OkStatus();
});
REGISTER_OP("SparseFillEmptyRowsGrad")
.Input("reverse_index_map: int64")
.Input("grad_values: T")
.Output("d_values: T")
.Output("d_default_value: T")
.Attr("T: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle reverse_index_map = c->input(0);
TF_RETURN_IF_ERROR(c->WithRank(reverse_index_map, 1, &reverse_index_map));
ShapeHandle grad_values = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(grad_values, 1, &grad_values));
c->set_output(0, reverse_index_map);
c->set_output(1, c->Scalar());
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(SparseOpsTest, SparseTensorDenseAdd_ShapeFn) {
ShapeInferenceTestOp op("SparseTensorDenseAdd");
INFER_OK(op, "?;?;?;?", "in3");
}
TEST(SparseOpsTest, SparseAdd_ShapeFn) {
ShapeInferenceTestOp op("SparseAdd");
INFER_OK(op, "?;?;?;?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;[?];?;?;?;?", "[?,d2_0];[?];in2");
INFER_OK(op, "?;?;[1];?;?;?;?", "[?,d2_0];[?];in2");
}
TEST(SparseOpsTest, SparseAddGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseAddGrad");
INFER_ERROR("must be rank 2", op, "?;?;[1];?");
INFER_ERROR("must be rank 2", op, "?;[1];?;?");
INFER_OK(op, "?;?;?;?", "[?];[?]");
INFER_OK(op, "?;[?,?];[?,?];?", "[d1_0];[d2_0]");
}
TEST(SparseOpsTest, SparseSliceGrad_ShapeFn) {
ShapeInferenceTestOp op("SparseSliceGrad");
INFER_ERROR("must be rank 2", op, "?;[1];?;?");
INFER_OK(op, "?;?;?;?", "[?]");
INFER_OK(op, "?;[?,?];?;?", "[d1_0]");
}
TEST(SparseOpsTest, SparseReorder_ShapeFn) {
ShapeInferenceTestOp op("SparseReorder");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[?,?];[?]");
INFER_OK(op, "[?,?];[?];?", "in0;in1");
}
TEST(SparseOpsTest, SparseReshape_ShapeFn) {
ShapeInferenceTestOp op("SparseReshape");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[?,?];[?]");
INFER_OK(op, "[?,?];?;[?]", "[d0_0,d2_0];in2");
}
TEST(SparseOpsTest, SparseSplit_ShapeFn) {
ShapeInferenceTestOp op("SparseSplit");
TF_ASSERT_OK(NodeDefBuilder("test", "SparseSplit")
.Input({"split_dim", 0, DT_INT64})
.Input({"indices", 1, DT_INT64})
.Input({"values", 2, DT_INT64})
.Input({"shape", 3, DT_INT64})
.Attr("num_split", 2)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "[?,?];[?,?];[?];[?];in3;in3");
INFER_OK(op, "?;?;?;[5,4,3,2,1]", "[?,120];[?,120];[?];[?];in3;in3");
}
TEST(SparseOpsTest, SparseToDense_ShapeFn) {
ShapeInferenceTestOp op("SparseToDense");
op.input_tensors.resize(4);
INFER_OK(op, "?;?;?;?", "?");
INFER_OK(op, "?;[?];?;?", "?");
INFER_OK(op, "?;[4];?;?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[1] = &in_t;
INFER_OK(op, "?;[4];?;?", "[1,2,3,4]");
}
TEST(SparseOpsTest, SparseReduceSum_ShapeFn) {
ShapeInferenceTestOp op("SparseReduceSum");
TF_ASSERT_OK(NodeDefBuilder("test", "SparseReduceSum")
.Input({"input_indices", 0, DT_INT64})
.Input({"input_values", 1, DT_INT64})
.Input({"input_shape", 2, DT_INT64})
.Input({"reduction_axes", 3, DT_INT32})
.Attr("keep_dims", false)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "?");
}
TEST(SparseOpsTest, SerializeSparse_ShapeFn) {
ShapeInferenceTestOp op("SerializeSparse");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[3]");
}
TEST(SparseOpsTest, SerializeManySparse_ShapeFn) {
ShapeInferenceTestOp op("SerializeManySparse");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[?,3]");
}
TEST(SparseOpsTest, DeserializeManySparse_ShapeFn) {
ShapeInferenceTestOp op("DeserializeManySparse");
INFER_ERROR("must be rank 2", op, "[1]");
INFER_ERROR("must be 3", op, "[?,4]");
INFER_OK(op, "?", "[?,?];[?];[?]");
INFER_OK(op, "[?,3]", "[?,?];[?];[?]");
}
TEST(SparseOpsTest, SparseTensorDenseMatMul_ShapeFn) {
ShapeInferenceTestOp op("SparseTensorDenseMatMul");
auto set_adjoints = [&op](bool adjoint_a, bool adjoint_b) {
TF_ASSERT_OK(NodeDefBuilder("test", "SparseTensorDenseMatMul")
.Input({"a_indices", 1, DT_INT64})
.Input({"a_values", 2, DT_INT64})
.Input({"a_shape", 3, DT_INT64})
.Input({"b", 3, DT_INT64})
.Attr("adjoint_a", adjoint_a)
.Attr("adjoint_b", adjoint_b)
.Finalize(&op.node_def));
};
set_adjoints(false, false);
INFER_ERROR("must be rank 2", op, "[1];?;?;?");
INFER_ERROR("must be rank 1", op, "?;[];?;?");
INFER_ERROR("must be rank 1", op, "?;?;[];?");
INFER_ERROR("must be rank 2", op, "?;?;[3];?");
INFER_ERROR("must be rank 2", op, "?;?;?;[]");
INFER_OK(op, "?;?;?;?", "[?,?]");
INFER_OK(op, "?;?;?;[?,?]", "[?,d3_1]");
INFER_OK(op, "?;?;?;[1,2]", "[?,d3_1]");
INFER_OK(op, "?;?;[2];[1,2]", "[?,d3_1]");
set_adjoints(false, true);
INFER_OK(op, "?;?;?;[?,?]", "[?,d3_0]");
INFER_OK(op, "?;?;?;[1,2]", "[?,d3_0]");
Tensor a_shape_t = test::AsTensor<int64_t>(std::vector<int64_t>{3, 1});
op.input_tensors.resize(4);
op.input_tensors[2] = &a_shape_t;
set_adjoints(false, false);
INFER_OK(op, "?;?;[2];[1,2]", "[3,d3_1]");
INFER_OK(op, "?;?;?;[1,2]", "[3,d3_1]");
set_adjoints(true, false);
INFER_ERROR("must be equal", op, "?;?;[2];[1,2]");
a_shape_t = test::AsTensor<int64_t>(std::vector<int64_t>{3, 1, 2});
INFER_ERROR("must be rank 2 but is rank 3", op, "?;?;[3];[1,2]");
}
TEST(SparseOpsTest, SparseSoftmax_ShapeFn) {
ShapeInferenceTestOp op("SparseSoftmax");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[?]");
INFER_OK(op, "?;[?];?", "in1");
INFER_OK(op, "?;[5];?", "in1");
}
TEST(SparseOpsTest, SparseSparseMinAndMax_ShapeFn) {
for (const char* op_name : {"SparseSparseMaximum", "SparseSparseMinimum"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("must be rank 2", op, "[1];?;?;?;?;?");
INFER_ERROR("must be rank 1", op, "?;[];?;?;?;?");
INFER_ERROR("must be rank 1", op, "?;?;[];?;?;?");
INFER_ERROR("must be rank 2", op, "?;?;?;[];?;?");
INFER_ERROR("must be rank 1", op, "?;?;?;?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;[]");
INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?]");
INFER_OK(op, "?;[?];?;?;?;?", "[?,?];[?]");
INFER_OK(op, "?;[5];?;?;?;?", "[?,?];[?]");
}
}
TEST(SparseOpsTest, SparseConcat_ShapeFn) {
ShapeInferenceTestOp op("SparseConcat");
std::vector<NodeDefBuilder::NodeOut> src_list;
int n = 2;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_INT64);
TF_ASSERT_OK(NodeDefBuilder("test", "SparseConcat")
.Input(src_list)
.Input(src_list)
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_ERROR("must be rank 2", op, "[1];?;?;?;?;?");
INFER_ERROR("must be rank 2", op, "?;[1];?;?;?;?");
INFER_ERROR("must be rank 1", op, "?;?;[];?;?;?");
INFER_ERROR("must be rank 1", op, "?;?;?;[];?;?");
INFER_ERROR("must be rank 1", op, "?;?;?;?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;?;?;?;[]");
INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;?;[?];[?]", "[?,?];[?];in4|in5");
INFER_OK(op, "?;?;?;?;[?];[5]", "[?,?];[?];in5");
INFER_OK(op, "[4,5];[3,?];?;?;?;?", "[7,d0_1];[7];[?]");
INFER_OK(op, "?;?;[4];[3];?;?", "[7,?];[7];[?]");
INFER_OK(op, "[?,2];[3,?];[4];[?];?;?", "[7,d0_1];[7];[?]");
INFER_ERROR("but are 100 and 200", op, "[100,?];[?,?];[200];[?];?;?");
INFER_ERROR("but are 2 and 3", op, "[?,2];[?,3];[?];[?];?;?");
INFER_ERROR("but are 4 and 5", op, "?;?;?;?;[4];[5]");
}
TEST(SparseOpsTest, SparseDenseCwise_ShapeFn) {
for (const char* op_name :
{"SparseDenseCwiseMul", "SparseDenseCwiseDiv", "SparseDenseCwiseAdd"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?;?;?;?", "[?]");
INFER_OK(op, "[?,?];?;?;?", "[d0_0]");
INFER_ERROR("must be rank 2", op, "[1];?;?;?");
}
}
TEST(SparseOpsTest, AddSparseToTensorsMap_ShapeFn) {
ShapeInferenceTestOp op("AddSparseToTensorsMap");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[]");
}
TEST(SparseOpsTest, AddManySparseToTensorsMap_ShapeFn) {
ShapeInferenceTestOp op("AddManySparseToTensorsMap");
INFER_ERROR("must be rank 2", op, "[1];?;?");
INFER_ERROR("must be rank 1", op, "?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;[]");
INFER_OK(op, "?;?;?", "[?]");
}
TEST(SparseOpsTest, TakeManySparseFromTensorsMap_ShapeFn) {
ShapeInferenceTestOp op("TakeManySparseFromTensorsMap");
INFER_ERROR("must be rank 1", op, "[?,1]");
INFER_OK(op, "?", "[?,?];[?];[?]");
INFER_OK(op, "[?]", "[?,?];[?];[?]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54cdfcaf-1fbf-4422-882b-6441c5eea09a | cpp | tensorflow/tensorflow | candidate_sampling_ops | tensorflow/core/ops/candidate_sampling_ops.cc | tensorflow/core/ops/candidate_sampling_ops_test.cc | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
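// Shape function shared by all candidate sampler ops: every sampler emits
// num_sampled candidates, a [batch_size, num_true] expected-count matrix for
// the true classes, and a num_sampled-long expected-count vector.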
Status CandidateSamplerShapeFn(InferenceContext* c) {
int64_t num_sampled;
TF_RETURN_IF_ERROR(c->GetAttr("num_sampled", &num_sampled));
int64_t num_true;
TF_RETURN_IF_ERROR(c->GetAttr("num_true", &num_true));
ShapeHandle true_classes_shape;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &true_classes_shape));
DimensionHandle batch_size = c->Dim(true_classes_shape, 0);
ShapeHandle num_sampled_v = c->Vector(num_sampled);
c->set_output(0, num_sampled_v);
c->set_output(1, c->Matrix(batch_size, num_true));
c->set_output(2, num_sampled_v);
return absl::OkStatus();
}
}
REGISTER_OP("UniformCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("range_max: int >= 1")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("LogUniformCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("range_max: int >= 1")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("LearnedUnigramCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("range_max: int >= 1")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("ThreadUnsafeUnigramCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("range_max: int >= 1")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("FixedUnigramCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("range_max: int >= 1")
.Attr("vocab_file: string = ''")
.Attr("distortion: float = 1.0")
.Attr("num_reserved_ids: int = 0")
.Attr("num_shards: int >= 1 = 1")
.Attr("shard: int >= 0 = 0")
.Attr("unigrams: list(float) = []")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("AllCandidateSampler")
.Input("true_classes: int64")
.Output("sampled_candidates: int64")
.Output("true_expected_count: float")
.Output("sampled_expected_count: float")
.Attr("num_true: int >= 1")
.Attr("num_sampled: int >= 1")
.Attr("unique: bool")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn(CandidateSamplerShapeFn)
.SetIsStateful();
REGISTER_OP("ComputeAccidentalHits")
.Input("true_classes: int64")
.Input("sampled_candidates: int64")
.Output("indices: int32")
.Output("ids: int64")
.Output("weights: float")
.Attr("num_true: int")
.Attr("seed: int = 0")
.Attr("seed2: int = 0")
.SetShapeFn([](InferenceContext* c) {
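// The number of accidental hits is data-dependent, so all three outputs are
// unknown-length vectors; only input ranks and the num_true width are
// validated here.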
int64_t num_true;
TF_RETURN_IF_ERROR(c->GetAttr("num_true", &num_true));
ShapeHandle true_classes;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &true_classes));
DimensionHandle unused;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(true_classes, 1), num_true, &unused));
ShapeHandle sampled_candidates;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &sampled_candidates));
ShapeHandle v = c->Vector(InferenceContext::kUnknownDim);
c->set_output(0, v);
c->set_output(1, v);
c->set_output(2, v);
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CandidateSamplerOpsTest, CandidateSampler_ShapeFn) {
for (const char* op_name : {
"AllCandidateSampler",
"FixedUnigramCandidateSampler",
"LearnedUnigramCandidateSampler",
"LogUniformCandidateSampler",
"ThreadUnsafeUnigramCandidateSampler",
"UniformCandidateSampler",
}) {
ShapeInferenceTestOp op(op_name);
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_INT64})
.Attr("num_sampled", 5)
.Attr("num_true", 10)
.Finalize(&op.node_def));
INFER_OK(op, "?", "[5];[?,10];[5]");
INFER_OK(op, "[?,?]", "[5];[d0_0,10];[5]");
INFER_OK(op, "[8,9]", "[5];[d0_0,10];[5]");
INFER_ERROR("must be rank 2", op, "[1]");
}
}
TEST(CandidateSamplerOpsTest, ComputeAccidentalHits_ShapeFn) {
ShapeInferenceTestOp op("ComputeAccidentalHits");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_INT64})
.Input({"b", 0, DT_INT64})
.Attr("num_true", 10)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "[?];[?];[?]");
INFER_OK(op, "[?,?];?", "[?];[?];[?]");
INFER_OK(op, "[?,10];?", "[?];[?];[?]");
INFER_OK(op, "[5,?];?", "[?];[?];[?]");
INFER_ERROR("must be rank 2", op, "[1];?");
INFER_ERROR("must be 10", op, "[?,11];?");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/candidate_sampling_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/candidate_sampling_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
41389981-1007-4f8c-826c-97fd840bdf44 | cpp | tensorflow/tensorflow | nn_ops | tensorflow/c/experimental/ops/nn_ops.cc | tensorflow/core/ops/nn_ops_test.cc | #include "tensorflow/c/experimental/ops/nn_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
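// Hand-rolled eager wrappers for a few NN ops: each helper builds an
// AbstractOperation, wires up its inputs and attrs, and executes it,
// returning results through the caller-provided output handles.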
Status SparseSoftmaxCrossEntropyWithLogits(AbstractContext* ctx,
AbstractTensorHandle* const features,
AbstractTensorHandle* const labels,
AbstractTensorHandle** loss,
AbstractTensorHandle** backprop,
const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(
op_ptr->Reset("SparseSoftmaxCrossEntropyWithLogits", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
TF_RETURN_IF_ERROR(op_ptr->AddInput(labels));
int num_retvals = 2;
AbstractTensorHandle* temp_outputs[2];
Status status = op_ptr->Execute(temp_outputs, &num_retvals);
*loss = temp_outputs[0];
*backprop = temp_outputs[1];
return status;
}
Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients,
AbstractTensorHandle* const features,
AbstractTensorHandle** backprops, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("ReluGrad", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(gradients));
TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(backprops, 1), &num_retvals);
}
Status Relu(AbstractContext* ctx, AbstractTensorHandle* const features,
AbstractTensorHandle** activations, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Relu", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(features));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(activations, 1), &num_retvals);
}
Status BiasAdd(AbstractContext* ctx, AbstractTensorHandle* const value,
AbstractTensorHandle* const bias, AbstractTensorHandle** output,
const char* data_format, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("BiasAdd", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(value));
TF_RETURN_IF_ERROR(op_ptr->AddInput(bias));
TF_RETURN_IF_ERROR(
op_ptr->SetAttrString("data_format", data_format, strlen(data_format)));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status BiasAddGrad(AbstractContext* ctx,
AbstractTensorHandle* const out_backprop,
AbstractTensorHandle** output, const char* data_format,
const char* name, const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("BiasAddGrad", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(out_backprop));
TF_RETURN_IF_ERROR(
op_ptr->SetAttrString("data_format", data_format, strlen(data_format)));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
}
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(NNOpsTest, TopK_ShapeFn) {
ShapeInferenceTestOp op("TopK");
auto set_k = [&op](int k) {
TF_ASSERT_OK(NodeDefBuilder("test", "TopK")
                     .Input({"a", 0, DT_FLOAT})
.Attr("k", k)
.Finalize(&op.node_def));
};
set_k(20);
INFER_OK(op, "?", "?;?");
INFER_OK(op, "[20]", "[20];[20]");
INFER_OK(op, "[21]", "[20];[20]");
INFER_OK(op, "[1,?,21]", "[d0_0,d0_1,20];[d0_0,d0_1,20]");
INFER_OK(op, "[1,?,21,?]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_ERROR("input must have last dimension >= k = 20 but is 1", op, "[1]");
INFER_ERROR("input must have last dimension >= k = 20 but is 4", op,
"[1,2,3,4]");
set_k(-1);
INFER_ERROR("Need k >= 0, got -1", op, "[1,2,3,4]");
}
TEST(NNOpsTest, TopKV2_ShapeFn) {
ShapeInferenceTestOp op("TopKV2");
op.input_tensors.resize(2);
Tensor k_t;
op.input_tensors[1] = &k_t;
k_t = test::AsScalar<int32>(20);
INFER_OK(op, "?;[]", "?;?");
INFER_OK(op, "[20];[]", "[20];[20]");
INFER_OK(op, "[1,?,21];[]", "[d0_0,d0_1,20];[d0_0,d0_1,20]");
INFER_OK(op, "[1,?,21,?];[]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("input must have last dimension >= k = 20 but is 1", op,
"[1];[]");
INFER_ERROR("input must have last dimension >= k = 20 but is 4", op,
"[1,2,3,4];[]");
k_t = test::AsScalar<int32>(-1);
INFER_ERROR(
"Dimension size, given by scalar input 1, must be non-negative but is -1",
op, "[1,2,3,4];[]");
}
TEST(NNOpsTest, NthElement_ShapeFn) {
ShapeInferenceTestOp op("NthElement");
op.input_tensors.resize(2);
Tensor n_t;
op.input_tensors[1] = &n_t;
n_t = test::AsScalar<int32>(20);
INFER_OK(op, "?;[]", "?");
INFER_OK(op, "[21];[]", "[]");
INFER_OK(op, "[2,?,?];[]", "[d0_0,d0_1]");
INFER_OK(op, "[?,3,?,21];[]", "[d0_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Input must have last dimension > n = 20 but is 1", op, "[1];[]");
INFER_ERROR("Input must have last dimension > n = 20 but is 20", op,
"[1,2,3,20];[]");
n_t = test::AsScalar<int32>(-1);
INFER_ERROR(
"Dimension size, given by scalar input 1, must be non-negative but is -1",
op, "[1,2,3,4];[]");
}
TEST(NNOpsTest, BatchNormWithGlobalNormalization_ShapeFn) {
ShapeInferenceTestOp op("BatchNormWithGlobalNormalization");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]");
INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0]");
INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0]");
INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0]");
INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]",
"[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0]");
}
TEST(NNOpsTest, QuantizedBatchNormWithGlobalNormalization_ShapeFn) {
ShapeInferenceTestOp op("QuantizedBatchNormWithGlobalNormalization");
INFER_ERROR("Shape must be rank 4 but is rank 3", op,
"[1,2,3];?;?;?;?;?;?;?;?;?;?;?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op,
"?;?;?;[1,2,3];?;?;?;?;?;?;?;?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op,
"?;?;?;?;?;?;[1,2,3];?;?;?;?;?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op,
"?;?;?;?;?;?;?;?;?;[1,2,3];?;?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op,
"?;?;?;?;?;?;?;?;?;?;?;?;[1,2,3];?;?");
INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];?;[];[]", "[?,?,?,?];[];[]");
INFER_OK(op, "?;[];[];[1];[];[];?;[];[];?;[];[];?;[];[]",
"[?,?,?,d3_0];[];[]");
INFER_OK(op, "?;[];[];?;[];[];[1];[];[];?;[];[];?;[];[]",
"[?,?,?,d6_0];[];[]");
INFER_OK(op, "?;[];[];?;[];[];?;[];[];[1];[];[];?;[];[]",
"[?,?,?,d9_0];[];[]");
INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];[1];[];[]",
"[?,?,?,d12_0];[];[]");
INFER_OK(op, "[1,2,3,4];[];[];[4];[];[];[4];[];[];[4];[];[];[4];[];[]",
"[d0_0,d0_1,d0_2,d0_3|d3_0|d6_0|d9_0|d12_0];[];[]");
}
TEST(NNOpsTest, BatchNormWithGlobalNormalizationGrad_ShapeFn) {
ShapeInferenceTestOp op("BatchNormWithGlobalNormalizationGrad");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op,
"?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]");
INFER_OK(op, "[1,?,3,?];[?];[?];[?];[?,2,?,4]",
"[d0_0,d4_1,d0_2,d4_3];[d4_3];[d4_3];[d4_3];[d4_3]");
}
TEST(NNOpsTest, FusedBatchNorm_ShapeFn) {
ShapeInferenceTestOp op("FusedBatchNorm");
auto set_op = [&op](bool is_training, float exponential_avg_factor,
string data_format) {
TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNorm")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("data_format", data_format)
.Attr("is_training", is_training)
.Attr("exponential_avg_factor", exponential_avg_factor)
.Finalize(&op.node_def));
};
set_op(true, 1.0, "NHWC");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "[1,2,3,4];[4];[4];?;?",
"[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];"
"[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];"
"[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]");
set_op(true, 0.5, "NHWC");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "[1,2,3,4];[4];[4];?;?",
"[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];"
"[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];"
"[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]");
set_op(true, 1.0, "NCHW");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "[1,4,2,3];[4];[4];?;?",
"[d0_0,d0_1|d1_0|d2_0,d0_2,d0_3];"
"[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0];"
"[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0]");
set_op(false, 1.0, "NHWC");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]");
INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[d4_0];[d4_0]");
INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]",
"[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0];"
"[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0];"
"[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0]");
set_op(false, 1.0, "NCHW");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]");
INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]");
INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]");
INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[d3_0];[d3_0]");
INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[d4_0];[d4_0]");
INFER_OK(op, "[1,4,2,3];[4];[4];[4];[4]",
"[d0_0,d0_1|d1_0|d2_0|d3_0|d4_0,d0_2,d0_3];"
"[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0];"
"[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0]");
}
TEST(NNOpsTest, FusedBatchNormGrad_ShapeFn) {
ShapeInferenceTestOp op("FusedBatchNormGrad");
auto set_op = [&op](string data_format) {
TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNormGrad")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("data_format", data_format)
.Finalize(&op.node_def));
};
set_op("NCHW");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]");
INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[0];[0]");
INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[0];[0]");
INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[0];[0]");
INFER_OK(op, "[1,4,2,3];[1,4,2,3];[4];[4];[4]",
"[d0_0,d0_1|d2_0|d3_0|d4_0,d0_2,d0_3];"
"[d0_1|d2_0|d3_0|d4_0];[d0_1|d2_0|d3_0|d4_0];[0];[0]");
set_op("NHWC");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]");
INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[0];[0]");
INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[0];[0]");
INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[0];[0]");
INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4];[4]",
"[d0_0,d0_1,d0_2,d0_3|d2_0|d3_0|d4_0];"
"[d0_3|d2_0|d3_0|d4_0];[d0_3|d2_0|d3_0|d4_0];[0];[0]");
}
TEST(NNOpsTest, Conv2DBackpropInput_ShapeFn) {
ShapeInferenceTestOp op("Conv2DBackpropInput");
INFER_ERROR("input_sizes to contain 4 values or 2 values", op,
"[3];[?,?,?,?];[?,?,?,?]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op,
"[4];[?,?,?,?];[?,?,?]");
INFER_OK(op, "[4];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,?]");
INFER_OK(op, "[2];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,d1_2]");
}
TEST(NNOpsTest, Conv3DBackpropInput_ShapeFn) {
ShapeInferenceTestOp op("Conv3DBackpropInput");
INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?");
INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
INFER_OK(op, "[?,?,?,?,?];?;?", "in0");
INFER_OK(op, "[?,2,?,4,?];?;?", "in0");
}
TEST(NNOpsTest, Conv3DBackpropFilter_ShapeFn) {
ShapeInferenceTestOp op("Conv3DBackpropFilter");
INFER_ERROR("Shape must be rank 5 but is rank 3", op, "?;[1,2,3];?");
INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
INFER_OK(op, "?;[?,?,?,?,?];?", "in1");
INFER_OK(op, "?;[?,2,?,4,?];?", "in1");
}
TEST(NNOpsTest, MaxPool3DGrad_ShapeFn) {
ShapeInferenceTestOp op("MaxPool3DGrad");
INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?");
INFER_OK(op, "?;?;?", "[?,?,?,?,?]");
INFER_OK(op, "[?,?,?,?,?];?;?", "in0");
INFER_OK(op, "[?,2,?,4,?];?;?", "in0");
}
TEST(NNOpsTest, LRNGrad_ShapeFn) {
ShapeInferenceTestOp op("LRNGrad");
INFER_OK(op, "[1,?,?,4];[?,2,?,?];[?,?,3,?]", "[d0_0,d1_1,d2_2,d0_3]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;[1,2,3];?");
INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;?;[1,2,3]");
}
TEST(NNOpsTest, MaxPoolGrad_ShapeFn) {
for (const char* op_name : {"MaxPoolGrad", "MaxPoolGradWithArgmax"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
INFER_OK(op, "?;?;?", "[?,?,?,?]");
INFER_OK(op, "[?,?,?,?];?;?", "in0");
INFER_OK(op, "[?,2,?,4];?;?", "in0");
}
}
TEST(NNOpsTest, Dilation2DBackpropInput_ShapeFn) {
ShapeInferenceTestOp op("Dilation2DBackpropInput");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "?;[?,?,?,?,?];?", "in0");
INFER_OK(op, "?;[?,2,?,4,?];?", "in0");
}
TEST(NNOpsTest, Dilation2DBackpropFilter_ShapeFn) {
ShapeInferenceTestOp op("Dilation2DBackpropFilter");
INFER_OK(op, "?;?;?", "in1");
INFER_OK(op, "?;[?,?,?,?,?];?", "in1");
INFER_OK(op, "?;[?,2,?,4,?];?", "in1");
}
TEST(NNOpsTest, MergeBothInputs_ShapeFn) {
for (const char* op_name : {"ReluGrad", "Relu6Grad", "EluGrad", "SeluGrad",
"SoftplusGrad", "SoftsignGrad"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?;?", "in0|in1");
INFER_OK(op, "?;[1,?,3]", "in1");
INFER_OK(op, "[1,?,3];?", "in0");
INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op,
"[1,3];[?,2]");
}
}
TEST(NNOpsTest, SoftmaxCrossEntropyWithLogits_ShapeFn) {
ShapeInferenceTestOp op("SoftmaxCrossEntropyWithLogits");
INFER_OK(op, "?;?", "[?];[?,?]");
INFER_OK(op, "[?,?];[?,?]", "[d0_0|d1_0];in0|in1");
INFER_OK(op, "[1,2];[?,2]", "[d0_0];in0");
INFER_OK(op, "[1,?];[?,2]", "[d0_0];[d0_0,d0_1|d1_1]");
INFER_OK(op, "[?,2];[1,2]", "[d1_0];in1");
INFER_ERROR("Shape must be broadcasted with rank 2", op, "[1,2,3];?");
INFER_ERROR("Shape must be broadcasted with rank 2", op, "?;[1,2,3]");
INFER_OK(op, "[1,4];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]");
INFER_OK(op, "[2,4];[2,1]", "[d0_0];[d0_0|d1_0,d0_1]");
INFER_OK(op, "[1,?];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]");
INFER_OK(op, "[2,4];[?,1]", "[d0_0];[d0_0|d1_0,d0_1]");
}
TEST(NNOpsTest, SparseSoftmaxCrossEntropyWithLogits_ShapeFn) {
ShapeInferenceTestOp op("SparseSoftmaxCrossEntropyWithLogits");
INFER_OK(op, "?;?", "[?];[?,?]");
INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]");
INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]");
INFER_OK(op, "[?,2];[1]", "[d1_0];[d1_0,d0_1]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
}
TEST(NNOpsTest, InTopK_ShapeFn) {
ShapeInferenceTestOp op("InTopK");
INFER_OK(op, "?;?", "[?]");
INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0]");
INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0]");
INFER_OK(op, "[?,2];[1]", "[d1_0]");
INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]");
}
TEST(NNOpsTest, Dilation2DShapeTest) {
ShapeInferenceTestOp op("Dilation2D");
auto set_op = [&op](const std::vector<int32>& strides,
const std::vector<int32>& rates, const string& padding) {
TF_ASSERT_OK(NodeDefBuilder("test", "Dilation2D")
.Input("input", 0, DT_FLOAT)
.Input("filter", 0, DT_FLOAT)
.Attr("strides", strides)
.Attr("rates", rates)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, "VALID");
INFER_OK(op, "[1,2,2,2];[1,1,2]", "[d0_0,2,2,d1_2]");
set_op({1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2];[2,2,2]", "[d0_0,5,5,d1_2]");
}
TEST(NNOpsTest, FractionalPool_ShapeFn) {
for (const char* op_name : {"FractionalAvgPool", "FractionalMaxPool"}) {
ShapeInferenceTestOp op(op_name);
auto set_op = [&op, op_name](const std::vector<float>& pooling_ratio) {
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input("input", 0, DT_FLOAT)
.Attr("pooling_ratio", pooling_ratio)
.Finalize(&op.node_def));
};
set_op(std::vector<float>{2.0f, 1, 1.5f, 4.0f});
INFER_ERROR("must be rank 4", op, "[?,?,?]");
INFER_OK(op, "?", "[?,?,?,?];[?];[?]");
INFER_OK(op, "[?,?,?,?]", "[?,?,?,?];[?];[?]");
INFER_OK(op, "[10,20,30,40]", "[5,20,20,10];[20];[20]");
INFER_OK(op, "[?,20,30,40]", "[?,20,20,10];[20];[20]");
INFER_OK(op, "[10,?,30,40]", "[5,?,20,10];[?];[20]");
INFER_OK(op, "[10,20,?,40]", "[5,20,?,10];[20];[?]");
INFER_OK(op, "[10,20,30,?]", "[5,20,20,?];[20];[20]");
set_op(std::vector<float>{.5, 1.0, 1.5});
INFER_ERROR("pooling_ratio field", op, "?");
set_op(std::vector<float>{1, 2, 3, 4, 5});
INFER_ERROR("pooling_ratio field", op, "?");
set_op(std::vector<float>{-1, 2, 3, 4});
INFER_ERROR("is negative", op, "[1,2,3,4]");
}
}
TEST(NNOpsTest, FractionalMaxPoolGrad) {
ShapeInferenceTestOp op("FractionalMaxPoolGrad");
INFER_ERROR("must be rank 4", op, "[?,?,?];?;?;?;?");
INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]");
INFER_OK(op, "[?,?,3,4];?;?;?;?", "in0");
}
TEST(NNOpsTest, FractionalAvgPoolGrad) {
ShapeInferenceTestOp op("FractionalAvgPoolGrad");
op.input_tensors.resize(1);
INFER_OK(op, "?;?;?;?", "[?,?,?,?]");
std::vector<int32> shape{1, 2, 3, 4};
Tensor shape_t = test::AsTensor<int32>(shape);
op.input_tensors[0] = &shape_t;
INFER_OK(op, "[5];?;?;?", "[1,2,3,4]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/nn_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/nn_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5e2b254-879d-48d9-939b-833510f208ac | cpp | tensorflow/tensorflow | io_ops | tensorflow/c/experimental/ops/io_ops.cc | tensorflow/core/ops/io_ops_test.cc | #include "tensorflow/c/experimental/ops/io_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
Status RestoreV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle*> tensors,
absl::Span<DataType> dtypes, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("RestoreV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(prefix));
TF_RETURN_IF_ERROR(op_ptr->AddInput(tensor_names));
TF_RETURN_IF_ERROR(op_ptr->AddInput(shape_and_slices));
TF_RETURN_IF_ERROR(
op_ptr->SetAttrTypeList("dtypes", dtypes.data(), dtypes.length()));
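// One tensor is restored per entry in `dtypes`, so the caller's `tensors`
// span must be sized to match.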
int num_retvals = tensors.size();
return op_ptr->Execute(tensors, &num_retvals);
}
Status SaveV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle* const> tensors, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("SaveV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(prefix));
TF_RETURN_IF_ERROR(op_ptr->AddInput(tensor_names));
TF_RETURN_IF_ERROR(op_ptr->AddInput(shape_and_slices));
TF_RETURN_IF_ERROR(op_ptr->AddInputList(tensors));
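// SaveV2 has no outputs, so execute with an empty span and zero retvals.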
int num_retvals = 0;
std::vector<AbstractTensorHandle*> dummy_outputs;
return op_ptr->Execute(absl::MakeSpan(dummy_outputs), &num_retvals);
}
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(IoOpsTest, Save_ShapeFn) {
ShapeInferenceTestOp op("Save");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({{"c", 0, DT_FLOAT}, {"d", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "");
INFER_OK(op, "[];[2];?;?", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?");
}
TEST(IoOpsTest, SaveSlices_ShapeFn) {
ShapeInferenceTestOp op("SaveSlices");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({"c", 0, DT_STRING})
.Input({{"d", 0, DT_FLOAT}, {"e", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "");
INFER_OK(op, "[];[2];[2];?;?", "");
INFER_OK(op, "[];[2];[2];[100,200,300];[4,5]", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[2];[3];?;?");
}
TEST(IoOpsTest, Restore_ShapeFn) {
ShapeInferenceTestOp op("Restore");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, RestoreV2_ShapeFn) {
ShapeInferenceTestOp op("RestoreV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"prefix", 0, DT_STRING})
.Input({"tensor_names", 0, DT_STRING})
.Input({"shapes_and_slices", 0, DT_STRING})
.Attr("dtypes", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;?");
INFER_OK(op, "[];[10];[10]", "?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?,?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?];[?,?]");
INFER_ERROR("in both shapes must be equal", op, "[];[10];[20]");
}
TEST(IoOpsTest, RestoreSlice_ShapeFn) {
ShapeInferenceTestOp op("RestoreSlice");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[];[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilename_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilename");
INFER_OK(op, "?;?;?", "[]");
INFER_OK(op, "[];[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilespec_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilespec");
INFER_OK(op, "?;?", "[]");
INFER_OK(op, "[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, SingleScalarInputAndOutput_ShapeFns) {
for (const char* op_name : {"ReadFile"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
}
}
TEST(IoOpsTest, TwoElementVectorInputsAndScalarOutput_ShapeFns) {
for (const char* op_name :
{"ReaderNumRecordsProduced", "ReaderNumWorkUnitsCompleted",
"ReaderSerializeState"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
}
TEST(IoOpsTest, ReaderRead_ShapeFn) {
ShapeInferenceTestOp op("ReaderRead");
INFER_OK(op, "?;?", "[];[]");
INFER_OK(op, "[2];[?]", "[];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
}
TEST(IoOpsTest, ReaderReadUpTo_ShapeFn) {
ShapeInferenceTestOp op("ReaderReadUpTo");
INFER_OK(op, "[2];[2];[]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[2];[?]");
}
TEST(IoOpsTest, ReaderReset_ShapeFn) {
ShapeInferenceTestOp op("ReaderReset");
INFER_OK(op, "[2]", "");
INFER_OK(op, "[?]", "");
INFER_OK(op, "?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(IoOpsTest, ReaderRestoreState_ShapeFn) {
ShapeInferenceTestOp op("ReaderRestoreState");
INFER_OK(op, "?;?", "");
INFER_OK(op, "[2];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?]");
}
TEST(IoOpsTest, MatchingFiles_ShapeFn) {
ShapeInferenceTestOp op("MatchingFiles");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[]", "[?]");
INFER_OK(op, "[42]", "[?]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[?,?]");
}
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/io_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/io_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
de005b97-03d9-41d3-b166-71fb677e9413 | cpp | tensorflow/tensorflow | ctc_ops | tensorflow/core/ops/ctc_ops.cc | tensorflow/core/ops/ctc_ops_test.cc | #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
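// The CTC ops below take logits shaped [max_time, batch_size, num_classes],
// a sparse label tensor (labels_indices/labels_values), and per-example
// sequence lengths; each shape function validates those ranks and merges the
// batch dimension across inputs.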
REGISTER_OP("CTCLoss")
.Input("inputs: T")
.Input("labels_indices: int64")
.Input("labels_values: int32")
.Input("sequence_length: int32")
.Attr("preprocess_collapse_repeated: bool = false")
.Attr("ctc_merge_repeated: bool = true")
.Attr("ignore_longer_outputs_than_inputs: bool = false")
.Output("loss: T")
.Output("gradient: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle labels_indices;
ShapeHandle labels_values;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &labels_indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &labels_values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &sequence_length));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),
c->Dim(labels_values, 0), &unused));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(inputs, 1, batch_size, &inputs));
c->set_output(0, c->Vector(batch_size));
c->set_output(1, inputs);
return absl::OkStatus();
});
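// CTCLossV2 mirrors CTCLoss but is fixed to float inputs/outputs; its shape
// function is identical.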
REGISTER_OP("CTCLossV2")
.Input("inputs: float")
.Input("labels_indices: int64")
.Input("labels_values: int32")
.Input("sequence_length: int32")
.Attr("preprocess_collapse_repeated: bool = false")
.Attr("ctc_merge_repeated: bool = true")
.Attr("ignore_longer_outputs_than_inputs: bool = false")
.Output("loss: float")
.Output("gradient: float")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle labels_indices;
ShapeHandle labels_values;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &labels_indices));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &labels_values));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &sequence_length));
DimensionHandle unused;
TF_RETURN_IF_ERROR(c->Merge(c->Dim(labels_indices, 0),
c->Dim(labels_values, 0), &unused));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
TF_RETURN_IF_ERROR(c->ReplaceDim(inputs, 1, batch_size, &inputs));
c->set_output(0, c->Vector(batch_size));
c->set_output(1, inputs);
return absl::OkStatus();
});
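// CTCGreedyDecoder emits its decoding as a single rank-2 sparse tensor
// (decoded_indices/decoded_values/decoded_shape) plus per-example log
// probabilities of shape [batch_size, 1].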
REGISTER_OP("CTCGreedyDecoder")
.Input("inputs: T")
.Input("sequence_length: int32")
.Attr("merge_repeated: bool = false")
.Attr("blank_index: int = -1")
.Output("decoded_indices: int64")
.Output("decoded_values: int64")
.Output("decoded_shape: int64")
.Output("log_probability: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &sequence_length));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
DimensionHandle total_decoded_outputs = c->UnknownDim();
c->set_output(0, c->Matrix(total_decoded_outputs, 2));
c->set_output(1, c->Vector(total_decoded_outputs));
c->set_output(2, c->Vector(2));
c->set_output(3, c->Matrix(batch_size, 1));
return absl::OkStatus();
});
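// CTCBeamSearchDecoder emits `top_paths` sparse tensors, grouped as all
// indices outputs first, then all values, then all shapes, followed by a
// [batch_size, top_paths] matrix of log probabilities.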
REGISTER_OP("CTCBeamSearchDecoder")
.Input("inputs: T")
.Input("sequence_length: int32")
.Attr("beam_width: int >= 1")
.Attr("top_paths: int >= 1")
.Attr("merge_repeated: bool = true")
.Output("decoded_indices: top_paths * int64")
.Output("decoded_values: top_paths * int64")
.Output("decoded_shape: top_paths * int64")
.Output("log_probability: T")
.Attr("T: {float, double} = DT_FLOAT")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle inputs;
ShapeHandle sequence_length;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &inputs));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &sequence_length));
DimensionHandle batch_size;
TF_RETURN_IF_ERROR(
c->Merge(c->Dim(inputs, 1), c->Dim(sequence_length, 0), &batch_size));
int32_t top_paths;
TF_RETURN_IF_ERROR(c->GetAttr("top_paths", &top_paths));
int out_idx = 0;
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, c->Matrix(InferenceContext::kUnknownDim, 2));
}
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, c->Vector(InferenceContext::kUnknownDim));
}
ShapeHandle shape_v = c->Vector(2);
for (int i = 0; i < top_paths; ++i) {
c->set_output(out_idx++, shape_v);
}
c->set_output(out_idx++, c->Matrix(batch_size, top_paths));
return absl::OkStatus();
});
}  // namespace tensorflow | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CtcOpsTest, CTCLoss_ShapeFn) {
ShapeInferenceTestOp op("CTCLoss");
INFER_ERROR("must be rank 3", op, "[];?;?;?");
INFER_ERROR("must be rank 2", op, "?;[];?;?");
INFER_ERROR("must be rank 1", op, "?;?;[];?");
INFER_ERROR("must be rank 1", op, "?;?;?;[]");
INFER_ERROR("must be equal", op, "?;[1,?];[2];?");
INFER_OK(op, "[?,?,?];?;?;[?]", "[d0_1|d3_0];[d0_0,d0_1|d3_0,d0_2]");
INFER_OK(op, "[?,1,?];?;?;[1]", "[d0_1|d3_0];[d0_0,d0_1|d3_0,d0_2]");
INFER_OK(op, "[?,?,?];?;?;[1]", "[d3_0];[d0_0,d3_0,d0_2]");
INFER_OK(op, "[?,1,?];?;?;[?]", "[d0_1];[d0_0,d0_1,d0_2]");
INFER_ERROR("must be equal", op, "[?,1,?];?;?;[2]");
}
TEST(CtcOpsTest, CTCGreedyDecoder_ShapeFn) {
ShapeInferenceTestOp op("CTCGreedyDecoder");
INFER_ERROR("must be rank 3", op, "[];?");
INFER_ERROR("must be rank 1", op, "?;[]");
INFER_OK(op, "[?,?,?];[?]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,1,?];[1]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,?,?];[1]", "[?,2];[?];[2];[d1_0,1]");
INFER_OK(op, "[?,1,?];[?]", "[?,2];[?];[2];[d0_1,1]");
INFER_ERROR("must be equal", op, "[?,1,?];[2]");
}
TEST(CtcOpsTest, CTCBeamSearchDecoder_ShapeFn) {
ShapeInferenceTestOp op("CTCBeamSearchDecoder");
auto set_top_paths = [&op](int top_paths) {
TF_ASSERT_OK(NodeDefBuilder("test", "CTCBeamSearchDecoder")
.Input({"a", 0, DT_FLOAT})
.Input({"b", 0, DT_INT32})
.Attr("top_paths", top_paths)
.Finalize(&op.node_def));
};
set_top_paths(1);
INFER_ERROR("must be rank 3", op, "[];?");
INFER_ERROR("must be rank 1", op, "?;[]");
INFER_OK(op, "[?,?,?];[?]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,1,?];[1]", "[?,2];[?];[2];[d0_1|d1_0,1]");
INFER_OK(op, "[?,?,?];[1]", "[?,2];[?];[2];[d1_0,1]");
INFER_OK(op, "[?,1,?];[?]", "[?,2];[?];[2];[d0_1,1]");
INFER_ERROR("must be equal", op, "[?,1,?];[2]");
set_top_paths(2);
INFER_OK(op, "?;?", "[?,2];[?,2];[?];[?];[2];[2];[?,2]");
}
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/ctc_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/ctc_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f229d361-0fa4-493a-b68e-5e828b20bda1 | cpp | tensorflow/tensorflow | array_grad | tensorflow/c/experimental/gradients/array_grad.cc | tensorflow/c/experimental/gradients/array_grad_test.cc | #include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/eager/abstract_context.h"
namespace tensorflow {
namespace gradients {
namespace {
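// Gradient for IdentityN: incoming gradients pass straight through. Each
// non-null handle is Ref'd because `grad_inputs` takes ownership of its
// entries.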
class IdentityNGradientFunction : public GradientFunction {
public:
Status Compute(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> grad_outputs,
absl::Span<AbstractTensorHandle*> grad_inputs) override {
for (size_t i = 0; i < grad_outputs.size(); ++i) {
auto grad_input = grad_outputs[i];
if (grad_input) {
grad_input->Ref();
}
grad_inputs[i] = grad_input;
}
return absl::OkStatus();
}
~IdentityNGradientFunction() override = default;
};
}  // namespace
GradientFunction* IdentityNRegisterer(const ForwardOperation& op) {
return new IdentityNGradientFunction;
}
}  // namespace gradients
}  // namespace tensorflow | #include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/grad_test_helper.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
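// Builds IdentityN([x1, x2]) and returns only its second output; the first
// output is dropped (and unreffed), so the gradient w.r.t. x1 is expected to
// be null in the test below.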
Status IdentityNModel(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
std::vector<AbstractTensorHandle*> temp_outputs(2);
TF_RETURN_IF_ERROR(
ops::IdentityN(ctx, inputs, absl::MakeSpan(temp_outputs), "IdentityN"));
outputs[0] = temp_outputs[1];
temp_outputs[0]->Unref();
return absl::OkStatus();
}
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
status_ = StatusFromTF_Status(status.get());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
{
AbstractContext* ctx_raw = nullptr;
status_ =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
immediate_execution_ctx_.reset(ctx_raw);
}
enable_tensor_float_32_execution(false);
}
AbstractContextPtr immediate_execution_ctx_;
GradientRegistry registry_;
Status status_;
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
TEST_P(CppGradients, TestIdentityNGrad) {
AbstractTensorHandlePtr x1;
{
AbstractTensorHandle* x1_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 1.0f, &x1_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x1.reset(x1_raw);
}
AbstractTensorHandlePtr x2;
{
AbstractTensorHandle* x2_raw = nullptr;
status_ = TestScalarTensorHandle<float, TF_FLOAT>(
immediate_execution_ctx_.get(), 1.0f, &x2_raw);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
x2.reset(x2_raw);
}
status_ = registry_.Register("IdentityN", IdentityNRegisterer);
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
auto IdentityNGradModel = BuildGradModel(IdentityNModel, registry_);
std::vector<AbstractTensorHandle*> outputs(2);
status_ =
RunModel(IdentityNGradModel, immediate_execution_ctx_.get(),
{x1.get(), x2.get()}, absl::MakeSpan(outputs), UseFunction());
ASSERT_EQ(errors::OK, status_.code()) << status_.message();
EXPECT_EQ(outputs[0], nullptr);
ASSERT_NO_FATAL_FAILURE(
    CheckTensorValue(outputs[1], {1.0f}, /*dims=*/{}, /*abs_error=*/0));
outputs[1]->Unref();
}
INSTANTIATE_TEST_SUITE_P(
    UnifiedCAPI, CppGradients,
    ::testing::Combine(::testing::Values("graphdef", "mlir"),
                       ::testing::Values(false),
                       ::testing::Values(true, false)));
}  // namespace
}  // namespace internal
}  // namespace gradients
}  // namespace tensorflow | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/array_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/array_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
65156ca6-0b0f-4ed2-aba1-0ec9220c7da4 | cpp | tensorflow/tensorflow | array_ops | tensorflow/c/experimental/ops/array_ops.cc | tensorflow/core/ops/array_ops_test.cc | #include "tensorflow/c/experimental/ops/array_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
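// Thin eager/tracing wrappers over the corresponding array ops: each one
// creates an operation, forwards its inputs/attrs, and executes it with the
// expected number of return values.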
Status Identity(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Identity", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status IdentityN(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> input,
absl::Span<AbstractTensorHandle*> output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("IdentityN", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInputList(input));
int num_retvals = output.size();
return op_ptr->Execute(output, &num_retvals);
}
Status ZerosLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("ZerosLike", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Shape(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, DataType out_type, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Shape", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->SetAttrType("out_type", out_type));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status ExpandDims(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle* const dim,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("ExpandDims", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->AddInput(dim));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status OnesLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("OnesLike", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
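// Illustrative usage (a sketch; `ctx` and `x` are assumed to be valid
// handles owned by the caller):
//
//   AbstractTensorHandle* y = nullptr;
//   TF_RETURN_IF_ERROR(ZerosLike(ctx, x, &y, "zeros_like", ""));
//   AbstractTensorHandle* s = nullptr;
//   TF_RETURN_IF_ERROR(Shape(ctx, y, &s, DT_INT32, "shape", ""));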
}  // namespace ops
}  // namespace tensorflow | #include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(ArrayOpsTest, TensorScatterUpdate_ShapeFn) {
ShapeInferenceTestOp op("TensorScatterUpdate");
INFER_OK(op, "[4,3];[8,2];[8]", "in0");
INFER_OK(op, "[?,?];[?,2];[?]", "in0");
INFER_OK(op, "[?];[?];[?]", "in0");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[?,2];[?]");
INFER_ERROR("Indices and updates specified for empty input", op,
"[0,2,2];[8,2];[8]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[?,?];[8,2];[9]");
INFER_ERROR(
"Dimensions [2,2) of input[shape=[?,?]] = [] must match "
"dimensions [1,2) of updates[shape=[?,1]] = [1]",
op, "[?,?];[?,2];[?,1]");
}
TEST(ArrayOpsTest, ScatterNd_ShapeFn) {
ShapeInferenceTestOp op("ScatterNd");
INFER_OK(op, "[8,2];[8];[2]", "[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[?,2];[?];[]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[8,2];[9];[?]");
}
TEST(ArrayOpsTest, UnravelIndex_ShapeFn) {
ShapeInferenceTestOp op("UnravelIndex");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[?]", "[d1_0]");
INFER_OK(op, "[4,5];[?]", "[d1_0,20]");
INFER_OK(op, "[2,3,4];[?]", "[d1_0,24]");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "[d1_0,?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,1]");
}
TEST(ArrayOpsTest, Pack_ShapeFn) {
ShapeInferenceTestOp op("Pack");
auto set_axis = [&op](int axis) {
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input(src_list)
.Attr("N", n)
.Attr("axis", axis)
.Finalize(&op.node_def));
};
set_axis(0);
INFER_OK(op, "?;?;?", "?");
for (int axis : {0, -3}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[3,d0_0|d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[3,d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[3,d1_0,d1_1]");
}
for (int axis : {1, -2}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,3,d1_1]");
}
for (int axis : {2, -1}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,d1_1,3]");
}
set_axis(-4);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(0);
INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op,
"[1,2,3];?;[1,4]");
INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2,3];?;[1,4]");
}
TEST(ArrayOpsTest, UnPack_ShapeFn) {
ShapeInferenceTestOp op("Unpack");
auto set_axis_and_num = [&op](int axis, int num) {
TF_ASSERT_OK(NodeDefBuilder("test", "Unpack")
.Input("a", 0, DT_FLOAT)
.Attr("axis", axis)
.Attr("num", num)
.Finalize(&op.node_def));
};
set_axis_and_num(0, 1);
INFER_OK(op, "?", "?");
for (int axis : {0, -3}) {
set_axis_and_num(axis, 1);
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,2,3]", "[d0_1,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_1,d0_2]");
}
for (int axis : {1, -2}) {
set_axis_and_num(axis, 2);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_2];[d0_0,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_2];[d0_0,d0_2]");
}
for (int axis : {2, -1}) {
set_axis_and_num(axis, 3);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
}
set_axis_and_num(2, 2);
INFER_ERROR("Dimension must be 2 but is 3", op, "[1,2,3]");
set_axis_and_num(-4, 3);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,2,3]");
set_axis_and_num(3, 3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,2,3]");
}
TEST(ArrayOpsTest, Const_ShapeFn) {
ShapeInferenceTestOp op("Const");
TensorProto tensor_proto;
auto* shape_proto = tensor_proto.mutable_tensor_shape();
auto rebuild_node_def = [&op, &tensor_proto]() {
TF_ASSERT_OK(NodeDefBuilder("test", "Const")
.Attr("value", tensor_proto)
.Finalize(&op.node_def));
};
TensorShape{}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[]");
TensorShape{1, 2, 3, 4}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[1,2,3,4]");
shape_proto->add_dim()->set_size(-1);
rebuild_node_def();
INFER_ERROR("Shape [1,2,3,4,?] is not fully defined", op, "");
}
TEST(ArrayOpsTest, UnchangedShapes_ShapeFn) {
for (const char* op_name : {
"CheckNumerics",
"Identity",
"RefIdentity",
"QuantizeAndDequantize",
"StopGradient",
"ZerosLike",
"OnesLike",
}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
ShapeInferenceTestOp op("MatrixBandPart");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[];?;?", "in0");
INFER_OK(op, "[1,2,?,4,5];?;?", "in0");
}
TEST(ArrayOpsTest, GuaranteeConst_ShapeFn) {
ShapeInferenceTestOp op("GuaranteeConst");
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
TEST(ArrayOpsTest, Identity_ShapeFnHandles) {
const char* op_name = "Identity";
ShapeInferenceTestOp op(op_name);
const OpRegistrationData* op_reg_data;
TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
std::vector<
std::unique_ptr<std::vector<std::pair<PartialTensorShape, DataType>>>>
handle_data;
handle_data.emplace_back(
new std::vector<std::pair<PartialTensorShape, DataType>>(
{{PartialTensorShape(), DT_BOOL}}));
shape_inference::InferenceContext c(
TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def,
{PartialTensorShape()}, {}, {}, handle_data);
TF_ASSERT_OK(c.construction_status());
ASSERT_TRUE(op_reg_data->shape_inference_fn != nullptr);
TF_ASSERT_OK(c.Run(op_reg_data->shape_inference_fn));
const auto* shapes_and_types = c.output_handle_shapes_and_types(0);
ASSERT_TRUE(shapes_and_types != nullptr);
ASSERT_EQ(1, shapes_and_types->size());
EXPECT_EQ((*shapes_and_types)[0].dtype, DT_BOOL);
}
TEST(ArrayOpsTest, Diag_ShapeFn) {
ShapeInferenceTestOp op("Diag");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,3]", "[d0_0,d0_1,d0_2,d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2,d0_3,d0_0,d0_1,d0_2,d0_3]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, DiagPart_ShapeFn) {
ShapeInferenceTestOp op("DiagPart");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_3]");
INFER_OK(op, "[1,?,3,?,4,3]", "[d0_0,d0_4,d0_2|d0_5]");
INFER_OK(op, "[1,2,3,?,?,?,?,4]", "[d0_0,d0_1,d0_2,d0_7]");
INFER_ERROR("Input must have even and non-zero rank", op, "[]");
INFER_ERROR("Input must have even and non-zero rank", op, "[?]");
INFER_ERROR("Input must have even and non-zero rank", op, "[1,2,3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10", op, "[1,2,?,10]");
}
TEST(ArrayOpsTest, MatrixDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiag");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_OK(op, "[?]", "[d0_0,d0_0]");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,d0_3]");
}
TEST(ArrayOpsTest, MatrixDiagPart_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiagPart");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?]");
INFER_OK(op, "[?,1,2,2]", "[d0_0,d0_1,d0_2|d0_3]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,3,2]", "[d0_0,d0_1,d0_3]");
}
TEST(ArrayOpsTest, Reverse_ShapeFn) {
ShapeInferenceTestOp op("Reverse");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4]");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, ReverseV2_ShapeFn) {
ShapeInferenceTestOp op("ReverseV2");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_OK(op, "[1,2,3];[2]", "in0");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, Fill_ShapeFn) {
ShapeInferenceTestOp op("Fill");
AddNodeAttr("index_type", DT_INT32, &op.node_def);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "[4];?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[0] = &in_t;
INFER_OK(op, "[4];?", "[1,2,3,4]");
}
TEST(ArrayOpsTest, Gather_ShapeFn) {
ShapeInferenceTestOp op("Gather");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,2];[3]", "[d1_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1,2,3]");
}
TEST(ArrayOpsTest, GatherV2_ShapeFn) {
ShapeInferenceTestOp op("GatherV2");
AddNodeAttr("batch_dims", 0, &op.node_def);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,2,3];[3];[]", "[?,?,?]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[1,2,3];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];[1,2,3];[1]");
Tensor axis_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &axis_dim_t;
axis_dim_t = test::AsScalar(1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[1];[1,2];[]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[];[]", "[d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_1]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5];[]", "[d1_0,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d1_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d0_1,d1_0]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
axis_dim_t = test::AsScalar(-3);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(-2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(-1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
ShapeInferenceTestOp batch_op("GatherV2");
AddNodeAttr("batch_dims", 1, &batch_op.node_def);
INFER_OK(batch_op, "[1,4800,8];[1,28400];[]", "[?,?,?]");
ShapeInferenceTestOp batch_op_2("GatherV2");
AddNodeAttr("batch_dims", 2, &batch_op_2.node_def);
INFER_OK(batch_op_2, "[1,2,3,4,5];[1,2,3];[]", "[?,?,?,?,?]");
}
TEST(ArrayOpsTest, GatherNd_ShapeFn) {
ShapeInferenceTestOp op("GatherNd");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,3,?];[?,0]", "[d1_0,d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4]", "[d1_0]");
INFER_ERROR("indices.shape[-1] must be <= params.rank", op, "[1,2,3];[4]");
}
TEST(ArrayOpsTest, Shape_ShapeFn) {
ShapeInferenceTestOp op("Shape");
AddNodeAttr("out_type", DT_INT32, &op.node_def);
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[?]", "[1]");
INFER_OK(op, "[?,2,3,4,5]", "[5]");
}
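// Helper that runs TensorFlow's standalone TypeInferencePass on a copy of
// `graph`; used by the type-constructor test below.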
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
REGISTER_OP("ArrayOpsTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"))
.SetShapeFn(shape_inference::UnknownShape);
TEST(ArrayOpsTest, Shape_TypeCtor) {
Graph graph(OpRegistry::Global());
Node* input_tensor_op;
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_tensor_op", "ArrayOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op));
Node* shape_op;
TF_EXPECT_OK(NodeBuilder("shape_op", "Shape")
.Input(input_tensor_op)
.Attr("T", DT_FLOAT)
.Attr("out_type", DT_INT32)
.Finalize(&graph, &shape_op));
TF_EXPECT_OK(type_inference(graph));
FullTypeDef expected_shape_op_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_SHAPE_TENSOR
args { type_id: TFT_INT32 }
})pb",
&expected_shape_op_t));
EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(),
expected_shape_op_t))
<< "fulltype is\n"
<< shape_op->def().experimental_type().DebugString() << "\nexpected\n"
<< expected_shape_op_t.DebugString();
}
TEST(ArrayOpsTest, ShapeN_ShapeFn) {
ShapeInferenceTestOp op("ShapeN");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ShapeN")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "[?];[?];[?]");
INFER_OK(op, "[?];[?];[?]", "[1];[1];[1]");
INFER_OK(op, "[?,2,3,4,5];?;[1,?,3]", "[5];[?];[3]");
}
TEST(ArrayOpsTest, Unique_ShapeFn) {
ShapeInferenceTestOp op("Unique");
INFER_OK(op, "?", "[?];in0");
INFER_OK(op, "[5]", "[?];in0");
INFER_ERROR("Shape must be rank 1 but is rank 5", op, "[1,2,3,?,5]");
}
TEST(ArrayOpsTest, UniqueWithCounts_ShapeFn) {
ShapeInferenceTestOp op("UniqueWithCounts");
INFER_OK(op, "?", "[?];in0;[?]");
INFER_OK(op, "[1,2,3,?,5]", "[?];in0;[?]");
}
TEST(ArrayOpsTest, InvertPermutation_ShapeFn) {
ShapeInferenceTestOp op("InvertPermutation");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[1]", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, PadD_ShapeFn) {
for (const char* op_name : {"Pad", "MirrorPad"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3]");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2]");
INFER_OK(op, "[1,2,3];?", "[?,?,?]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2]", "[111,?,333]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
INFER_OK(op, "?;?", "[?,?,?]");
}
}
TEST(ArrayOpsTest, PadV2_ShapeFn) {
ShapeInferenceTestOp op("PadV2");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2];[]");
INFER_OK(op, "[1,2,3];?;[]", "[?,?,?]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2];[]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2];[]", "[111,?,333]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
INFER_OK(op, "?;?;[]", "[?,?,?]");
}
TEST(ArrayOpsTest, MirrorPadGrad_ShapeFn) {
ShapeInferenceTestOp op("MirrorPadGrad");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[?,4]", "?");
INFER_ERROR("must be rank 3 but is rank 2", op, "[?,?];[3,2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op,
"[?,?,?];[3,3]");
INFER_OK(op, "[?,?,?];[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[111,222,333];[3,2]", "[100,200,300]");
INFER_OK(op, "[111,?,333];[3,2]", "[100,?,300]");
}
TEST(ArrayOpsTest, BroadcastArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastArgs");
INFER_OK(op, "?;?", "[?]");
INFER_OK(op, "[123];[1]", "[123]");
INFER_OK(op, "[1];[123]", "[123]");
INFER_OK(op, "[123];[121]", "[123]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, BroadcastTo_ShapeFn) {
ShapeInferenceTestOp op("BroadcastTo");
op.input_tensors.resize(2);
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[];[1]", "[?]");
INFER_OK(op, "[1];[1]", "[?]");
INFER_OK(op, "[1];[2]", "[?,?]");
INFER_OK(op, "[2,2];[3]", "[?,d0_0,d0_1]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[2,2];[1]");
Tensor shape_t(DT_INT64, TensorShape{3});
test::FillValues<int64_t>(&shape_t, {2, 10, 3});
op.input_tensors[1] = &shape_t;
INFER_OK(op, "[1,?,1];[3]", "[2,10,3]");
INFER_OK(op, "[1,1,1];[3]", "[2,10,3]");
INFER_OK(op, "[10,1];[3]", "[2,d0_0,3]");
INFER_ERROR("Dimensions must be equal, but are 3 and 2 for", op,
"[3,1,1];[3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10 for", op,
"[2,2,1];[3]");
}
TEST(ArrayOpsTest, BroadcastGradientArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, ListDiff_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, MatrixSetDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixSetDiag");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[2,2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,2];[2,2]");
INFER_ERROR("Dimensions must be equal, but are 2 and 3", op, "[2,3];[3]");
INFER_OK(op, "?;?", "in0");
INFER_OK(op, "[1,2,2];[1,2]", "in0");
INFER_OK(op, "[1,2,3];?", "in0");
INFER_OK(op, "[1,3,2];?", "in0");
INFER_OK(op, "[1,?,2];[?,?]", "in0");
INFER_OK(op, "[1,?,?];[?,2]", "in0");
INFER_OK(op, "?;[1,2]", "[d1_0,?,?]");
INFER_OK(op, "[?,?,3];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,?];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2];[1,2]", "[d1_0,d0_1,d0_2]");
}
TEST(ArrayOpsTest, ExpandDims_ShapeFn) {
ShapeInferenceTestOp op("ExpandDims");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
Tensor dim_t;
op.input_tensors[1] = &dim_t;
for (int32_t idx : {0, -4}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
}
for (int32_t idx : {1, -3}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
}
for (int32_t idx : {2, -2}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
}
for (int32_t idx : {3, -1}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
}
for (int32_t idx : {4, -5}) {
dim_t = test::AsScalar<int32>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
dim_t = test::AsScalar<int64_t>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
}
std::vector<int32> dims;
dims.push_back(0);
dim_t = test::AsTensor<int32>(dims);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
dims.push_back(1);
dim_t = test::AsTensor<int32>(dims);
INFER_ERROR("'dim' input must be a tensor with a single", op, "?;?");
INFER_ERROR("'dim' input must be a tensor with a single", op, "[5,6,7];?");
dim_t = test::AsScalar<int32>(0);
INFER_OK(op, "[2];[]", "[1,d0_0]");
dim_t = test::AsScalar<int32>(1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
dim_t = test::AsScalar<int32>(-1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
}
TEST(ArrayOpsTest, ImmutableConst_ShapeFn) {
ShapeInferenceTestOp op("ImmutableConst");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({1, 2, 3}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", "invalid")
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_ERROR("AttrValue had value with type 'string' when 'shape' expected",
op, "");
}
TEST(ArrayOpsTest, Concat_ShapeFn) {
ShapeInferenceTestOp op("Concat");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Concat")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?;?");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;?;[1,2,3,4];[4,3,2,1]", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;?;[];[]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;?;[1,2];[1,2,3]");
Tensor concat_dim_t;
op.input_tensors.push_back(&concat_dim_t);
set_n(2);
for (int concat_dim : {0, -3}) {
concat_dim_t = test::AsScalar(concat_dim);
INFER_OK(op, "[];[100,2,?];[10,?,3]", "[110,d1_1,d2_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[];[100,2,5];[10,?,3]");
INFER_OK(op, "[];[100,2,?];[?,?,3]", "[?,d1_1,d2_2]");
INFER_OK(op, "[];[?,2,?];[10,?,3]", "[?,d1_1,d2_2]");
}
for (bool use_negative : {false, true}) {
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_OK(op, "[];[1,100,?];[?,10,3]", "[d1_0,110,d2_2]");
concat_dim_t = test::AsScalar(use_negative ? -1 : 1);
INFER_OK(op, "[];[1,100];[?,10]", "[d1_0,110]");
INFER_OK(op, "[];[?,100];[1,10]", "[d2_0,110]");
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
}
concat_dim_t = test::AsScalar(-2);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
set_n(5);
concat_dim_t = test::AsScalar(1);
INFER_OK(op, "[];?;[1,100,?];[?,?,?];[?,10,3];?", "[d2_0,?,d4_2]");
}
TEST(ArrayOpsTest, ConcatV2_ShapeFn) {
ShapeInferenceTestOp op("ConcatV2");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ConcatV2")
.Input(src_list)
.Input({"axis", 0, DT_INT32})
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;[1,2,3,4];[4,3,2,1];?", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;[];[];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2];[1,2,3];?");
Tensor concat_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &concat_dim_t;
set_n(2);
concat_dim_t = test::AsScalar(0);
INFER_OK(op, "[100,2,?];[10,?,3];[]", "[110,d0_1,d1_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[100,2,5];[10,?,3];[]");
INFER_OK(op, "[100,2,?];[?,?,3];[]", "[?,d0_1,d1_2]");
INFER_OK(op, "[?,2,?];[10,?,3];[]", "[?,d0_1,d1_2]");
concat_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,100,?];[?,10,3];[]", "[d0_0,110,d1_2]");
INFER_OK(op, "[1,100];[?,10];[]", "[d0_0,110]");
INFER_OK(op, "[?,100];[1,10];[]", "[d1_0,110]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100];[10,?];[]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100,5];[10];[]");
concat_dim_t = test::AsScalar(-2);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100];[10,?];[]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100,5];[10];[]");
op.input_tensors.resize(6);
op.input_tensors[3] = nullptr;
op.input_tensors[5] = &concat_dim_t;
concat_dim_t = test::AsScalar(1);
set_n(5);
INFER_OK(op, "?;[1,100,?];[?,?,?];[?,10,3];?;[]", "[d1_0,?,d3_2]");
}
TEST(ArrayOpsTest, ConcatOffset_ShapeFn) {
ShapeInferenceTestOp op("ConcatOffset");
const int n = 4;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_INT32);
TF_ASSERT_OK(NodeDefBuilder("test", "ConcatOffset")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "in1;in2;in3;in4");
}
TEST(ArrayOpsTest, Reshape_ShapeFn) {
ShapeInferenceTestOp op("Reshape");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "?");
INFER_OK(op, "[4];[?]", "?");
Tensor new_shape = test::AsTensor<int32>({1, 2, 3});
op.input_tensors[1] = &new_shape;
INFER_OK(op, "?;[3]", "[1,2,3]");
INFER_OK(op, "[?];[3]", "[1,2,3]");
INFER_OK(op, "[6];[3]", "[1,2,3]");
INFER_ERROR(
"Cannot reshape a tensor with 12 elements to shape [1,2,3] (6 elements)",
op, "[3,4];[3]");
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "?;[1]", "[?]");
INFER_OK(op, "[?];[1]", "[d0_0]");
INFER_OK(op, "[2,2];[1]", "[4]");
new_shape = test::AsTensor<int32>({2, -1});
INFER_OK(op, "[3,4];[2]", "[2,6]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 7", op,
"[7];[2]");
new_shape = test::AsTensor<int32>({-1, -1, 2});
INFER_OK(op, "[8];[3]", "[?,?,2]");
INFER_OK(op, "?;[3]", "[?,?,2]");
new_shape = test::AsTensor<int32>({-1, 2, 3});
INFER_OK(op, "[?,2,3];[3]", "[d0_0,2,3]");
new_shape = test::AsTensor<int32>({});
INFER_OK(op, "[1];[0]", "[]");
INFER_ERROR(
"Cannot reshape a tensor with 2 elements to shape [] (1 elements)", op,
"[1,2];[0]");
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "[0];[1]", "[0]");
new_shape = test::AsTensor<int32>({-1, 6});
INFER_OK(op, "[0,2];[1]", "[0,6]");
new_shape = test::AsTensor<int32>({0, -1});
INFER_OK(op, "[0,2];[1]", "[0,?]");
}
TEST(ArrayOpsTest, QuantizedReshape_ShapeFn) {
ShapeInferenceTestOp op("QuantizedReshape");
op.input_tensors.resize(2);
INFER_OK(op, "?;?;?;?", "?;[];[]");
INFER_OK(op, "[?];?;?;?", "?;[];[]");
INFER_OK(op, "[?];[?];?;?", "?;[];[]");
INFER_OK(op, "[4];[?];?;?", "?;[];[]");
Tensor new_shape = test::AsTensor<int32>({1, 2, 3});
op.input_tensors[1] = &new_shape;
INFER_OK(op, "[?];[3];?;?", "[1,2,3];[];[]");
INFER_OK(op, "[6];[3];?;?", "[1,2,3];[];[]");
INFER_ERROR(
"Cannot reshape a tensor with 12 elements to shape [1,2,3] (6 elements)",
op, "[3,4];[3];?;?");
INFER_ERROR("must be rank 0", op, "?;?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;?;[1]");
}
TEST(ArrayOpsTest, Placeholder_ShapeFn) {
{
ShapeInferenceTestOp op("Placeholder");
TensorShape shape({1, 2});
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2]");
}
{
ShapeInferenceTestOp op("Placeholder");
TensorShape shape({});
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
}
{
ShapeInferenceTestOp op("Placeholder");
const int64_t dims[2] = {1, -1};
PartialTensorShape shape;
TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 2, &shape));
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,?]");
}
{
ShapeInferenceTestOp op("Placeholder");
PartialTensorShape shape;
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "?");
}
}
TEST(ArrayOpsTest, Transpose_ShapeFn) {
ShapeInferenceTestOp op("Transpose");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "?;[2]", "[?,?]");
INFER_OK(op, "[?];?", "[?]");
INFER_OK(op, "[?,?];[2]", "[?,?]");
INFER_ERROR("Dimension must be 3 but is 2", op, "[1,2,3];[2]");
Tensor perm = test::AsTensor<int32>({0});
op.input_tensors[1] = &perm;
INFER_OK(op, "[?];[?]", "[d0_0]");
perm = test::AsTensor<int32>({1, 0});
INFER_OK(op, "?;[2]", "[?,?]");
INFER_OK(op, "[?,?];[2]", "[d0_1,d0_0]");
INFER_OK(op, "[1,?];[2]", "[d0_1,d0_0]");
INFER_OK(op, "?;[0]", "in0");
perm = test::AsTensor<int32>({1, 2});
INFER_ERROR("perm dim 2 is out of range of input rank 2", op, "[1,2];[2]");
perm = test::AsTensor<int32>({0});
INFER_ERROR("Dimension must be 2 but is 1", op, "[1,2];[1]");
perm = test::AsTensor<int32>({1, 0, 3, 4, 2});
INFER_OK(op, "[0,1,2,3,4];[5]", "[d0_1,d0_0,d0_3,d0_4,d0_2]");
INFER_OK(op, "[0,?,2,3,4];[5]", "[d0_1,d0_0,d0_3,d0_4,d0_2]");
}
TEST(ArrayOpsTest, Bitcast_ShapeFn) {
ShapeInferenceTestOp op("Bitcast");
auto rebuild_node_def = [&op](DataType input_type, DataType output_type) {
TF_ASSERT_OK(NodeDefBuilder("test", "Bitcast")
.Input("input", 0, input_type)
.Attr("type", output_type)
.Finalize(&op.node_def));
};
rebuild_node_def(DT_FLOAT, DT_INT32);
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,2]", "in0");
rebuild_node_def(DT_INT32, DT_INT64);
INFER_OK(op, "[1,2]", "[d0_0]");
INFER_OK(op, "[1,?]", "[d0_0]");
INFER_ERROR("does not match", op, "[1,4]");
INFER_ERROR("does not match", op, "[1,3]");
rebuild_node_def(DT_INT64, DT_INT32);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,2]");
rebuild_node_def(DT_COMPLEX128, DT_INT32);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,4]");
rebuild_node_def(DT_COMPLEX128, DT_HALF);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,8]");
rebuild_node_def(DT_COMPLEX128, DT_INT8);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,16]");
rebuild_node_def(DT_STRING, DT_INT32);
INFER_ERROR("one of the type sizes is zero", op, "[1,2,3]");
rebuild_node_def(DT_INT32, DT_STRING);
INFER_ERROR("one of the type sizes is zero", op, "[1,2,3]");
}
TEST(ArrayOpsTest, Squeeze_ShapeFn) {
ShapeInferenceTestOp op("Squeeze");
auto rebuild_node_def = [&op](const std::vector<int32>& squeeze_dims) {
TF_ASSERT_OK(NodeDefBuilder("test", "Squeeze")
.Input("input", 0, DT_FLOAT)
.Attr("squeeze_dims", squeeze_dims)
.Finalize(&op.node_def));
};
rebuild_node_def({});
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,4,1,5,1]", "[d0_1,d0_3]");
INFER_OK(op, "[1,?,1,?,1]", "?");
rebuild_node_def({1});
INFER_OK(op, "[4,1,5]", "[d0_0,d0_2]");
INFER_OK(op, "[4,?,5]", "[d0_0,d0_2]");
INFER_ERROR("Can not squeeze dim[1]", op, "[4,6,5]");
rebuild_node_def({1, 2});
INFER_OK(op, "[4,1,1,5]", "[d0_0,d0_3]");
rebuild_node_def({1, -2});
INFER_OK(op, "[4,1,1,5]", "[d0_0,d0_3]");
rebuild_node_def({-2});
INFER_OK(op, "[4,1,5]", "[d0_0,d0_2]");
rebuild_node_def({-4});
INFER_ERROR("not in [-3,3)", op, "[1,2,3]");
rebuild_node_def({3});
INFER_ERROR("not in [-3,3)", op, "[1,2,3]");
}
TEST(ArrayOpsTest, ReverseSequence_ShapeFn) {
ShapeInferenceTestOp op("ReverseSequence");
auto rebuild_node_def = [&op](const int32_t seq_dim,
const int32_t batch_dim) {
TF_ASSERT_OK(NodeDefBuilder("test", "ReverseSequence")
.Input("input", 0, DT_FLOAT)
.Input("seq_lengths", 1, DT_INT64)
.Attr("seq_dim", seq_dim)
.Attr("batch_dim", batch_dim)
.Finalize(&op.node_def));
};
rebuild_node_def(1, 2);
INFER_OK(op, "?;[10]", "?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[10,10]");
rebuild_node_def(1, 4);
INFER_ERROR("batch_dim must be < input rank", op, "[1,2,3];[3]");
rebuild_node_def(4, 1);
INFER_ERROR("seq_dim must be < input rank", op, "[1,2,3];[3]");
rebuild_node_def(1, 2);
INFER_OK(op, "[1,2,3];[3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[1,2,?];[3]", "[d0_0,d0_1,d1_0]");
INFER_OK(op, "[1,2,3];[?]", "[d0_0,d0_1,d0_2]");
}
TEST(ArrayOpsTest, Split_ShapeFn) {
ShapeInferenceTestOp op("Split");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Split")
.Input("split_dim", 0, DT_INT32)
.Input("value", 1, DT_FLOAT)
.Attr("num_split", 2)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[?,?];[?,?]");
INFER_OK(op, "?;[1,4]", "[?,?];[?,?]");
Tensor split_dim = test::AsTensor<int32>({1, 2});
op.input_tensors[0] = &split_dim;
INFER_ERROR("Input must be scalar but has rank 1", op, "[?];[?,?]");
split_dim = test::AsScalar<int32>(1);
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,4]", "[d1_0,2];[d1_0,2]");
INFER_OK(op, "?;[1,?]", "[d1_0,?];[d1_0,?]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 5", op,
"?;[1,5]");
split_dim = test::AsScalar<int32>(3);
INFER_ERROR(
"Dimension size, given by scalar input 3 must be in range [-3, 3)", op,
"?;[1,4,8]");
split_dim = test::AsScalar<int32>(-1);
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,4]", "[d1_0,2];[d1_0,2]");
INFER_OK(op, "?;[1,4,8]", "[d1_0,d1_1,4];[d1_0,d1_1,4]");
split_dim = test::AsScalar<int32>(-2);
INFER_OK(op, "?;[1,4,8]", "[d1_0,2,d1_2];[d1_0,2,d1_2]");
split_dim = test::AsScalar<int32>(-4);
INFER_ERROR(
"Dimension size, given by scalar input -4 must be in range [-3, 3)", op,
"?;[1,4,8]");
}
TEST(ArrayOpsTest, Tile_ShapeFn) {
ShapeInferenceTestOp op("Tile");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Tile")
.Input("input", 0, DT_FLOAT)
.Input("multiples", 1, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[2,3,1,4];?", "[?,?,?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,3,1,4];[4,1]");
INFER_OK(op, "?;[4]", "[?,?,?,?]");
Tensor multiples = test::AsTensor<int32>({2, 3, 4, 5});
op.input_tensors[1] = &multiples;
INFER_OK(op, "[2,3,1,4];[4]", "[4,9,4,20]");
multiples = test::AsTensor<int64_t>({2, 3, 4, 5});
INFER_OK(op, "[2,3,1,4];[4]", "[4,9,4,20]");
}
TEST(ArrayOpsTest, EditDistance_ShapeFn) {
ShapeInferenceTestOp op("EditDistance");
op.input_tensors.resize(6);
INFER_OK(op, "[?,?];[?];[4];[?,?];[?];[4]", "?");
Tensor hypothesis_shape = test::AsTensor<int64_t>({2, 30, 4, 50});
op.input_tensors[2] = &hypothesis_shape;
Tensor truth_shape = test::AsTensor<int64_t>({20, 3, 40, 5});
op.input_tensors[5] = &truth_shape;
INFER_OK(op, "[?,?];[?];[4];[?,?];[?];[4]", "[20,30,40]");
hypothesis_shape = test::AsTensor<int64_t>({2});
op.input_tensors[2] = &hypothesis_shape;
INFER_ERROR("Num elements of hypothesis_shape does not match truth_shape", op,
"[?,?];[?];[1];[?,?];[?];[4]");
}
TEST(ArrayOpsTest, OneHot_ShapeFn) {
ShapeInferenceTestOp op("OneHot");
op.input_tensors.resize(4);
auto set_axis = [&op](int axis) {
TF_ASSERT_OK(NodeDefBuilder("test", "OneHot")
.Input("indices", 0, DT_FLOAT)
.Input("depth", 1, DT_INT32)
.Input("on_value", 2, DT_FLOAT)
.Input("off_value", 3, DT_FLOAT)
.Attr("axis", axis)
.Finalize(&op.node_def));
};
set_axis(-2);
INFER_ERROR("axis must be >= -1", op, "?;?;?;?");
set_axis(1);
INFER_OK(op, "?;[];?;?", "?");
Tensor depth = test::AsTensor<int32>({1, 2});
op.input_tensors[1] = &depth;
INFER_ERROR("Input must be scalar but has rank 1", op, "?;[2];?;?");
depth = test::AsScalar<int32>(2);
INFER_OK(op, "[1,3,4];[];?;?", "[d0_0,2,d0_1,d0_2]");
set_axis(-1);
INFER_OK(op, "[1,3,4];[];?;?", "[d0_0,d0_1,d0_2,2]");
}
TEST(ArrayOpsTest, ExtractImagePatchesShapeTest) {
ShapeInferenceTestOp op("ExtractImagePatches");
auto set_op = [&op](const std::vector<int32>& ksizes,
const std::vector<int32>& strides,
const std::vector<int32>& rates, const string& padding) {
TF_ASSERT_OK(NodeDefBuilder("test", "ExtractImagePatches")
.Input("input", 0, DT_FLOAT)
.Attr("ksizes", ksizes)
.Attr("strides", strides)
.Attr("rates", rates)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 2, 2, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2]", "[d0_0,5,5,8]");
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2]", "[d0_0,7,7,d0_3]");
set_op({1, 2, 2, 1, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_ERROR(
"ExtractImagePatches requires the ksizes attribute to contain 4 values, "
"but got: 5",
op, "[1,7,7,2]");
}
TEST(ArrayOpsTest, QuantizeAndDequantizeV2_ShapeFn) {
ShapeInferenceTestOp op("QuantizeAndDequantizeV2");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "QuantizeAndDequantizeV2")
.Input("input", 0, DT_FLOAT)
.Input("input_min", 1, DT_FLOAT)
.Input("input_max", 2, DT_FLOAT)
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("narrow_range", false)
.Attr("axis", -1)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[];?;?", "in0");
INFER_OK(op, "[1,2,?,4,5];?;?", "in0");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1,2,?,4,5];[1];[]");
INFER_ERROR("Shapes must be equal rank, but are 1 and 0", op,
"[1,2,?,4,5];[];[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1,2,?,4,5];[1];[1]");
(*op.node_def.mutable_attr())["axis"].set_i(-2);
INFER_ERROR("axis should be at least -1, got -2", op, "?;?;?");
}
TEST(ArrayOpsTest, SpaceToBatch_ShapeFn) {
ShapeInferenceTestOp op("SpaceToBatch");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToBatch")
.Input("input", 0, DT_FLOAT)
.Input("paddings", 1, DT_INT32)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,?,?,d0_3]");
INFER_OK(op, "[1,10,10,3];?", "[4,?,?,d0_3]");
INFER_ERROR("rank", op, "[1,10,10,3];[4]");
INFER_ERROR("3 and 2", op, "[1,10,10,3];[2,3]");
Tensor paddings = test::AsTensor<int32>({4, 2, 2, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,8,8,d0_3]");
paddings = test::AsTensor<int64_t>({4, 2, 2, 4}, {{2, 2}});
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,8,8,d0_3]");
paddings = test::AsTensor<int32>({1, 2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 13", op,
"[1,10,10,3];[2,2]");
paddings = test::AsTensor<int32>({1, -2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_ERROR("cannot be negative", op, "[1,10,10,3];[2,2]");
}
TEST(ArrayOpsTest, SpaceToBatchND_ShapeFn) {
ShapeInferenceTestOp op("SpaceToBatchND");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToBatchND")
.Input("input", 0, DT_FLOAT)
.Input("block_shape", 1, DT_INT32)
.Input("paddings", 2, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;[2];?", "?");
INFER_OK(op, "[?,?,?,?];[2];?", "[?,?,?,d0_3]");
INFER_OK(op, "[?,?,?,2];[2];?", "[?,?,?,d0_3]");
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_OK(op, "[3,?,?,2];[2];?", "[18,?,?,d0_3]");
{
Tensor paddings = test::AsTensor<int32>({1, 1, 0, 1}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_OK(op, "[3,?,2,2];[2];[2,2]", "[18,?,1,d0_3]");
op.input_tensors[2] = nullptr;
}
{
Tensor paddings = test::AsTensor<int32>({1, 1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_OK(op, "[3,2,3,2];[2];[2,2]", "[18,2,1,d0_3]");
op.input_tensors[2] = nullptr;
}
op.input_tensors[1] = nullptr;
}
INFER_ERROR("block_shape must have rank 1", op, "?;[1,1];?");
INFER_ERROR("block_shape must have known size", op, "?;[?];?");
{
Tensor block_shape = test::AsTensor<int32>({0, 2});
op.input_tensors[1] = &block_shape;
INFER_ERROR("block_shape must be positive", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({1, 1});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, -1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("paddings cannot be negative", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({3, 3});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, 0, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("divisible", op, "[1,2,3,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({});
op.input_tensors[2] = &paddings;
INFER_OK(op, "?;[0];[0,2]", "?");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
INFER_ERROR("rank", op, "[1,3,3,1];[2];[1]");
INFER_ERROR("shape", op, "[1,3,3,1];[2];[1,2]");
}
TEST(ArrayOpsTest, BatchToSpace_ShapeFn) {
ShapeInferenceTestOp op("BatchToSpace");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpace")
.Input("input", 0, DT_FLOAT)
.Input("crops", 1, DT_INT32)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[4,8,8,3];[2,2]", "[1,?,?,d0_3]");
INFER_ERROR("Dimension size must be evenly divisible by", op,
"[5,8,8,3];[2,2]");
INFER_OK(op, "[4,8,8,3];?", "[1,?,?,d0_3]");
INFER_ERROR("rank", op, "[4,8,8,3];[4]");
INFER_ERROR("3 and 2", op, "[4,8,8,3];[2,3]");
Tensor croppings = test::AsTensor<int64_t>({4, 2, 2, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_OK(op, "[4,8,8,3];[2,2]", "[1,10,10,d0_3]");
croppings = test::AsTensor<int32>({100, 2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("Negative dimension size caused by subtracting", op,
"[4,8,8,3];[2,2]");
croppings = test::AsTensor<int32>({1, 2, 3, 400}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("Negative dimension size caused by subtracting", op,
"[4,8,8,3];[2,2]");
croppings = test::AsTensor<int32>({1, -2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("cannot be negative", op, "[4,8,8,3];[2,2]");
}
TEST(ArrayOpsTest, BatchToSpaceND_ShapeFn) {
ShapeInferenceTestOp op("BatchToSpaceND");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpaceND")
.Input("input", 0, DT_FLOAT)
.Input("block_shape", 1, DT_INT32)
.Input("crops", 2, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;[2];?", "?");
INFER_OK(op, "[?,?,?,?];[2];?", "[?,?,?,d0_3]");
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_OK(op, "[?,?,?,2];[2];?", "[?,?,?,d0_3]");
INFER_OK(op, "[18,?,?,2];[2];?", "[3,?,?,d0_3]");
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 1}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,?,2,2];[2];[2,2]", "[3,?,5,d0_3]");
op.input_tensors[2] = nullptr;
}
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,2,1,2];[2];[2,2]", "[3,2,3,d0_3]");
op.input_tensors[2] = nullptr;
}
op.input_tensors[1] = nullptr;
}
INFER_ERROR("block_shape must have rank 1", op, "?;[1,1];?");
INFER_ERROR("block_shape must have known size", op, "?;[?];?");
INFER_ERROR("rank", op, "[2,2];[2];[2,2]");
INFER_ERROR("rank", op, "[2,2,3];[3];[3,2]");
{
Tensor block_shape = test::AsTensor<int32>({0, 2});
op.input_tensors[1] = &block_shape;
INFER_ERROR("block_shape must be positive", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({1, 1});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, -1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("crops cannot be negative", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({2, 2});
op.input_tensors[1] = &block_shape;
Tensor crops = test::AsTensor<int32>({3, 2, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_ERROR("Negative", op, "[4,2,3,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_ERROR("divisible", op, "[3,1,1,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
}
TEST(ArrayOpsTest, SpaceToDepth_ShapeFn) {
ShapeInferenceTestOp op("SpaceToDepth");
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToDepth")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,2,4,4]", "[d0_0,1,2,16]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 3", op,
"[1,3,8,4]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 5", op,
"[1,2,5,4]");
INFER_OK(op, "[1,2,4,?]", "[d0_0,1,2,?]");
}
TEST(ArrayOpsTest, DepthToSpace_ShapeFn) {
ShapeInferenceTestOp op("DepthToSpace");
TF_ASSERT_OK(NodeDefBuilder("test", "DepthToSpace")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,1,2,16]", "[d0_0,2,4,4]");
INFER_ERROR("Dimension size must be evenly divisible by 4 but is 15", op,
"[1,1,2,15]");
INFER_OK(op, "[1,2,4,?]", "[d0_0,4,8,?]");
TF_ASSERT_OK(NodeDefBuilder("test", "DepthToSpace")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 10)
.Finalize(&op.node_def));
INFER_OK(op, "[1,1,2,200]", "[d0_0,10,20,2]");
}
TEST(ArrayOpsTest, Slice_ShapeFn) {
ShapeInferenceTestOp op("Slice");
TF_ASSERT_OK(NodeDefBuilder("test", "Slice")
.Input("input", 0, DT_FLOAT)
.Input("begin", 1, DT_INT64)
.Input("sizes", 2, DT_INT64)
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,4,5];[4];[4]", "[?,?,?,?]");
INFER_OK(op, "[2,3,4,5];[?];[?]", "[?,?,?,?]");
INFER_OK(op, "?;[?];[?]", "?");
INFER_OK(op, "?;[4];[?]", "[?,?,?,?]");
INFER_ERROR("must be rank 1", op, "[2,3,4,5];[2,3];[3]");
INFER_ERROR("must be rank 1", op, "[2,3,4,5];[2];[3,4]");
INFER_ERROR("must be rank 2", op, "[2,3,4,5];[2];[2]");
op.input_tensors.resize(3);
Tensor begin = test::AsTensor<int32>({0, 1, 2, 1});
Tensor sizes = test::AsTensor<int32>({1, 2, 1, 3});
op.input_tensors[1] = &begin;
op.input_tensors[2] = &sizes;
INFER_OK(op, "[2,3,4,5];[4];[4]", "[1,2,1,3]");
sizes = test::AsTensor<int32>({-1, -1, 1, -1});
INFER_OK(op, "[2,3,4,5];[4];[4]", "[d0_0,2,1,4]");
begin = test::AsTensor<int32>({0, 1, 2, 6});
sizes = test::AsTensor<int32>({-1, -1, -1, -1});
INFER_ERROR("Negative dimension size", op, "[2,3,4,5];[4];[4]");
begin = test::AsTensor<int32>({0, 1, 2, 5});
sizes = test::AsTensor<int32>({-1, -1, -1, -2});
INFER_ERROR("cannot be < -1", op, "[2,3,4,5];[4];[4]");
}
TEST(ArrayOpsTest, StridedSlice_ShapeFn) {
ShapeInferenceTestOp op("StridedSlice");
TF_ASSERT_OK(NodeDefBuilder("test", "StridedSlice")
.Input("input", 0, DT_FLOAT)
.Input("begin", 1, DT_INT32)
.Input("end", 2, DT_INT32)
.Input("strides", 3, DT_INT32)
.Attr("shrink_axis_mask", 1)
.Finalize(&op.node_def));
op.input_tensors.resize(4);
Tensor strides = test::AsTensor<int32>({1});
op.input_tensors[3] = &strides;
INFER_OK(op, "[2,3,4,5];[1];[1];[1]", "[3,4,5]");
INFER_OK(op, "[2,0,3,4];[1];[1];[1]", "[0,3,4]");
}
TEST(ArrayOpsTest, StridedSliceGrad_ShapeFn) {
ShapeInferenceTestOp op("StridedSliceGrad");
op.input_tensors.resize(5);
INFER_OK(op, "?;?;?;?;?", "?");
INFER_OK(op, "[?];?;?;?;?", "?");
INFER_OK(op, "[4];?;?;?;?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[0] = &in_t;
INFER_OK(op, "[4];?;?;?;?", "[1,2,3,4]");
}
TEST(ArrayOpsTest, UnchangedWithQuantizationScalars_ShapeFn) {
for (const char* op_name : {"Dequantize", "FakeQuantWithMinMaxVars"}) {
ShapeInferenceTestOp op(op_name);
if (op_name[0] == 'D') {
TF_ASSERT_OK(NodeDefBuilder("test", "Dequantize")
.Input("input", 0, DT_QINT8)
.Input("input_min", 1, DT_FLOAT)
.Input("input_max", 2, DT_FLOAT)
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("axis", -1)
.Finalize(&op.node_def));
}
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[1,?,3];[];[]", "in0");
INFER_ERROR("be rank 0", op, "[1,?,3];[1];[]");
INFER_ERROR("be rank 0", op, "[1,?,3];[];[1]");
}
}
TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannel) {
ShapeInferenceTestOp op("FakeQuantWithMinMaxVarsPerChannel");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[?];?;?", "in0");
INFER_OK(op, "[1,?,3];[3];[3]", "in0");
INFER_OK(op, "[3];[3];[3]", "in0");
INFER_ERROR("be rank 1", op, "[1,?,3];[1];[]");
INFER_ERROR("be rank 1", op, "[1,?,3];[];[1]");
INFER_ERROR("must be equal", op, "[1,?,3];[2];[?]");
INFER_ERROR("must be equal", op, "[1,?,3];[?];[2]");
INFER_ERROR("must be equal", op, "[1,?,?];[1];[2]");
INFER_ERROR("must be equal", op, "[5];[4];[?]");
}
TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannelGradient) {
ShapeInferenceTestOp op("FakeQuantWithMinMaxVarsPerChannelGradient");
INFER_OK(op, "?;?;?;?", "in0;[?];[?]");
INFER_OK(op, "[3];[3];[3];[3]", "in0;in3;in3");
INFER_OK(op, "[1,3];[1,3];[3];[3]", "in0;in3;in3");
INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4]", "in0;in3;in3");
INFER_ERROR("be equal rank", op, "[1,?,3];[1,?,3];[3];[]");
INFER_ERROR("be rank 1", op, "[1,?,3];[1,?,3];[];[3]");
INFER_ERROR("be at least rank 1", op, "[];[];[1];[1]");
INFER_ERROR("be at most rank 4", op, "[1,2,3,4,5];[1,2,3,4,5];[1];[1]");
INFER_ERROR("must be equal", op, "[1,3];[1,3];[2];[3]");
INFER_ERROR("must be equal", op, "[1,3];[1,3];[3];[2]");
}
TEST(ArrayOpsTest, QuantizedConcat_ShapeFn) {
ShapeInferenceTestOp op("QuantizedConcat");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
std::vector<NodeDefBuilder::NodeOut> limit_list;
for (int i = 0; i < n; ++i) {
src_list.emplace_back("a", 0, DT_QUINT8);
limit_list.emplace_back("b", 0, DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "QuantizedConcat")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Input(limit_list)
.Input(limit_list)
.Attr("N", n)
.Finalize(&op.node_def));
};
set_n(1);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?;?;?");
set_n(2);
INFER_ERROR("must be rank 0", op, "[];?;?;?;?;?;[1]");
INFER_ERROR("must be rank 0", op, "[];?;?;?;?;[1];?");
INFER_ERROR("must be rank 0", op, "[];?;?;?;[1];?;?");
INFER_ERROR("must be rank 0", op, "[];?;?;[1];?;?;?");
set_n(2);
INFER_ERROR("must be rank 2", op, "[];[1,2];[1,2,3];?;?;?;?");
INFER_OK(op, "[];[1,2];[1,3];?;?;?;?", "[?,?];[];[]");
Tensor concat_dim_t;
op.input_tensors.push_back(&concat_dim_t);
set_n(2);
concat_dim_t = test::AsScalar(0);
INFER_OK(op, "[];[100,2,?];[10,?,3];?;?;?;?", "[110,d1_1,d2_2];[];[]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[];[100,2,5];[10,?,3];?;?;?;?");
}
TEST(StateOpsTest, _ParallelConcatStart_ShapeFn) {
ShapeInferenceTestOp op("_ParallelConcatStart");
TensorShape shape({1, 2, 3});
TensorShapeProto shape_proto;
shape.AsProto(&shape_proto);
TF_ASSERT_OK(NodeDefBuilder("test", "_ParallelConcatStart")
.Attr("shape", shape_proto)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/array_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/array_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f50e333-75f7-40b8-94b5-107747cf6b16 | cpp | tensorflow/tensorflow | string_ops | tensorflow/core/ops/string_ops.cc | tensorflow/core/ops/string_ops_test.cc | #include <string>
#include <vector>
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace shape_inference {
class InferenceContext;
}
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("RegexReplace")
.Input("input: string")
.Input("pattern: string")
.Input("rewrite: string")
.Output("output: string")
.Attr("replace_global: bool = true")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
c->set_output(0, c->input(0));
return absl::OkStatus();
});
REGISTER_OP("StaticRegexReplace")
.Input("input: string")
.Attr("pattern: string")
.Attr("rewrite: string")
.Output("output: string")
.Attr("replace_global: bool = true")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RegexFullMatch")
.Input("input: string")
.Input("pattern: string")
.Output("output: bool")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->input(0));
return absl::OkStatus();
});
REGISTER_OP("StaticRegexFullMatch")
.Input("input: string")
.Attr("pattern: string")
.Output("output: bool")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringToHashBucketFast")
.Input("input: string")
.Output("output: int64")
.Attr("num_buckets: int >= 1")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("_TensorToHashBucketFast")
.Input("input: T")
.Output("output: int64")
.Attr("T: {int8, uint8, int16, uint16, int32, uint32, int64, uint64}")
.Attr("num_buckets: int >= 1")
.SetShapeFn(shape_inference::UnchangedShape)
.Doc(R"doc(
Internal operation that composes converting the tensor to a string tensor
(AsString) with hashing it (StringToHashBucketFast); reserved for internal use.
reserved for internal use.
Do not invoke this operator directly in Python. A fusion optimization is
expected to create these operators.
)doc");
REGISTER_OP("StringToHashBucketStrong")
.Input("input: string")
.Output("output: int64")
.Attr("num_buckets: int >= 1")
.Attr("key: list(int)")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringToHashBucket")
.Input("string_tensor: string")
.Output("output: int64")
.Attr("num_buckets: int >= 1")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("ReduceJoin")
.Input("inputs: string")
.Input("reduction_indices: int32")
.Attr("keep_dims: bool = false")
.Attr("separator: string = ''")
.Output("output: string")
.SetShapeFn(shape_inference::ReductionShape);
REGISTER_OP("UnsortedSegmentJoin")
.Input("inputs: string")
.Input("segment_ids: Tindices")
.Input("num_segments: Tnumsegments")
.Attr("separator: string = ''")
.Attr("Tindices: {int32,int64}")
.Attr("Tnumsegments: {int32,int64} = DT_INT32")
.Output("output: string")
.SetShapeFn(shape_inference::SegmentReductionWithNumSegmentsShapeFn);
REGISTER_OP("AsString")
.Input("input: T")
.Output("output: string")
.Attr("T: {realnumbertype, complex64, complex128, bool, variant, string}")
.Attr("precision: int = -1")
.Attr("scientific: bool = false")
.Attr("shortest: bool = false")
.Attr("width: int = -1")
.Attr("fill: string = ''")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringFormat")
.Input("inputs: T")
.Output("output: string")
.Attr("T: list(type) >= 0")
.Attr("template: string = '%s'")
.Attr("placeholder: string = '%s'")
.Attr("summarize: int = 3")
.SetShapeFn([](InferenceContext* c) {
string template_;
string placeholder;
TF_RETURN_IF_ERROR(c->GetAttr("template", &template_));
TF_RETURN_IF_ERROR(c->GetAttr("placeholder", &placeholder));
std::vector<std::string> split_template;
split_template = absl::StrSplit(template_, placeholder);
int64_t num_placeholders = split_template.size() - 1;
if (c->num_inputs() != num_placeholders) {
return errors::InvalidArgument(strings::StrCat(
"num placeholders in template and num inputs must match: ",
num_placeholders, " vs. ", c->num_inputs()));
}
c->set_output(0, c->Scalar());
return absl::OkStatus();
});
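// Example: the template "x: %s, y: %s" splits on the "%s" placeholder into
// three pieces, i.e. two placeholders, so exactly two inputs are required;
// the formatted output is always a scalar string.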
REGISTER_OP("StringJoin")
.Input("inputs: N * string")
.Attr("N: int>=0")
.Attr("separator: string = ''")
.Output("output: string")
.SetShapeFn([](InferenceContext* c) {
bool all_scalar = true;
for (int i = 0; i < c->num_inputs(); ++i) {
if (c->Rank(c->input(i)) != 0) all_scalar = false;
}
if (all_scalar) {
c->set_output(0, c->Scalar());
return absl::OkStatus();
}
ShapeHandle out = c->UnknownShape();
for (int i = 0; i < c->num_inputs(); ++i) {
if (c->RankKnown(c->input(i)) && c->Rank(c->input(i)) != 0) {
TF_RETURN_IF_ERROR(c->Merge(out, c->input(i), &out));
}
}
c->set_output(0, out);
return absl::OkStatus();
});
REGISTER_OP("StringSplit")
.Input("input: string")
.Input("delimiter: string")
.Output("indices: int64")
.Output("values: string")
.Output("shape: int64")
.Attr("skip_empty: bool = true")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 2));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("StringSplitV2")
.Input("input: string")
.Input("sep: string")
.Output("indices: int64")
.Output("values: string")
.Output("shape: int64")
.Attr("maxsplit: int = -1")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 2));
c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
c->set_output(2, c->Vector(2));
return absl::OkStatus();
});
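// Both split variants emit the components of a rank-2 SparseTensor: indices
// is [num_tokens, 2], values holds the tokens, and shape is the dense shape
// [num_input_strings, max_tokens_per_string]; hence the fixed Vector(2).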
REGISTER_OP("StringLower")
.Input("input: string")
.Output("output: string")
.Attr("encoding: string =''")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringUpper")
.Input("input: string")
.Output("output: string")
.Attr("encoding: string =''")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringStrip")
.Input("input: string")
.Output("output: string")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("StringLength")
.Input("input: string")
.Output("output: int32")
.Attr("unit: {'BYTE', 'UTF8_CHAR'} = 'BYTE'")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("EncodeBase64")
.Input("input: string")
.Output("output: string")
.Attr("pad: bool = false")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("DecodeBase64")
.Input("input: string")
.Output("output: string")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("Substr")
.Input("input: string")
.Input("pos: T")
.Input("len: T")
.Output("output: string")
.Attr("T: {int32, int64}")
.Attr("unit: {'BYTE', 'UTF8_CHAR'} = 'BYTE'")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle pos_shape = c->input(1);
ShapeHandle len_shape = c->input(2);
ShapeHandle unused;
if (c->RankKnown(len_shape)) {
TF_RETURN_IF_ERROR(c->WithRank(pos_shape, c->Rank(len_shape), &unused));
}
for (int32_t i = 0; i < c->Rank(pos_shape); ++i) {
DimensionHandle pos_dim = c->Dim(pos_shape, i);
DimensionHandle len_dim = c->Dim(len_shape, i);
if (c->Value(pos_dim) != c->Value(len_dim)) {
return errors::InvalidArgument(
"pos and len shapes must match: ", c->DebugString(pos_shape),
" vs. ", c->DebugString(len_shape));
}
}
return shape_inference::BroadcastBinaryOpShapeFn(c);
});
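// pos and len must agree elementwise in shape; the result then broadcasts
// against input, so e.g. scalar pos/len apply one substring spec to every
// element and the output keeps the input's shape.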
REGISTER_OP("UnicodeScript")
.Input("input: int32")
.Output("output: int32")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UnicodeEncode")
.Input("input_values: int32")
.Input("input_splits: Tsplits")
.Attr("errors: {'ignore', 'replace', 'strict'} = 'replace'")
.Attr("output_encoding: {'UTF-8', 'UTF-16-BE', 'UTF-32-BE'}")
.Attr("replacement_char: int = 65533")
.Attr("Tsplits: {int32, int64} = DT_INT64")
.Output("output: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input_inner_values_shape = c->input(0);
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(input_inner_values_shape, 1, &unused));
ShapeHandle splits_shape = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(splits_shape, 1, &unused));
std::vector<DimensionHandle> dims(1);
TF_RETURN_IF_ERROR(c->Subtract(c->Dim(splits_shape, 0), 1, &dims[0]));
c->set_output(0, c->MakeShape(dims));
return absl::OkStatus();
});
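// Example: input_splits = {0, 2, 5} describes two ragged rows ([v0,v1] and
// [v2,v3,v4]), so the output is a string vector of len(splits) - 1 = 2.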
REGISTER_OP("UnicodeTranscode")
.Input("input: string")
.Output("output: string")
.Attr("input_encoding: string")
.Attr("output_encoding: {'UTF-8', 'UTF-16-BE', 'UTF-32-BE'}")
.Attr("errors: {'strict', 'replace', 'ignore'} = 'replace'")
.Attr("replacement_char: int = 65533")
.Attr("replace_control_characters: bool = false")
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("UnicodeDecode")
.Input("input: string")
.Output("row_splits: Tsplits")
.Output("char_values: int32")
.Attr("input_encoding: string")
.Attr("errors: {'strict', 'replace', 'ignore'} = 'replace'")
.Attr("replacement_char: int = 65533")
.Attr("replace_control_characters: bool = false")
.Attr("Tsplits: {int32, int64} = DT_INT64")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle num_row_splits;
DimensionHandle input_size = c->NumElements(c->input(0));
TF_RETURN_IF_ERROR(c->Add(input_size, 1, &num_row_splits));
c->set_output(0, c->Vector(num_row_splits));
DimensionHandle num_chars = c->UnknownDim();
c->set_output(1, c->Vector(num_chars));
return absl::OkStatus();
});
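// Decoding N input strings yields N ragged rows, so row_splits has N+1
// entries; the total character count is not statically known.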
REGISTER_OP("UnicodeDecodeWithOffsets")
.Input("input: string")
.Output("row_splits: Tsplits")
.Output("char_values: int32")
.Output("char_to_byte_starts: int64")
.Attr("input_encoding: string")
.Attr("errors: {'strict', 'replace', 'ignore'} = 'replace'")
.Attr("replacement_char: int = 65533")
.Attr("replace_control_characters: bool = false")
.Attr("Tsplits: {int32, int64} = DT_INT64")
.SetShapeFn([](InferenceContext* c) {
DimensionHandle num_row_splits;
DimensionHandle input_size = c->NumElements(c->input(0));
TF_RETURN_IF_ERROR(c->Add(input_size, 1, &num_row_splits));
c->set_output(0, c->Vector(num_row_splits));
DimensionHandle num_chars = c->UnknownDim();
c->set_output(1, c->Vector(num_chars));
c->set_output(2, c->Vector(num_chars));
return absl::OkStatus();
});
REGISTER_OP("StringNGrams")
.Attr("separator: string")
.Attr("ngram_widths: list(int) >= 0")
.Attr("left_pad: string")
.Attr("right_pad: string")
.Attr("pad_width: int")
.Attr("preserve_short_sequences: bool")
.Attr("Tsplits: {int32, int64} = DT_INT64")
.Input("data: string")
.Input("data_splits: Tsplits")
.Output("ngrams: string")
.Output("ngrams_splits: Tsplits")
.SetShapeFn([](InferenceContext* c) {
c->set_output(0, c->UnknownShapeOfRank(1));
ShapeHandle data = c->input(0);
TF_RETURN_IF_ERROR(c->WithRank(data, 1, &data));
ShapeHandle data_splits = c->input(1);
TF_RETURN_IF_ERROR(c->WithRank(data_splits, 1, &data_splits));
c->set_output(1, data_splits);
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(StringOpsTest, StringJoin_ShapeFn) {
ShapeInferenceTestOp op("StringJoin");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_STRING);
TF_ASSERT_OK(NodeDefBuilder("test", "StringJoin")
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
INFER_OK(op, "[];[];[]", "[]");
INFER_OK(op, "[];?;[]", "?");
INFER_OK(op, "[1,?];[];[?,2]", "[d0_0,d2_1]");
INFER_OK(op, "[1,?];?;[?,2]", "[d0_0,d2_1]");
INFER_ERROR("must be equal", op, "[1,2];[];[?,3]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/string_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/string_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |